diff --git a/CREDITS b/CREDITS index 837367624e4598e1b936c25b9c2095f5d43e33eb..c58560701d13158f535046c09fd4b825922ced94 100644 --- a/CREDITS +++ b/CREDITS @@ -9,7 +9,7 @@ Linus ---------- -M: Matt Mackal +N: Matt Mackal E: mpm@selenic.com D: SLOB slab allocator @@ -1910,7 +1910,7 @@ S: Ra'annana, Israel N: Andi Kleen E: andi@firstfloor.org -U: http://www.halobates.de +W: http://www.halobates.de D: network, x86, NUMA, various hacks S: Schwalbenstr. 96 S: 85551 Ottobrunn @@ -2089,8 +2089,8 @@ D: ST Microelectronics SPEAr13xx PCI host bridge driver D: Synopsys Designware PCI host bridge driver N: Gabor Kuti -M: seasons@falcon.sch.bme.hu -M: seasons@makosteszta.sote.hu +E: seasons@falcon.sch.bme.hu +E: seasons@makosteszta.sote.hu D: Original author of software suspend N: Jaroslav Kysela @@ -2775,6 +2775,10 @@ S: C/ Mieses 20, 9-B S: Valladolid 47009 S: Spain +N: Peter Oruba +D: AMD Microcode loader driver +S: Germany + N: Jens Osterkamp E: jens@de.ibm.com D: Maintainer of Spidernet network driver for Cell @@ -3945,8 +3949,6 @@ E: gwingerde@gmail.com D: Ralink rt2x00 WLAN driver D: Minix V2 file-system D: Misc fixes -S: Geessinkweg 177 -S: 7544 TX Enschede S: The Netherlands N: Lars Wirzenius diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index 5bd4b07c2f903683a4dded6abc6d21231319d192..c8a8eb1a2b119c064f038559fa67f6511a31bce6 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX @@ -152,8 +152,6 @@ driver-model/ - directory with info about Linux driver model. early-userspace/ - info about initramfs, klibc, and userspace early during boot. -edac.txt - - information on EDAC - Error Detection And Correction efi-stub.txt - How to use the EFI boot stub to bypass GRUB or elilo on EFI systems. eisa.txt diff --git a/Documentation/ABI/stable/sysfs-devices b/Documentation/ABI/stable/sysfs-devices index df449d79b563ebe850f1eeb28b0e2e91d517c430..35c457f8ce736403a8af48fb1952f9aa7706e1da 100644 --- a/Documentation/ABI/stable/sysfs-devices +++ b/Documentation/ABI/stable/sysfs-devices @@ -8,3 +8,17 @@ Description: Any device associated with a device-tree node will have an of_path symlink pointing to the corresponding device node in /sys/firmware/devicetree/ + +What: /sys/devices/*/devspec +Date: October 2016 +Contact: Device Tree mailing list +Description: + If CONFIG_OF is enabled, then this file is present. When + read, it returns full name of the device node. + +What: /sys/devices/*/obppath +Date: October 2016 +Contact: Device Tree mailing list +Description: + If CONFIG_OF is enabled, then this file is present. When + read, it returns full name of the device node. diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block index 71d184dbb70d29daabf9ad1461d12615c7d34fbf..2da04ce6aeef482645bfd9edd924ce195275ea26 100644 --- a/Documentation/ABI/testing/sysfs-block +++ b/Documentation/ABI/testing/sysfs-block @@ -235,3 +235,45 @@ Description: write_same_max_bytes is 0, write same is not supported by the device. +What: /sys/block//queue/write_zeroes_max_bytes +Date: November 2016 +Contact: Chaitanya Kulkarni +Description: + Devices that support write zeroes operation in which a + single request can be issued to zero out the range of + contiguous blocks on storage without having any payload + in the request. This can be used to optimize writing zeroes + to the devices. write_zeroes_max_bytes indicates how many + bytes can be written in a single write zeroes command. If + write_zeroes_max_bytes is 0, write zeroes is not supported + by the device. 
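+
+		For example, assuming a disk exposed as sda (the device name
+		is purely illustrative), the limit can be queried with:
+
+		# cat /sys/block/sda/queue/write_zeroes_max_bytes
+
+		A reported value of 0 means the device does not support the
+		write zeroes operation.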
+ +What: /sys/block//queue/zoned +Date: September 2016 +Contact: Damien Le Moal +Description: + zoned indicates if the device is a zoned block device + and the zone model of the device if it is indeed zoned. + The possible values indicated by zoned are "none" for + regular block devices and "host-aware" or "host-managed" + for zoned block devices. The characteristics of + host-aware and host-managed zoned block devices are + described in the ZBC (Zoned Block Commands) and ZAC + (Zoned Device ATA Command Set) standards. These standards + also define the "drive-managed" zone model. However, + since drive-managed zoned block devices do not support + zone commands, they will be treated as regular block + devices and zoned will report "none". + +What: /sys/block//queue/chunk_sectors +Date: September 2016 +Contact: Hannes Reinecke +Description: + chunk_sectors has different meaning depending on the type + of the disk. For a RAID device (dm-raid), chunk_sectors + indicates the size in 512B sectors of the RAID volume + stripe segment. For a zoned block device, either + host-aware or host-managed, chunk_sectors indicates the + size of 512B sectors of the zones of the device, with + the eventual exception of the last zone of the device + which may be smaller. diff --git a/Documentation/ABI/testing/sysfs-bus-fsl-mc b/Documentation/ABI/testing/sysfs-bus-fsl-mc new file mode 100644 index 0000000000000000000000000000000000000000..80256b8b4f26119e0d97fa455e67d4e7e5f4fdeb --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-fsl-mc @@ -0,0 +1,21 @@ +What: /sys/bus/fsl-mc/drivers/.../bind +Date: December 2016 +Contact: stuart.yoder@nxp.com +Description: + Writing a device location to this file will cause + the driver to attempt to bind to the device found at + this location. The format for the location is Object.Id + and is the same as found in /sys/bus/fsl-mc/devices/. + For example: + # echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/bind + +What: /sys/bus/fsl-mc/drivers/.../unbind +Date: December 2016 +Contact: stuart.yoder@nxp.com +Description: + Writing a device location to this file will cause the + driver to attempt to unbind from the device found at + this location. The format for the location is Object.Id + and is the same as found in /sys/bus/fsl-mc/devices/. + For example: + # echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/unbind diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index fee35c00cc4ed6fd59fdff3fd14cac396e9336a4..b8f220f978dd335389f7cae80e1e1eafb5fdde2d 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -329,6 +329,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_pressure_scale What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale What: /sys/bus/iio/devices/iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_scale What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_scale +What: /sys/bus/iio/devices/iio:deviceX/in_countY_scale KernelVersion: 2.6.35 Contact: linux-iio@vger.kernel.org Description: @@ -1579,3 +1580,20 @@ Contact: linux-iio@vger.kernel.org Description: Raw (unscaled no offset etc.) electric conductivity reading that can be processed to siemens per meter. + +What: /sys/bus/iio/devices/iio:deviceX/in_countY_raw +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Raw counter device counts from channel Y. For quadrature + counters, multiplication by an available [Y]_scale results in + the counts of a single quadrature signal phase from channel Y. 
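+
+		For example, for a hypothetical counter exposed as
+		iio:device0 with channel 0 (both names are illustrative),
+		the count and its scale can be read with:
+
+		# cat /sys/bus/iio/devices/iio:device0/in_count0_raw
+		# cat /sys/bus/iio/devices/iio:device0/in_count0_scale
+
+		Multiplying the two values gives the scaled count.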
+ +What: /sys/bus/iio/devices/iio:deviceX/in_indexY_raw +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Raw counter device index value from channel Y. This attribute + provides an absolute positional reference (e.g. a pulse once per + revolution) which may be used to home positional systems as + required. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector new file mode 100644 index 0000000000000000000000000000000000000000..2071f9bcfaa5c021fe5ef5e0fa0f9c0f6da41cab --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector @@ -0,0 +1,36 @@ +What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_invert +Date: October 2016 +KernelVersion: 4.9 +Contact: Peter Rosin +Description: + The DAC is used to find the peak level of an alternating + voltage input signal by a binary search using the output + of a comparator wired to an interrupt pin. Like so: + _ + | \ + input +------>-------|+ \ + | \ + .-------. | }---. + | | | / | + | dac|-->--|- / | + | | |_/ | + | | | + | | | + | irq|------<-------' + | | + '-------' + The boolean invert attribute (0/1) should be set when the + input signal is centered around the maximum value of the + dac instead of zero. The envelope detector will search + from below in this case and will also invert the result. + The edge/level of the interrupt is also switched to its + opposite value. + +What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_compare_interval +Date: October 2016 +KernelVersion: 4.9 +Contact: Peter Rosin +Description: + Number of milliseconds to wait for the comparator in each + step of the binary search for the input peak level. Needs + to relate to the frequency of the input signal. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 new file mode 100644 index 0000000000000000000000000000000000000000..ba676520b9530e7c6d955ef0cdd6e68a2fd93823 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 @@ -0,0 +1,125 @@ +What: /sys/bus/iio/devices/iio:deviceX/in_count_count_direction_available +What: /sys/bus/iio/devices/iio:deviceX/in_count_count_mode_available +What: /sys/bus/iio/devices/iio:deviceX/in_count_noise_error_available +What: /sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available +What: /sys/bus/iio/devices/iio:deviceX/in_index_index_polarity_available +What: /sys/bus/iio/devices/iio:deviceX/in_index_synchronous_mode_available +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Discrete set of available values for the respective counter + configuration are listed in this file. + +What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_direction +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Read-only attribute that indicates whether the counter for + channel Y is counting up or down. + +What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_mode +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Count mode for channel Y. Four count modes are available: + normal, range limit, non-recycle, and modulo-n. The preset value + for channel Y is used by the count mode where required. + + Normal: + Counting is continuous in either direction. + + Range Limit: + An upper or lower limit is set, mimicking limit switches + in the mechanical counterpart. The upper limit is set to + the preset value, while the lower limit is set to 0. 
The + counter freezes at count = preset when counting up, and + at count = 0 when counting down. At either of these + limits, the counting is resumed only when the count + direction is reversed. + + Non-recycle: + Counter is disabled whenever a 24-bit count overflow or + underflow takes place. The counter is re-enabled when a + new count value is loaded to the counter via a preset + operation or write to raw. + + Modulo-N: + A count boundary is set between 0 and the preset value. + The counter is reset to 0 at count = preset when + counting up, while the counter is set to the preset + value at count = 0 when counting down; the counter does + not freeze at the bundary points, but counts + continuously throughout. + +What: /sys/bus/iio/devices/iio:deviceX/in_countY_noise_error +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Read-only attribute that indicates whether excessive noise is + present at the channel Y count inputs in quadrature clock mode; + irrelevant in non-quadrature clock mode. + +What: /sys/bus/iio/devices/iio:deviceX/in_countY_preset +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + If the counter device supports preset registers, the preset + count for channel Y is provided by this attribute. + +What: /sys/bus/iio/devices/iio:deviceX/in_countY_quadrature_mode +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Configure channel Y counter for non-quadrature or quadrature + clock mode. Selecting non-quadrature clock mode will disable + synchronous load mode. In quadrature clock mode, the channel Y + scale attribute selects the encoder phase division (scale of 1 + selects full-cycle, scale of 0.5 selects half-cycle, scale of + 0.25 selects quarter-cycle) processed by the channel Y counter. + + Non-quadrature: + The filter and decoder circuit are bypassed. Encoder A + input serves as the count input and B as the UP/DOWN + direction control input, with B = 1 selecting UP Count + mode and B = 0 selecting Down Count mode. + + Quadrature: + Encoder A and B inputs are digitally filtered and + decoded for UP/DN clock. + +What: /sys/bus/iio/devices/iio:deviceX/in_countY_set_to_preset_on_index +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Whether to set channel Y counter with channel Y preset value + when channel Y index input is active, or continuously count. + Valid attribute values are boolean. + +What: /sys/bus/iio/devices/iio:deviceX/in_indexY_index_polarity +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Active level of channel Y index input; irrelevant in + non-synchronous load mode. + +What: /sys/bus/iio/devices/iio:deviceX/in_indexY_synchronous_mode +KernelVersion: 4.9 +Contact: linux-iio@vger.kernel.org +Description: + Configure channel Y counter for non-synchronous or synchronous + load mode. Synchronous load mode cannot be selected in + non-quadrature clock mode. + + Non-synchronous: + A logic low level is the active level at this index + input. The index function (as enabled via + set_to_preset_on_index) is performed directly on the + active level of the index input. + + Synchronous: + Intended for interfacing with encoder Index output in + quadrature clock mode. The active level is configured + via index_polarity. The index function (as enabled via + set_to_preset_on_index) is performed synchronously with + the quadrature clock on the active level of the index + input. 
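+
+		Putting the above attributes together, a minimal
+		configuration sketch for channel 0 of an assumed iio:device0
+		(device name and values are illustrative; consult the
+		corresponding *_available attributes for the exact strings
+		accepted by the driver):
+
+		# cat /sys/bus/iio/devices/iio:device0/in_count_quadrature_mode_available
+		# echo quadrature > /sys/bus/iio/devices/iio:device0/in_count0_quadrature_mode
+		# echo 500 > /sys/bus/iio/devices/iio:device0/in_count0_preset
+		# echo 1 > /sys/bus/iio/devices/iio:device0/in_count0_set_to_preset_on_index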
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-cros-ec b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec new file mode 100644 index 0000000000000000000000000000000000000000..297b9720f024129d50683b0c515cee58672e1d00 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec @@ -0,0 +1,18 @@ +What: /sys/bus/iio/devices/iio:deviceX/calibrate +Date: July 2015 +KernelVersion: 4.7 +Contact: linux-iio@vger.kernel.org +Description: + Writing '1' will perform a FOC (Fast Online Calibration). The + corresponding calibration offsets can be read from *_calibbias + entries. + +What: /sys/bus/iio/devices/iio:deviceX/location +Date: July 2015 +KernelVersion: 4.7 +Contact: linux-iio@vger.kernel.org +Description: + This attribute returns a string with the physical location where + the motion sensor is placed. For example, in a laptop a motion + sensor can be located on the base or on the lid. Current valid + values are 'base' and 'lid'. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac new file mode 100644 index 0000000000000000000000000000000000000000..580e93f373f6e5ebb1ccd46b3a8f0b609fff77e4 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac @@ -0,0 +1,8 @@ +What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw_available +Date: October 2016 +KernelVersion: 4.9 +Contact: Peter Rosin +Description: + The range of available values represented as the minimum value, + the step and the maximum value, all enclosed in square brackets. + Example: [0 1 256] diff --git a/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018 b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018 new file mode 100644 index 0000000000000000000000000000000000000000..f0ce0a0476ea6f32153494a9e14d99868dd00756 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018 @@ -0,0 +1,19 @@ +What: /sys/bus/iio/devices/iio:deviceX/proximity_on_chip_ambient_infrared_suppression +Date: January 2011 +KernelVersion: 2.6.37 +Contact: linux-iio@vger.kernel.org +Description: + From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the + infrared suppression: + + Scheme 0, makes full n (4, 8, 12, 16) bits (unsigned) proximity + detection. The range of Scheme 0 proximity count is from 0 to + 2^n. Logic 1 of this bit, Scheme 1, makes n-1 (3, 7, 11, 15) + bits (2's complementary) proximity_less_ambient detection. The + range of Scheme 1 proximity count is from -2^(n-1) to 2^(n-1). + The sign bit is extended for resolutions less than 16. While + Scheme 0 has wider dynamic range, Scheme 1 proximity detection + is less affected by the ambient IR noise variation. 
+ + 0 Sensing IR from LED and ambient + 1 Sensing IR from LED with ambient IR rejection diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583 similarity index 74% rename from drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 rename to Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583 index 660781df409fbc15972a038b3cde41993609fa63..a2e19964e87e9013223b8f8e2bf76b699bdb774e 100644 --- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 +++ b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583 @@ -1,18 +1,18 @@ -What: /sys/bus/iio/devices/device[n]/lux_table +What: /sys/bus/iio/devices/device[n]/in_illuminance_calibrate KernelVersion: 2.6.37 Contact: linux-iio@vger.kernel.org Description: - This property gets/sets the table of coefficients - used in calculating illuminance in lux. + This property causes an internal calibration of the als gain trim + value which is later used in calculating illuminance in lux. -What: /sys/bus/iio/devices/device[n]/illuminance0_calibrate +What: /sys/bus/iio/devices/device[n]/in_illuminance_lux_table KernelVersion: 2.6.37 Contact: linux-iio@vger.kernel.org Description: - This property causes an internal calibration of the als gain trim - value which is later used in calculating illuminance in lux. + This property gets/sets the table of coefficients + used in calculating illuminance in lux. -What: /sys/bus/iio/devices/device[n]/illuminance0_input_target +What: /sys/bus/iio/devices/device[n]/in_illuminance_input_target KernelVersion: 2.6.37 Contact: linux-iio@vger.kernel.org Description: diff --git a/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 new file mode 100644 index 0000000000000000000000000000000000000000..2a91fbe394fce9014b32249e2d3e77465bf89cf9 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 @@ -0,0 +1,8 @@ +What: /sys/bus/iio/devices/iio:deviceX/out_resistance_raw_available +Date: October 2016 +KernelVersion: 4.9 +Contact: Peter Rosin +Description: + The range of available values represented as the minimum value, + the step and the maximum value, all enclosed in square brackets. + Example: [0 1 256] diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci index b3bc50f650ee49ee97a748442afee11502cb7ada..5a1732b787071ce1495982f125759f15926e581f 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci +++ b/Documentation/ABI/testing/sysfs-bus-pci @@ -294,3 +294,10 @@ Description: a firmware bug to the system vendor. Writing to this file taints the kernel with TAINT_FIRMWARE_WORKAROUND, which reduces the supportability of your system. + +What: /sys/bus/pci/devices/.../revision +Date: November 2016 +Contact: Emil Velikov +Description: + This file contains the revision field of the the PCI device. + The value comes from device config space. The file is read only. diff --git a/Documentation/ABI/testing/sysfs-bus-vfio-mdev b/Documentation/ABI/testing/sysfs-bus-vfio-mdev new file mode 100644 index 0000000000000000000000000000000000000000..452dbe39270ecab61f6ca9b1aa30f628519f761e --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-vfio-mdev @@ -0,0 +1,111 @@ +What: /sys/...//mdev_supported_types/ +Date: October 2016 +Contact: Kirti Wankhede +Description: + This directory contains list of directories of currently + supported mediated device types and their details for + . 
Supported type attributes are defined by the + vendor driver who registers with Mediated device framework. + Each supported type is a directory whose name is created + by adding the device driver string as a prefix to the + string provided by the vendor driver. + +What: /sys/...//mdev_supported_types// +Date: October 2016 +Contact: Kirti Wankhede +Description: + This directory gives details of supported type, like name, + description, available_instances, device_api etc. + 'device_api' and 'available_instances' are mandatory + attributes to be provided by vendor driver. 'name', + 'description' and other vendor driver specific attributes + are optional. + +What: /sys/.../mdev_supported_types//create +Date: October 2016 +Contact: Kirti Wankhede +Description: + Writing UUID to this file will create mediated device of + type for parent device . This is a + write-only file. + For example: + # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \ + /sys/devices/foo/mdev_supported_types/foo-1/create + +What: /sys/.../mdev_supported_types//devices/ +Date: October 2016 +Contact: Kirti Wankhede +Description: + This directory contains symbolic links pointing to mdev + devices sysfs entries which are created of this . + +What: /sys/.../mdev_supported_types//available_instances +Date: October 2016 +Contact: Kirti Wankhede +Description: + Reading this attribute will show the number of mediated + devices of type that can be created. This is a + readonly file. +Users: + Userspace applications interested in creating mediated + device of that type. Userspace application should check + the number of available instances could be created before + creating mediated device of this type. + +What: /sys/.../mdev_supported_types//device_api +Date: October 2016 +Contact: Kirti Wankhede +Description: + Reading this attribute will show VFIO device API supported + by this type. For example, "vfio-pci" for a PCI device, + "vfio-platform" for platform device. + +What: /sys/.../mdev_supported_types//name +Date: October 2016 +Contact: Kirti Wankhede +Description: + Reading this attribute will show human readable name of the + mediated device that will get created of type . + This is optional attribute. For example: "Grid M60-0Q" +Users: + Userspace applications interested in knowing the name of + a particular that can help in understanding the + type of mediated device. + +What: /sys/.../mdev_supported_types//description +Date: October 2016 +Contact: Kirti Wankhede +Description: + Reading this attribute will show description of the type of + mediated device that will get created of type . + This is optional attribute. For example: + "2 heads, 512M FB, 2560x1600 maximum resolution" +Users: + Userspace applications interested in knowing the details of + a particular that can help in understanding the + features provided by that type of mediated device. + +What: /sys/.../// +Date: October 2016 +Contact: Kirti Wankhede +Description: + This directory represents device directory of mediated + device. It contains all the attributes related to mediated + device. + +What: /sys/...///mdev_type +Date: October 2016 +Contact: Kirti Wankhede +Description: + This is symbolic link pointing to supported type, + directory of which this mediated device is created. + +What: /sys/...///remove +Date: October 2016 +Contact: Kirti Wankhede +Description: + Writing '1' to this file destroys the mediated device. The + vendor driver can fail the remove() callback if that device + is active and the vendor driver doesn't support hot unplug. 
+ Example: + # echo 1 > /sys/bus/mdev/devices//remove diff --git a/Documentation/ABI/testing/sysfs-class-fpga-bridge b/Documentation/ABI/testing/sysfs-class-fpga-bridge new file mode 100644 index 0000000000000000000000000000000000000000..312ae2c579d8a4466d2b003858930c480875d83c --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-fpga-bridge @@ -0,0 +1,11 @@ +What: /sys/class/fpga_bridge//name +Date: January 2016 +KernelVersion: 4.5 +Contact: Alan Tull +Description: Name of low level FPGA bridge driver. + +What: /sys/class/fpga_bridge//state +Date: January 2016 +KernelVersion: 4.5 +Contact: Alan Tull +Description: Show bridge state as "enabled" or "disabled" diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led index 86ace287d48bb28467a5ad9793e8710172417368..491cdeedc195041a7f611f10490cc4ca569aeb9a 100644 --- a/Documentation/ABI/testing/sysfs-class-led +++ b/Documentation/ABI/testing/sysfs-class-led @@ -4,16 +4,24 @@ KernelVersion: 2.6.17 Contact: Richard Purdie Description: Set the brightness of the LED. Most LEDs don't - have hardware brightness support so will just be turned on for + have hardware brightness support, so will just be turned on for non-zero brightness settings. The value is between 0 and /sys/class/leds//max_brightness. + Writing 0 to this file clears active trigger. + + Writing non-zero to this file while trigger is active changes the + top brightness trigger is going to use. + What: /sys/class/leds//max_brightness Date: March 2006 KernelVersion: 2.6.17 Contact: Richard Purdie Description: - Maximum brightness level for this led, default is 255 (LED_FULL). + Maximum brightness level for this LED, default is 255 (LED_FULL). + + If the LED does not support different brightness levels, this + should be 1. What: /sys/class/leds//trigger Date: March 2006 @@ -21,7 +29,7 @@ KernelVersion: 2.6.17 Contact: Richard Purdie Description: Set the trigger for this LED. A trigger is a kernel based source - of led events. + of LED events. You can change triggers in a similar manner to the way an IO scheduler is chosen. Trigger specific parameters can appear in /sys/class/leds/ once a given trigger is selected. For diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei index 80d9888a8ece2673686ead1cda4504ce568a1051..5096a82f4cde6c0ceac7597fcdbdc1b979f5d8c8 100644 --- a/Documentation/ABI/testing/sysfs-class-mei +++ b/Documentation/ABI/testing/sysfs-class-mei @@ -29,3 +29,19 @@ Description: Display fw status registers content Also number of registers varies between 1 and 6 depending on generation. +What: /sys/class/mei/meiN/hbm_ver +Date: Aug 2016 +KernelVersion: 4.9 +Contact: Tomas Winkler +Description: Display the negotiated HBM protocol version. + + The HBM protocol version negotiated + between the driver and the device. + +What: /sys/class/mei/meiN/hbm_ver_drv +Date: Aug 2016 +KernelVersion: 4.9 +Contact: Tomas Winkler +Description: Display the driver HBM protocol version. + + The HBM protocol version supported by the driver. 
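+
+		For example, assuming the first MEI device is exposed as
+		mei0 (illustrative), the negotiated and driver-supported
+		HBM versions can be compared with:
+
+		# cat /sys/class/mei/mei0/hbm_ver
+		# cat /sys/class/mei/mei0/hbm_ver_drv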
diff --git a/Documentation/ABI/testing/sysfs-class-remoteproc b/Documentation/ABI/testing/sysfs-class-remoteproc new file mode 100644 index 0000000000000000000000000000000000000000..d188afebc8ba77409785e47da76b548368ec77d2 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-remoteproc @@ -0,0 +1,50 @@ +What: /sys/class/remoteproc/.../firmware +Date: October 2016 +Contact: Matt Redfearn +Description: Remote processor firmware + + Reports the name of the firmware currently loaded to the + remote processor. + + To change the running firmware, ensure the remote processor is + stopped (using /sys/class/remoteproc/.../state) and write a new filename. + +What: /sys/class/remoteproc/.../state +Date: October 2016 +Contact: Matt Redfearn +Description: Remote processor state + + Reports the state of the remote processor, which will be one of: + + "offline" + "suspended" + "running" + "crashed" + "invalid" + + "offline" means the remote processor is powered off. + + "suspended" means that the remote processor is suspended and + must be woken to receive messages. + + "running" is the normal state of an available remote processor + + "crashed" indicates that a problem/crash has been detected on + the remote processor. + + "invalid" is returned if the remote processor is in an + unknown state. + + Writing this file controls the state of the remote processor. + The following states can be written: + + "start" + "stop" + + Writing "start" will attempt to start the processor running the + firmware indicated by, or written to, + /sys/class/remoteproc/.../firmware. The remote processor should + transition to "running" state. + + Writing "stop" will attempt to halt the remote processor and + return it to the "offline" state. diff --git a/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration index 4cf1e72222d9c449464c1f3cd07f2ac7508d9349..ec950c93e5c6998daf2ee497732f732a2182ec25 100644 --- a/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration +++ b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration @@ -1,8 +1,9 @@ -What: Attribute for calibrating ST-Ericsson AB8500 Real Time Clock +What: /sys/class/rtc/rtc0/device/rtc_calibration Date: Oct 2011 KernelVersion: 3.0 Contact: Mark Godfrey -Description: The rtc_calibration attribute allows the userspace to +Description: Attribute for calibrating ST-Ericsson AB8500 Real Time Clock + The rtc_calibration attribute allows the userspace to calibrate the AB8500.s 32KHz Real Time Clock. Every 60 seconds the AB8500 will correct the RTC's value by adding to it the value of this attribute. diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe new file mode 100644 index 0000000000000000000000000000000000000000..58553d7a321fb3651e20b3382f940a0b2706b0d4 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-deferred_probe @@ -0,0 +1,12 @@ +What: /sys/devices/.../deferred_probe +Date: August 2016 +Contact: Ben Hutchings +Description: + The /sys/devices/.../deferred_probe attribute is + present for all devices. If a driver detects during + probing a device that a related device is not yet + ready, it may defer probing of the first device. The + kernel will retry probing the first device after any + other device is successfully probed. This attribute + reads as 1 if probing of this device is currently + deferred, or 0 otherwise. 
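+
+	For example, for a hypothetical platform device "foo" (the path
+	is illustrative):
+
+	# cat /sys/devices/platform/foo/deferred_probe
+
+	A value of 1 means probing of the device is currently deferred;
+	0 means it is not.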
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 49874173705507f72fac2f5f55da46fc34c3cc52..2a4a423d08e0d3ed8bce7cc0c9bcd36bb30bb19d 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -272,6 +272,22 @@ Description: Parameters for the CPU cache attributes the modified cache line is written to main memory only when it is replaced + +What: /sys/devices/system/cpu/cpu*/cache/index*/id +Date: September 2016 +Contact: Linux kernel mailing list +Description: Cache id + + The id provides a unique number for a specific instance of + a cache of a particular type. E.g. there may be a level + 3 unified cache on each socket in a server and we may + assign them ids 0, 1, 2, ... + + Note that id value can be non-contiguous. E.g. level 1 + caches typically exist per core, but there may not be a + power of two cores on a socket, so these caches may be + numbered 0, 1, 2, 3, 4, 5, 8, 9, 10, ... + What: /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/turbo_stat /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/sub_turbo_stat diff --git a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl index b82deeaec314b8ff711b122282396340496946c8..470def06ab0a42e40f860d1fe54f8e0d489ec4c3 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl +++ b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl @@ -1,4 +1,4 @@ -What: state +What: /sys/devices/system/ibm_rtl/state Date: Sep 2010 KernelVersion: 2.6.37 Contact: Vernon Mauery @@ -10,7 +10,7 @@ Description: The state file allows a means by which to change in and Users: The ibm-prtm userspace daemon uses this interface. -What: version +What: /sys/devices/system/ibm_rtl/version Date: Sep 2010 KernelVersion: 2.6.37 Contact: Vernon Mauery diff --git a/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2 b/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2 new file mode 100644 index 0000000000000000000000000000000000000000..6212697bbf6f5775c3d2614b1817b2cb138a0d48 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2 @@ -0,0 +1,15 @@ +What: /sys/devices/platform//role +Date: October 2016 +KernelVersion: 4.10 +Contact: Yoshihiro Shimoda +Description: + This file can be read and write. + The file can show/change the phy mode for role swap of usb. + + Write the following strings to change the mode: + "host" - switching mode from peripheral to host. + "peripheral" - switching mode from host to peripheral. + + Read the file, then it shows the following strings: + "host" - The mode is host now. + "peripheral" - The mode is peripheral now. diff --git a/Documentation/ABI/testing/sysfs-platform-sst-atom b/Documentation/ABI/testing/sysfs-platform-sst-atom new file mode 100644 index 0000000000000000000000000000000000000000..0d07c0395660e68b0754923456550d31e8dbabdb --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-sst-atom @@ -0,0 +1,17 @@ +What: /sys/devices/platform/8086%x:00/firmware_version +Date: November 2016 +KernelVersion: 4.10 +Contact: "Sebastien Guiriec" +Description: + LPE Firmware version for SST driver on all atom + plaforms (BYT/CHT/Merrifield/BSW). 
+ If the FW has never been loaded it will display: + "FW not yet loaded" + If FW has been loaded it will display: + "v01.aa.bb.cc" + aa: Major version is reflecting SoC version: + 0d: BYT FW + 0b: BSW FW + 07: Merrifield FW + bb: Minor version + cc: Build version diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power index 50b368d490b599f29809e55bbdd0a29535163fa4..f523e5a3ac33297999aba5c52a81b6075b0a7fa1 100644 --- a/Documentation/ABI/testing/sysfs-power +++ b/Documentation/ABI/testing/sysfs-power @@ -7,30 +7,35 @@ Description: subsystem. What: /sys/power/state -Date: May 2014 +Date: November 2016 Contact: Rafael J. Wysocki Description: The /sys/power/state file controls system sleep states. Reading from this file returns the available sleep state - labels, which may be "mem", "standby", "freeze" and "disk" - (hibernation). The meanings of the first three labels depend on - the relative_sleep_states command line argument as follows: - 1) relative_sleep_states = 1 - "mem", "standby", "freeze" represent non-hibernation sleep - states from the deepest ("mem", always present) to the - shallowest ("freeze"). "standby" and "freeze" may or may - not be present depending on the capabilities of the - platform. "freeze" can only be present if "standby" is - present. - 2) relative_sleep_states = 0 (default) - "mem" - "suspend-to-RAM", present if supported. - "standby" - "power-on suspend", present if supported. - "freeze" - "suspend-to-idle", always present. - - Writing to this file one of these strings causes the system to - transition into the corresponding state, if available. See - Documentation/power/states.txt for a description of what - "suspend-to-RAM", "power-on suspend" and "suspend-to-idle" mean. + labels, which may be "mem" (suspend), "standby" (power-on + suspend), "freeze" (suspend-to-idle) and "disk" (hibernation). + + Writing one of the above strings to this file causes the system + to transition into the corresponding state, if available. + + See Documentation/power/states.txt for more information. + +What: /sys/power/mem_sleep +Date: November 2016 +Contact: Rafael J. Wysocki +Description: + The /sys/power/mem_sleep file controls the operating mode of + system suspend. Reading from it returns the available modes + as "s2idle" (always present), "shallow" and "deep" (present if + supported). The mode that will be used on subsequent attempts + to suspend the system (by writing "mem" to the /sys/power/state + file described above) is enclosed in square brackets. + + Writing one of the above strings to this file causes the mode + represented by it to be used on subsequent attempts to suspend + the system. + + See Documentation/power/states.txt for more information. What: /sys/power/disk Date: September 2006 diff --git a/Documentation/Changes b/Documentation/Changes new file mode 120000 index 0000000000000000000000000000000000000000..7564ae1682bae84b10e025026dcd080e34dc98ce --- /dev/null +++ b/Documentation/Changes @@ -0,0 +1 @@ +process/changes.rst \ No newline at end of file diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt index c0d8788e75d3f1f2a335a27a883f161ad51128ac..72292308d0f5bb140412345e8f924ff243cd7bdb 100644 --- a/Documentation/IPMI.txt +++ b/Documentation/IPMI.txt @@ -111,6 +111,8 @@ ipmi_ssif - A driver for accessing BMCs on the SMBus. It uses the I2C kernel driver's SMBus interfaces to send and receive IPMI messages over the SMBus. +ipmi_powernv - A driver for access BMCs on POWERNV systems. 
+ ipmi_watchdog - IPMI requires systems to have a very capable watchdog timer. This driver implements the standard Linux watchdog timer interface on top of the IPMI message handler. @@ -118,17 +120,15 @@ interface on top of the IPMI message handler. ipmi_poweroff - Some systems support the ability to be turned off via IPMI commands. -These are all individually selectable via configuration options. +bt-bmc - This is not part of the main driver, but instead a driver for +accessing a BMC-side interface of a BT interface. It is used on BMCs +running Linux to provide an interface to the host. -Note that the KCS-only interface has been removed. The af_ipmi driver -is no longer supported and has been removed because it was impossible -to do 32 bit emulation on 64-bit kernels with it. +These are all individually selectable via configuration options. Much documentation for the interface is in the include files. The IPMI include files are: -net/af_ipmi.h - Contains the socket interface. - linux/ipmi.h - Contains the user interface and IOCTL interface for IPMI. linux/ipmi_smi.h - Contains the interface for system management interfaces @@ -245,6 +245,16 @@ addressed (because some boards actually have multiple BMCs on them) and the user should not have to care what type of SMI is below them. +Watching For Interfaces + +When your code comes up, the IPMI driver may or may not have detected +if IPMI devices exist. So you might have to defer your setup until +the device is detected, or you might be able to do it immediately. +To handle this, and to allow for discovery, you register an SMI +watcher with ipmi_smi_watcher_register() to iterate over interfaces +and tell you when they come and go. + + Creating the User To user the message handler, you must first create a user using @@ -263,7 +273,7 @@ closing the device automatically destroys the user. Messaging -To send a message from kernel-land, the ipmi_request() call does +To send a message from kernel-land, the ipmi_request_settime() call does pretty much all message handling. Most of the parameter are self-explanatory. However, it takes a "msgid" parameter. This is NOT the sequence number of messages. It is simply a long value that is @@ -352,11 +362,12 @@ that for more details. The SI Driver ------------- -The SI driver allows up to 4 KCS or SMIC interfaces to be configured -in the system. By default, scan the ACPI tables for interfaces, and -if it doesn't find any the driver will attempt to register one KCS -interface at the spec-specified I/O port 0xca2 without interrupts. -You can change this at module load time (for a module) with: +The SI driver allows KCS, BT, and SMIC interfaces to be configured +in the system. It discovers interfaces through a host of different +methods, depending on the system. + +You can specify up to four interfaces on the module load line and +control some module parameters: modprobe ipmi_si.o type=,.... ports=,... addrs=,... @@ -367,7 +378,7 @@ You can change this at module load time (for a module) with: force_kipmid=,,... kipmid_max_busy_us=,,... unload_when_empty=[0|1] - trydefaults=[0|1] trydmi=[0|1] tryacpi=[0|1] + trydmi=[0|1] tryacpi=[0|1] tryplatform=[0|1] trypci=[0|1] Each of these except try... items is a list, the first item for the @@ -386,10 +397,6 @@ use the I/O port given as the device address. If you specify irqs as non-zero for an interface, the driver will attempt to use the given interrupt for the device. 
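+
+For illustration only (the interface type and address depend on the
+system), a single KCS interface at the spec-specified I/O port could be
+requested with:
+
+   modprobe ipmi_si type=kcs ports=0xca2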
-trydefaults sets whether the standard IPMI interface at 0xca2 and -any interfaces specified by ACPE are tried. By default, the driver -tries it, set this value to zero to turn this off. - The other try... items disable discovery by their corresponding names. These are all enabled by default, set them to zero to disable them. The tryplatform disables openfirmware. @@ -434,7 +441,7 @@ kernel command line as: ipmi_si.type=,... ipmi_si.ports=,... ipmi_si.addrs=,... - ipmi_si.irqs=,... ipmi_si.trydefaults=[0|1] + ipmi_si.irqs=,... ipmi_si.regspacings=,,... ipmi_si.regsizes=,,... ipmi_si.regshifts=,,... @@ -444,11 +451,6 @@ kernel command line as: It works the same as the module parameters of the same names. -By default, the driver will attempt to detect any device specified by -ACPI, and if none of those then a KCS device at the spec-specified -0xca2. If you want to turn this off, set the "trydefaults" option to -false. - If your IPMI interface does not support interrupts and is a KCS or SMIC interface, the IPMI driver will start a kernel thread for the interface to help speed things up. This is a low-priority kernel @@ -500,7 +502,8 @@ at module load time (for a module) with: addr=[,[,...]] adapter=[,[...]] dbg=,... - slave_addrs=,,... + slave_addrs=,,... + tryacpi=[0|1] trydmi=[0|1] [dbg_probe=1] The addresses are normal I2C addresses. The adapter is the string @@ -513,6 +516,9 @@ spaces in kernel parameters. The debug flags are bit flags for each BMC found, they are: IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8 +The tryxxx parameters can be used to disable detecting interfaces +from various sources. + Setting dbg_probe to 1 will enable debugging of the probing and detection process for BMCs on the SMBusses. @@ -535,7 +541,8 @@ kernel command line as: ipmi_ssif.adapter=[,[...]] ipmi_ssif.dbg=[,[...]] ipmi_ssif.dbg_probe=1 - ipmi_ssif.slave_addrs=[,[...]] + ipmi_ssif.slave_addrs=[,[...]] + ipmi_ssif.tryacpi=[0|1] ipmi_ssif.trydmi=[0|1] These are the same options as on the module command line. diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html index a4d3838130e4ef6c30ac7ece5dc56e452d5274cb..39bcb74ea733c920d930a3a1191969a8de99a56a 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.html +++ b/Documentation/RCU/Design/Requirements/Requirements.html @@ -547,7 +547,7 @@ The rcu_access_pointer() on line 6 is similar to It could reuse a value formerly fetched from this same pointer. It could also fetch the pointer from gp in a byte-at-a-time manner, resulting in load tearing, in turn resulting a bytewise - mash-up of two distince pointer values. + mash-up of two distinct pointer values. It might even use value-speculation optimizations, where it makes a wrong guess, but by the time it gets around to checking the value, an update has changed the pointer to match the wrong guess. @@ -659,6 +659,29 @@ systems with more than one CPU: In other words, a given instance of synchronize_rcu() can avoid waiting on a given RCU read-side critical section only if it can prove that synchronize_rcu() started first. + +

+ A related question is “When rcu_read_lock() + doesn't generate any code, why does it matter how it relates + to a grace period?” + The answer is that it is not the relationship of + rcu_read_lock() itself that is important, but rather + the relationship of the code within the enclosed RCU read-side + critical section to the code preceding and following the + grace period. + If we take this viewpoint, then a given RCU read-side critical + section begins before a given grace period when some access + preceding the grace period observes the effect of some access + within the critical section, in which case none of the accesses + within the critical section may observe the effects of any + access following the grace period. + +

+ As of late 2016, mathematical models of RCU take this + viewpoint, for example, see slides 62 and 63 + of the + 2016 LinuxCon EU + presentation.   diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 204422719197ec080e13bd9f076e5c20d960c541..5cbd8b2395b811489c68acb7da4616b4a1ef9523 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -237,7 +237,7 @@ rcu_dereference() The reader uses rcu_dereference() to fetch an RCU-protected pointer, which returns a value that may then be safely - dereferenced. Note that rcu_deference() does not actually + dereferenced. Note that rcu_dereference() does not actually dereference the pointer, instead, it protects the pointer for later dereferencing. It also executes any needed memory-barrier instructions for a given CPU architecture. Currently, only Alpha diff --git a/Documentation/acpi/DSD-properties-rules.txt b/Documentation/acpi/DSD-properties-rules.txt new file mode 100644 index 0000000000000000000000000000000000000000..3e4862bdad98cc792831bf3419a6b19910d59e26 --- /dev/null +++ b/Documentation/acpi/DSD-properties-rules.txt @@ -0,0 +1,97 @@ +_DSD Device Properties Usage Rules +---------------------------------- + +Properties, Property Sets and Property Subsets +---------------------------------------------- + +The _DSD (Device Specific Data) configuration object, introduced in ACPI 5.1, +allows any type of device configuration data to be provided via the ACPI +namespace. In principle, the format of the data may be arbitrary, but it has to +be identified by a UUID which must be recognized by the driver processing the +_DSD output. However, there are generic UUIDs defined for _DSD recognized by +the ACPI subsystem in the Linux kernel which automatically processes the data +packages associated with them and makes those data available to device drivers +as "device properties". + +A device property is a data item consisting of a string key and a value (of a +specific type) associated with it. + +In the ACPI _DSD context it is an element of the sub-package following the +generic Device Properties UUID in the _DSD return package as specified in the +Device Properties UUID definition document [1]. + +It also may be regarded as the definition of a key and the associated data type +that can be returned by _DSD in the Device Properties UUID sub-package for a +given device. + +A property set is a collection of properties applicable to a hardware entity +like a device. In the ACPI _DSD context it is the set of all properties that +can be returned in the Device Properties UUID sub-package for the device in +question. + +Property subsets are nested collections of properties. Each of them is +associated with an additional key (name) allowing the subset to be referred +to as a whole (and to be treated as a separate entity). The canonical +representation of property subsets is via the mechanism specified in the +Hierarchical Properties Extension UUID definition document [2]. + +Property sets may be hierarchical. That is, a property set may contain +multiple property subsets that each may contain property subsets of its +own and so on. + +General Validity Rule for Property Sets +--------------------------------------- + +Valid property sets must follow the guidance given by the Device Properties UUID +definition document [1]. + +_DSD properties are intended to be used in addition to, and not instead of, the +existing mechanisms defined by the ACPI specification. 
Therefore, as a rule, +they should only be used if the ACPI specification does not make direct +provisions for handling the underlying use case. It generally is invalid to +return property sets which do not follow that rule from _DSD in data packages +associated with the Device Properties UUID. + +Additional Considerations +------------------------- + +There are cases in which, even if the general rule given above is followed in +principle, the property set may still not be regarded as a valid one. + +For example, that applies to device properties which may cause kernel code +(either a device driver or a library/subsystem) to access hardware in a way +possibly leading to a conflict with AML methods in the ACPI namespace. In +particular, that may happen if the kernel code uses device properties to +manipulate hardware normally controlled by ACPI methods related to power +management, like _PSx and _DSW (for device objects) or _ON and _OFF (for power +resource objects), or by ACPI device disabling/enabling methods, like _DIS and +_SRS. + +In all cases in which kernel code may do something that will confuse AML as a +result of using device properties, the device properties in question are not +suitable for the ACPI environment and consequently they cannot belong to a valid +property set. + +Property Sets and Device Tree Bindings +-------------------------------------- + +It often is useful to make _DSD return property sets that follow Device Tree +bindings. + +In those cases, however, the above validity considerations must be taken into +account in the first place and returning invalid property sets from _DSD must be +avoided. For this reason, it may not be possible to make _DSD return a property +set following the given DT binding literally and completely. Still, for the +sake of code re-use, it may make sense to provide as much of the configuration +data as possible in the form of device properties and complement that with an +ACPI-specific mechanism suitable for the use case at hand. + +In any case, property sets following DT bindings literally should not be +expected to automatically work in the ACPI environment regardless of their +contents. + +References +---------- + +[1] http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf +[2] http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt index a91ec5af52df28cf3ff3c2ae03b163272caae0e6..209a5eba6b8711e34000b102f68814e76bfcded8 100644 --- a/Documentation/acpi/enumeration.txt +++ b/Documentation/acpi/enumeration.txt @@ -415,3 +415,12 @@ the "compatible" property in the _DSD or a _CID as long as one of their ancestors provides a _DSD with a valid "compatible" property. Such device objects are then simply regarded as additional "blocks" providing hierarchical configuration information to the driver of the composite ancestor device. + +However, PRP0001 can only be returned from either _HID or _CID of a device +object if all of the properties returned by the _DSD associated with it (either +the _DSD of the device object itself or the _DSD of its ancestor in the +"composite device" case described above) can be used in the ACPI environment. +Otherwise, the _DSD itself is regarded as invalid and therefore the "compatible" +property returned by it is meaningless. + +Refer to DSD-properties-rules.txt for more information. 
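+
+For instance, a device object using PRP0001 together with a "compatible"
+property might look like the following sketch (the device name and
+compatible string are purely illustrative):
+
+    Device (DEV0) {
+        // illustrative example only
+        Name (_HID, "PRP0001")
+        Name (_DSD, Package () {
+            ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+            Package () {
+                Package () {"compatible", "vendor,example-device"},
+            }
+        })
+    }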
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt index 5aafe0b351a1327106197f8212918ddfadfd8105..2aff0349facd6e4f289b7df28f3ea6b3e1dced40 100644 --- a/Documentation/acpi/gpio-properties.txt +++ b/Documentation/acpi/gpio-properties.txt @@ -51,6 +51,68 @@ it to 1 marks the GPIO as active low. In our Bluetooth example the "reset-gpios" refers to the second GpioIo() resource, second pin in that resource with the GPIO number of 31. +It is possible to leave holes in the array of GPIOs. This is useful in +cases like with SPI host controllers where some chip selects may be +implemented as GPIOs and some as native signals. For example a SPI host +controller can have chip selects 0 and 2 implemented as GPIOs and 1 as +native: + + Package () { + "cs-gpios", + Package () { + ^GPIO, 19, 0, 0, // chip select 0: GPIO + 0, // chip select 1: native signal + ^GPIO, 20, 0, 0, // chip select 2: GPIO + } + } + +Other supported properties +-------------------------- + +Following Device Tree compatible device properties are also supported by +_DSD device properties for GPIO controllers: + +- gpio-hog +- output-high +- output-low +- input +- line-name + +Example: + + Name (_DSD, Package () { + // _DSD Hierarchical Properties Extension UUID + ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + Package () { + Package () {"hog-gpio8", "G8PU"} + } + }) + + Name (G8PU, Package () { + ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + Package () { + Package () {"gpio-hog", 1}, + Package () {"gpios", Package () {8, 0}}, + Package () {"output-high", 1}, + Package () {"line-name", "gpio8-pullup"}, + } + }) + +- gpio-line-names + +Example: + + Package () { + "gpio-line-names", + Package () { + "SPI0_CS_N", "EXP2_INT", "MUX6_IO", "UART0_RXD", "MUX7_IO", + "LVL_C_A1", "MUX0_IO", "SPI1_MISO" + } + } + +See Documentation/devicetree/bindings/gpio/gpio.txt for more information +about these properties. + ACPI GPIO Mappings Provided by Drivers -------------------------------------- diff --git a/Documentation/acpi/osi.txt b/Documentation/acpi/osi.txt new file mode 100644 index 0000000000000000000000000000000000000000..50cde0ceb9b005f2474817bfc126697bd9ea721f --- /dev/null +++ b/Documentation/acpi/osi.txt @@ -0,0 +1,187 @@ +ACPI _OSI and _REV methods +-------------------------- + +An ACPI BIOS can use the "Operating System Interfaces" method (_OSI) +to find out what the operating system supports. Eg. If BIOS +AML code includes _OSI("XYZ"), the kernel's AML interpreter +can evaluate that method, look to see if it supports 'XYZ' +and answer YES or NO to the BIOS. + +The ACPI _REV method returns the "Revision of the ACPI specification +that OSPM supports" + +This document explains how and why the BIOS and Linux should use these methods. +It also explains how and why they are widely misused. + +How to use _OSI +--------------- + +Linux runs on two groups of machines -- those that are tested by the OEM +to be compatible with Linux, and those that were never tested with Linux, +but where Linux was installed to replace the original OS (Windows or OSX). + +The larger group is the systems tested to run only Windows. Not only that, +but many were tested to run with just one specific version of Windows. +So even though the BIOS may use _OSI to query what version of Windows is running, +only a single path through the BIOS has actually been tested. +Experience shows that taking untested paths through the BIOS +exposes Linux to an entire category of BIOS bugs. 
+For this reason, Linux _OSI defaults must continue to claim compatibility +with all versions of Windows. + +But Linux isn't actually compatible with Windows, and the Linux community +has also been hurt with regressions when Linux adds the latest version of +Windows to its list of _OSI strings. So it is possible that additional strings +will be more thoroughly vetted before shipping upstream in the future. +But it is likely that they will all eventually be added. + +What should an OEM do if they want to support Linux and Windows +using the same BIOS image? Often they need to do something different +for Linux to deal with how Linux is different from Windows. +Here the BIOS should ask exactly what it wants to know: + +_OSI("Linux-OEM-my_interface_name") +where 'OEM' is needed if this is an OEM-specific hook, +and 'my_interface_name' describes the hook, which could be a +quirk, a bug, or a bug-fix. + +In addition, the OEM should send a patch to upstream Linux +via the linux-acpi@vger.kernel.org mailing list. When that patch +is checked into Linux, the OS will answer "YES" when the BIOS +on the OEM's system uses _OSI to ask if the interface is supported +by the OS. Linux distributors can back-port that patch for Linux +pre-installs, and it will be included by all distributions that +re-base to upstream. If the distribution can not update the kernel binary, +they can also add an acpi_osi=Linux-OEM-my_interface_name +cmdline parameter to the boot loader, as needed. + +If the string refers to a feature where the upstream kernel +eventually grows support, a patch should be sent to remove +the string when that support is added to the kernel. + +That was easy. Read on, to find out how to do it wrong. + +Before _OSI, there was _OS +-------------------------- + +ACPI 1.0 specified "_OS" as an +"object that evaluates to a string that identifies the operating system." + +The ACPI BIOS flow would include an evaluation of _OS, and the AML +interpreter in the kernel would return to it a string identifying the OS: + +Windows 98, SE: "Microsoft Windows" +Windows ME: "Microsoft WindowsME:Millenium Edition" +Windows NT: "Microsoft Windows NT" + +The idea was on a platform tasked with running multiple OS's, +the BIOS could use _OS to enable devices that an OS +might support, or enable quirks or bug workarounds +necessary to make the platform compatible with that pre-existing OS. + +But _OS had fundamental problems. First, the BIOS needed to know the name +of every possible version of the OS that would run on it, and needed to know +all the quirks of those OS's. Certainly it would make more sense +for the BIOS to ask *specific* things of the OS, such +"do you support a specific interface", and thus in ACPI 3.0, +_OSI was born to replace _OS. + +_OS was abandoned, though even today, many BIOS look for +_OS "Microsoft Windows NT", though it seems somewhat far-fetched +that anybody would install those old operating systems +over what came with the machine. + +Linux answers "Microsoft Windows NT" to please that BIOS idiom. +That is the *only* viable strategy, as that is what modern Windows does, +and so doing otherwise could steer the BIOS down an untested path. + +_OSI is born, and immediately misused +-------------------------------------- + +With _OSI, the *BIOS* provides the string describing an interface, +and asks the OS: "YES/NO, are you compatible with this interface?" + +eg. 
_OSI("3.0 Thermal Model") would return TRUE if the OS knows how +to deal with the thermal extensions made to the ACPI 3.0 specification. +An old OS that doesn't know about those extensions would answer FALSE, +and a new OS may be able to return TRUE. + +For an OS-specific interface, the ACPI spec said that the BIOS and the OS +were to agree on a string of the form such as "Windows-interface_name". + +But two bad things happened. First, the Windows ecosystem used _OSI +not as designed, but as a direct replacement for _OS -- identifying +the OS version, rather than an OS supported interface. Indeed, right +from the start, the ACPI 3.0 spec itself codified this misuse +in example code using _OSI("Windows 2001"). + +This misuse was adopted and continues today. + +Linux had no choice but to also return TRUE to _OSI("Windows 2001") +and its successors. To do otherwise would virtually guarantee breaking +a BIOS that has been tested only with that _OSI returning TRUE. + +This strategy is problematic, as Linux is never completely compatible with +the latest version of Windows, and sometimes it takes more than a year +to iron out incompatibilities. + +Not to be out-done, the Linux community made things worse by returning TRUE +to _OSI("Linux"). Doing so is even worse than the Windows misuse +of _OSI, as "Linux" does not even contain any version information. +_OSI("Linux") led to some BIOS' malfunctioning due to BIOS writer's +using it in untested BIOS flows. But some OEM's used _OSI("Linux") +in tested flows to support real Linux features. In 2009, Linux +removed _OSI("Linux"), and added a cmdline parameter to restore it +for legacy systems still needed it. Further a BIOS_BUG warning prints +for all BIOS's that invoke it. + +No BIOS should use _OSI("Linux"). + +The result is a strategy for Linux to maximize compatibility with +ACPI BIOS that are tested on Windows machines. There is a real risk +of over-stating that compatibility; but the alternative has often been +catastrophic failure resulting from the BIOS taking paths that +were never validated under *any* OS. + +Do not use _REV +--------------- + +Since _OSI("Linux") went away, some BIOS writers used _REV +to support Linux and Windows differences in the same BIOS. + +_REV was defined in ACPI 1.0 to return the version of ACPI +supported by the OS and the OS AML interpreter. + +Modern Windows returns _REV = 2. Linux used ACPI_CA_SUPPORT_LEVEL, +which would increment, based on the version of the spec supported. + +Unfortunately, _REV was also misused. eg. some BIOS would check +for _REV = 3, and do something for Linux, but when Linux returned +_REV = 4, that support broke. + +In response to this problem, Linux returns _REV = 2 always, +from mid-2015 onward. The ACPI specification will also be updated +to reflect that _REV is deprecated, and always returns 2. + +Apple Mac and _OSI("Darwin") +---------------------------- + +On Apple's Mac platforms, the ACPI BIOS invokes _OSI("Darwin") +to determine if the machine is running Apple OSX. + +Like Linux's _OSI("*Windows*") strategy, Linux defaults to +answering YES to _OSI("Darwin") to enable full access +to the hardware and validated BIOS paths seen by OSX. +Just like on Windows-tested platforms, this strategy has risks. + +Starting in Linux-3.18, the kernel answered YES to _OSI("Darwin") +for the purpose of enabling Mac Thunderbolt support. 
Further, +if the kernel noticed _OSI("Darwin") being invoked, it additionally +disabled all _OSI("*Windows*") to keep poorly written Mac BIOS +from going down untested combinations of paths. + +The Linux-3.18 change in default caused power regressions on Mac +laptops, and the 3.18 implementation did not allow changing +the default via cmdline "acpi_osi=!Darwin". Linux-4.7 fixed +the ability to use acpi_osi=!Darwin as a workaround, and +we hope to see Mac Thunderbolt power management support in Linux-4.11. diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst index 2681cbd24cddf232be828156e9f613c18b6c0f4e..8ddae4e4299aa10e84f27ab480a3ec3f79c97261 100644 --- a/Documentation/admin-guide/index.rst +++ b/Documentation/admin-guide/index.rst @@ -59,6 +59,7 @@ configure specific aspects of kernel behavior to your liking. binfmt-misc mono java + ras .. only:: subproject and html diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 456248b2722056a4a1f144c0cf8b090106cb478a..21e2d88637050b7a33f141e558fff43d3f23d0c9 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -863,6 +863,11 @@ dscc4.setup= [NET] + dump_apple_properties [X86] + Dump name and content of EFI device properties on + x86 Macs. Useful for driver authors to determine + what data is available or for reverse-engineering. + dyndbg[="val"] [KNL,DYNAMIC_DEBUG] module.dyndbg[="val"] Enable debug messages at boot time. See @@ -875,12 +880,6 @@ nopku [X86] Disable Memory Protection Keys CPU feature found in some Intel CPUs. - eagerfpu= [X86] - on enable eager fpu restore - off disable eager fpu restore - auto selects the default scheme, which automatically - enables eagerfpu restore for xsaveopt. - module.async_probe [KNL] Enable asynchronous probe on this module. @@ -1442,6 +1441,10 @@ The builtin appraise policy appraises all files owned by uid=0. + ima_canonical_fmt [IMA] + Use the canonical format for the binary runtime + measurements, instead of host native format. + ima_hash= [IMA] Format: { md5 | sha1 | rmd160 | sha256 | sha384 | sha512 | ... } @@ -1561,6 +1564,12 @@ disable Do not enable intel_pstate as the default scaling driver for the supported processors + passive + Use intel_pstate as a scaling driver, but configure it + to work with generic cpufreq governors (instead of + enabling its internal governor). This mode cannot be + used along with the hardware-managed P-states (HWP) + feature. force Enable intel_pstate on systems that prohibit it by default in favor of acpi-cpufreq. Forcing the intel_pstate driver @@ -1581,6 +1590,9 @@ Description Table, specifies preferred power management profile as "Enterprise Server" or "Performance Server", then this feature is turned on by default. + per_cpu_perf_limits + Allow per-logical-CPU P-State performance control limits using + cpufreq sysfs interface intremap= [X86-64, Intel-IOMMU] on enable Interrupt Remapping (default) @@ -1759,9 +1771,6 @@ kmemcheck=2 (one-shot mode) Default: 2 (one-shot mode) - kstack=N [X86] Print N words from the kernel stack - in oops dumps. - kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs. Default is 0 (don't ignore, but inject #GP) @@ -2126,6 +2135,12 @@ memory contents and reserves bad memory regions that are detected. 
+ mem_sleep_default= [SUSPEND] Default system suspend mode: + s2idle - Suspend-To-Idle + shallow - Power-On Suspend or equivalent (if supported) + deep - Suspend-To-RAM or equivalent (if supported) + See Documentation/power/states.txt. + meye.*= [HW] Set MotionEye Camera parameters See Documentation/video4linux/meye.txt. @@ -2202,7 +2217,7 @@ that the amount of memory usable for all allocations is not too small. - movable_node [KNL,X86] Boot-time switch to enable the effects + movable_node [KNL] Boot-time switch to enable the effects of CONFIG_MOVABLE_NODE=y. See mm/Kconfig for details. MTD_Partition= [MTD] @@ -2555,6 +2570,10 @@ no-kvmapf [X86,KVM] Disable paravirtualized asynchronous page fault handling. + no-vmw-sched-clock + [X86,PV_OPS] Disable paravirtualized VMware scheduler + clock and use the default one. + no-steal-acc [X86,KVM] Disable paravirtualized steal time accounting. steal time is computed, but won't influence scheduler behaviour @@ -3475,13 +3494,6 @@ [KNL, SMP] Set scheduler's default relax_domain_level. See Documentation/cgroup-v1/cpusets.txt. - relative_sleep_states= - [SUSPEND] Use sleep state labeling where the deepest - state available other than hibernation is always "mem". - Format: { "0" | "1" } - 0 -- Traditional sleep state labels. - 1 -- Relative sleep state labels. - reserve= [KNL,BUGS] Force the kernel to ignore some iomem area reservetop= [X86-32] @@ -3631,12 +3643,6 @@ shapers= [NET] Maximal number of shapers. - show_msr= [x86] show boot-time MSR settings - Format: { } - Show boot-time (BIOS-initialized) MSR settings. - The parameter means the number of CPUs to show, - for example 1 means boot CPU only. - simeth= [IA-64] simscsi= diff --git a/Documentation/admin-guide/ras.rst b/Documentation/admin-guide/ras.rst new file mode 100644 index 0000000000000000000000000000000000000000..d71340e86c27590ff139d309b57e37d755c8dfd7 --- /dev/null +++ b/Documentation/admin-guide/ras.rst @@ -0,0 +1,1190 @@ +.. include:: + +============================================ +Reliability, Availability and Serviceability +============================================ + +RAS concepts +************ + +Reliability, Availability and Serviceability (RAS) is a concept used on +servers meant to measure their robusteness. + +Reliability + is the probability that a system will produce correct outputs. + + * Generally measured as Mean Time Between Failures (MTBF) + * Enhanced by features that help to avoid, detect and repair hardware faults + +Availability + is the probability that a system is operational at a given time + + * Generally measured as a percentage of downtime per a period of time + * Often uses mechanisms to detect and correct hardware faults in + runtime; + +Serviceability (or maintainability) + is the simplicity and speed with which a system can be repaired or + maintained + + * Generally measured on Mean Time Between Repair (MTBR) + +Improving RAS +------------- + +In order to reduce systems downtime, a system should be capable of detecting +hardware errors, and, when possible correcting them in runtime. It should +also provide mechanisms to detect hardware degradation, in order to warn +the system administrator to take the action of replacing a component before +it causes data loss or system downtime. 
+ +Among the monitoring measures, the most usual ones include: + +* CPU – detect errors at instruction execution and at L1/L2/L3 caches; +* Memory – add error correction logic (ECC) to detect and correct errors; +* I/O – add CRC checksums for transferred data; +* Storage – RAID, journal file systems, checksums, + Self-Monitoring, Analysis and Reporting Technology (SMART). + +By monitoring the number of occurrences of error detections, it is possible +to identify if the probability of hardware errors is increasing, and, in such +a case, do preventive maintenance to replace a degraded component while +those errors are still correctable. + +Types of errors +--------------- + +Most mechanisms used on modern systems use technologies like Hamming +Codes that allow error correction when the number of errors on a bit packet +is below a threshold. If the number of errors is above that threshold, those +mechanisms can indicate with a high degree of confidence that an error +happened, but they can't correct it. + +Also, sometimes an error occurs on a component that is not being used. For +example, a part of the memory that is not currently allocated. + +That defines some categories of errors: + +* **Correctable Error (CE)** - the error detection mechanism detected and + corrected the error. Such errors are usually not fatal, although some + Kernel mechanisms allow the system administrator to consider them as fatal. + +* **Uncorrected Error (UE)** - the number of errors was above the error + correction threshold, and the system was unable to auto-correct them. + +* **Fatal Error** - when a UE error happens on a critical component of the + system (for example, a piece of the Kernel got corrupted by a UE), the + only reliable way to avoid data corruption is to hang or reboot the machine. + +* **Non-fatal Error** - when a UE error happens on an unused component, + like a CPU in power down state or an unused memory bank, the system may + still run, eventually replacing the affected hardware by a hot spare, + if available. + + Also, when an error happens on a userspace process, it is also possible to + kill such a process and let userspace restart it. + +The mechanism for handling non-fatal errors is usually complex and may +require the help of some userspace application, in order to apply the +policy desired by the system administrator. + +Identifying a bad hardware component +------------------------------------ + +Just detecting a hardware flaw is usually not enough, as the system needs +to pinpoint the minimal replaceable unit (MRU) that should be exchanged +to make the hardware reliable again. + +So, it requires not only error logging facilities, but also mechanisms that +will translate the error message to the silkscreen or component label for +the MRU. + +Typically, this is very complex for memory, as modern CPUs interlace memory +from different memory modules, in order to provide better performance. The +DMI BIOS usually has a list of memory module labels, which can be obtained +using the ``dmidecode`` tool. For example, on a desktop machine, it shows:: + + Memory Device + Total Width: 64 bits + Data Width: 64 bits + Size: 16384 MB + Form Factor: SODIMM + Set: None + Locator: ChannelA-DIMM0 + Bank Locator: BANK 0 + Type: DDR4 + Type Detail: Synchronous + Speed: 2133 MHz + Rank: 2 + Configured Clock Speed: 2133 MHz + +In the above example, a DDR4 SO-DIMM memory module is located at the +system's memory labeled as "BANK 0", as given by the *bank locator* field.
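The listing above is the kind of output produced by the ``dmidecode`` tool
just mentioned; a minimal way to obtain it is sketched below (it assumes root
privileges and a BIOS that populates the DMI "Memory Device" records)::

    # list only the DMI "Memory Device" (type 17) records
    dmidecode --type memory

    # the same records, selected by numeric type
    dmidecode --type 17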
+Please notice that, on such a system, the *total width* is equal to the +*data width*. This means that such a memory module doesn't have error +detection/correction mechanisms. + +Unfortunately, not all systems use the same field to specify the memory +bank. In this example, from an older server, ``dmidecode`` shows:: + + Memory Device + Array Handle: 0x1000 + Error Information Handle: Not Provided + Total Width: 72 bits + Data Width: 64 bits + Size: 8192 MB + Form Factor: DIMM + Set: 1 + Locator: DIMM_A1 + Bank Locator: Not Specified + Type: DDR3 + Type Detail: Synchronous Registered (Buffered) + Speed: 1600 MHz + Rank: 2 + Configured Clock Speed: 1600 MHz + +There, the DDR3 RDIMM memory module is located at the system's memory labeled +as "DIMM_A1", as given by the *locator* field. Please notice that this +memory module has 64 bits of *data width* and 72 bits of *total width*. So, +it has 8 extra bits to be used by error detection and correction mechanisms. +Such memory is called Error-correcting code memory (ECC memory). + +To make things even worse, it is not uncommon for systems with different +labels on their boards to use exactly the same BIOS, meaning that +the labels provided by the BIOS won't match the real ones. + +ECC memory +---------- + +As mentioned in the previous section, ECC memory has extra bits to be +used for error correction. So, on 64 bit systems, a memory module +has 64 bits of *data width* and 72 bits of *total width*, leaving +8 extra bits to be used for the error detection and correction +mechanisms. Those extra bits are called *syndrome*\ [#f1]_\ [#f2]_. + +So, when the CPU requests the memory controller to write a word with +*data width*, the memory controller calculates the *syndrome* in real time, +using Hamming code, or some other error correction code, like SECDED+, +producing a code with *total width* size. Such code is then written +to the memory modules. + +At read, the *total width* bits code is converted back, using the same +ECC code used on write, producing a word with *data width* and a *syndrome*. +The word with *data width* is sent to the CPU, even when errors happen. + +The memory controller also looks at the *syndrome* in order to check if +there was an error, and if the ECC code was able to fix such an error. +If the error was corrected, a Corrected Error (CE) happened. If not, an +Uncorrected Error (UE) happened. + +The information about the CE/UE errors is stored in some special registers +at the memory controller and can be accessed by reading such registers, +either by the BIOS, by some special CPUs or by the Linux EDAC driver. On x86 64 +bit CPUs, such errors can also be retrieved via the Machine Check +Architecture (MCA)\ [#f3]_. + +.. [#f1] Please notice that several memory controllers allow operation in a + mode called "Lock-Step", where they group two memory modules together, + doing 128-bit reads/writes. That gives 16 bits for error correction, which + significantly improves the error correction mechanism, at the expense + that, when an error happens, there's no way to know which memory module is + to blame. So, it has to blame both memory modules. + +.. [#f2] Some memory controllers also allow using memory in mirror mode. + In such a mode, the same data is written to two memory modules. At read, + the system checks both memory modules, in order to check if both provide + identical data. In such a configuration, when an error happens, there's no + way to know which memory module is to blame.
So, it has to blame both + memory modules (or 4 memory modules, if the system is also in Lock-step + mode). + +.. [#f3] For more details about the Machine Check Architecture (MCA), + please read Documentation/x86/x86_64/machinecheck in the Kernel tree. + +EDAC - Error Detection And Correction +************************************* + +.. note:: + + "bluesmoke" was the name for this device driver subsystem when it + was "out-of-tree" and maintained at http://bluesmoke.sourceforge.net. + That site is mostly archaic now and can be used only for historical + purposes. + + When the subsystem was pushed upstream for the first time, on + Kernel 2.6.16, it was renamed to ``EDAC``. + +Purpose +------- + +The ``edac`` kernel module's goal is to detect and report hardware errors +that occur within the computer system running under Linux. + +Memory +------ + +Memory Correctable Errors (CE) and Uncorrectable Errors (UE) are the +primary errors being harvested. These types of errors are harvested by +the ``edac_mc`` device. + +Detecting CE events, then harvesting those events and reporting them, +**can** be, but is not necessarily, a predictor of future UE events. With +CE events only, the system can and will continue to operate as no data +has been damaged yet. + +However, preventive maintenance and proactive part replacement of memory +modules exhibiting CEs can reduce the likelihood of the dreaded UE events +and system panics. + +Other hardware elements +----------------------- + +A new feature for EDAC, the ``edac_device`` class of device, was added in +the 2.6.23 version of the kernel. + +This new device type allows non-memory types of ECC hardware detectors +to have their states harvested and presented to userspace via the sysfs +interface. + +Some architectures have ECC detectors for L1, L2 and L3 caches, +along with DMA engines, fabric switches, main data path switches, +interconnections, and various other hardware data paths. If the hardware +reports it, then an ``edac_device`` device can probably be constructed to +harvest and present that to userspace. + + +PCI bus scanning +---------------- + +In addition, PCI devices are scanned for PCI Bus Parity and SERR Errors +in order to determine if errors are occurring during data transfers. + +The presence of PCI Parity errors must be examined with a grain of salt. +There are several add-in adapters that do **not** follow the PCI specification +with regards to Parity generation and reporting. The specification says +the vendor should tie the parity status bits to 0 if they do not intend +to generate parity. Some vendors do not do this, and thus the parity bit +can "float", giving false positives. + +There is a PCI device attribute located in sysfs that is checked by +the EDAC PCI scanning code. If that attribute is set, PCI parity/error +scanning is skipped for that device. The attribute is:: + + broken_parity_status + +and is located in ``/sys/devices/pci/0000:XX:YY.Z`` directories for +PCI devices. + + +Versioning +---------- + +EDAC is composed of a "core" module (``edac_core.ko``) and several Memory +Controller (MC) driver modules. On a given system, the CORE is loaded +and one MC driver will be loaded. Both the CORE and the MC driver (or +``edac_device`` driver) have individual versions that reflect the current +release level of their respective modules. + +Thus, to "report" on what version a system is running, one must report +both the CORE's and the MC driver's versions.
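For example, one possible way to collect both versions on a running system is
with ``modinfo`` (a sketch: ``sb_edac`` is only an example MC driver name, and
the ``version:`` tag is printed only when the module actually declares one)::

    # version of the EDAC core module, if declared
    modinfo edac_core | grep -i '^version'

    # version of the memory controller driver in use (example name)
    modinfo sb_edac | grep -i '^version'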
+ + +Loading +------- + +If ``edac`` was statically linked with the kernel then no loading +is necessary. If ``edac`` was built as modules then simply modprobe +the ``edac`` pieces that you need. You should be able to modprobe +hardware-specific modules and have the dependencies load the necessary +core modules. + +Example:: + + $ modprobe amd76x_edac + +loads both the ``amd76x_edac.ko`` memory controller module and the +``edac_mc.ko`` core module. + + +Sysfs interface +--------------- + +EDAC presents a ``sysfs`` interface for control and reporting purposes. It +lives in the /sys/devices/system/edac directory. + +Within this directory there currently reside 2 components: + + ======= ============================== + mc memory controller(s) system + pci PCI control and status system + ======= ============================== + + + +Memory Controller (mc) Model +---------------------------- + +Each ``mc`` device controls a set of memory modules [#f4]_. These modules +are laid out in a Chip-Select Row (``csrowX``) and Channel table (``chX``). +There can be multiple csrows and multiple channels. + +.. [#f4] Nowadays, the term DIMM (Dual In-line Memory Module) is widely + used to refer to a memory module, although there are other memory + packaging alternatives, like SO-DIMM, SIMM, etc. Along this document, + and inside the EDAC system, the term "dimm" is used for all memory + modules, even when they use a different kind of packaging. + +Memory controllers allow for several csrows, with 8 csrows being a +typical value. Yet, the actual number of csrows depends on the layout of +a given motherboard, memory controller and memory module characteristics. + +Dual channels allow for dual data length (e. g. 128 bits, on 64 bit systems) +data transfers to/from the CPU from/to memory. Some newer chipsets allow +for more than 2 channels, like Fully Buffered DIMMs (FB-DIMMs) memory +controllers. The following example will assume 2 channels: + + +------------+-----------------------+ + | Chip | Channels | + | Select +-----------+-----------+ + | rows | ``ch0`` | ``ch1`` | + +============+===========+===========+ + | ``csrow0`` | DIMM_A0 | DIMM_B0 | + +------------+ | | + | ``csrow1`` | | | + +------------+-----------+-----------+ + | ``csrow2`` | DIMM_A1 | DIMM_B1 | + +------------+ | | + | ``csrow3`` | | | + +------------+-----------+-----------+ + +In the above example, there are 4 physical slots on the motherboard +for memory DIMMs: + + +---------+---------+ + | DIMM_A0 | DIMM_B0 | + +---------+---------+ + | DIMM_A1 | DIMM_B1 | + +---------+---------+ + +Labels for these slots are usually silk-screened on the motherboard. +Slots labeled ``A`` are channel 0 in this example. Slots labeled ``B`` are +channel 1. Notice that there are two csrows possible on a physical DIMM. +These csrows are allocated their csrow assignment based on the slot into +which the memory DIMM is placed. Thus, when 1 DIMM is placed in each +Channel, the csrows cross both DIMMs. + +Memory DIMMs come single or dual "ranked". A rank is a populated csrow. +Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above +will have just one csrow (csrow0). csrow1 will be empty. On the other +hand, when 2 dual ranked DIMMs are similarly placed, then both csrow0 +and csrow1 will be populated. The pattern repeats itself for csrow2 and +csrow3. + +The representation of the above is reflected in the directory +tree in EDAC's sysfs interface. 
Starting in directory +``/sys/devices/system/edac/mc``, each memory controller will be +represented by its own ``mcX`` directory, where ``X`` is the +index of the MC:: + + ..../edac/mc/ + | + |->mc0 + |->mc1 + |->mc2 + .... + +Under each ``mcX`` directory each ``csrowX`` is again represented by a +``csrowX``, where ``X`` is the csrow index:: + + .../mc/mc0/ + | + |->csrow0 + |->csrow2 + |->csrow3 + .... + +Notice that there is no csrow1, which indicates that csrow0 is composed +of a single ranked DIMMs. This should also apply in both Channels, in +order to have dual-channel mode be operational. Since both csrow2 and +csrow3 are populated, this indicates a dual ranked set of DIMMs for +channels 0 and 1. + +Within each of the ``mcX`` and ``csrowX`` directories are several EDAC +control and attribute files. + +``mcX`` directories +------------------- + +In ``mcX`` directories are EDAC control and attribute files for +this ``X`` instance of the memory controllers. + +For a description of the sysfs API, please see: + + Documentation/ABI/testing/sysfs-devices-edac + + +``dimmX`` or ``rankX`` directories +---------------------------------- + +The recommended way to use the EDAC subsystem is to look at the information +provided by the ``dimmX`` or ``rankX`` directories [#f5]_. + +A typical EDAC system has the following structure under +``/sys/devices/system/edac/``\ [#f6]_:: + + /sys/devices/system/edac/ + ├── mc + │   ├── mc0 + │   │   ├── ce_count + │   │   ├── ce_noinfo_count + │   │   ├── dimm0 + │   │   │   ├── dimm_dev_type + │   │   │   ├── dimm_edac_mode + │   │   │   ├── dimm_label + │   │   │   ├── dimm_location + │   │   │   ├── dimm_mem_type + │   │   │   ├── size + │   │   │   └── uevent + │   │   ├── max_location + │   │   ├── mc_name + │   │   ├── reset_counters + │   │   ├── seconds_since_reset + │   │   ├── size_mb + │   │   ├── ue_count + │   │   ├── ue_noinfo_count + │   │   └── uevent + │   ├── mc1 + │   │   ├── ce_count + │   │   ├── ce_noinfo_count + │   │   ├── dimm0 + │   │   │   ├── dimm_dev_type + │   │   │   ├── dimm_edac_mode + │   │   │   ├── dimm_label + │   │   │   ├── dimm_location + │   │   │   ├── dimm_mem_type + │   │   │   ├── size + │   │   │   └── uevent + │   │   ├── max_location + │   │   ├── mc_name + │   │   ├── reset_counters + │   │   ├── seconds_since_reset + │   │   ├── size_mb + │   │   ├── ue_count + │   │   ├── ue_noinfo_count + │   │   └── uevent + │   └── uevent + └── uevent + +In the ``dimmX`` directories are EDAC control and attribute files for +this ``X`` memory module: + +- ``size`` - Total memory managed by this csrow attribute file + + This attribute file displays, in count of megabytes, the memory + that this csrow contains. + +- ``dimm_dev_type`` - Device type attribute file + + This attribute file will display what type of DRAM device is + being utilized on this DIMM. + Examples: + + - x1 + - x2 + - x4 + - x8 + +- ``dimm_edac_mode`` - EDAC Mode of operation attribute file + + This attribute file will display what type of Error detection + and correction is being utilized. + +- ``dimm_label`` - memory module label control file + + This control file allows this DIMM to have a label assigned + to it. With this label in the module, when errors occur + the output can provide the DIMM label in the system log. + This becomes vital for panic events to isolate the + cause of the UE event. + + DIMM Labels must be assigned after booting, with information + that correctly identifies the physical slot with its + silk screen label. 
This information is currently very + motherboard specific and determination of this information + must occur in userland at this time. + +- ``dimm_location`` - location of the memory module + + The location can have up to 3 levels, and describe how the + memory controller identifies the location of a memory module. + Depending on the type of memory and memory controller, it + can be: + + - *csrow* and *channel* - used when the memory controller + doesn't identify a single DIMM - e. g. in ``rankX`` dir; + - *branch*, *channel*, *slot* - typically used on FB-DIMM memory + controllers; + - *channel*, *slot* - used on Nehalem and newer Intel drivers. + +- ``dimm_mem_type`` - Memory Type attribute file + + This attribute file will display what type of memory is currently + on this csrow. Normally, either buffered or unbuffered memory. + Examples: + + - Registered-DDR + - Unbuffered-DDR + +.. [#f5] On some systems, the memory controller doesn't have any logic + to identify the memory module. On such systems, the directory is called ``rankX`` and works on a similar way as the ``csrowX`` directories. + On modern Intel memory controllers, the memory controller identifies the + memory modules directly. On such systems, the directory is called ``dimmX``. + +.. [#f6] There are also some ``power`` directories and ``subsystem`` + symlinks inside the sysfs mapping that are automatically created by + the sysfs subsystem. Currently, they serve no purpose. + +``csrowX`` directories +---------------------- + +When CONFIG_EDAC_LEGACY_SYSFS is enabled, sysfs will contain the ``csrowX`` +directories. As this API doesn't work properly for Rambus, FB-DIMMs and +modern Intel Memory Controllers, this is being deprecated in favor of +``dimmX`` directories. + +In the ``csrowX`` directories are EDAC control and attribute files for +this ``X`` instance of csrow: + + +- ``ue_count`` - Total Uncorrectable Errors count attribute file + + This attribute file displays the total count of uncorrectable + errors that have occurred on this csrow. If panic_on_ue is set + this counter will not have a chance to increment, since EDAC + will panic the system. + + +- ``ce_count`` - Total Correctable Errors count attribute file + + This attribute file displays the total count of correctable + errors that have occurred on this csrow. This count is very + important to examine. CEs provide early indications that a + DIMM is beginning to fail. This count field should be + monitored for non-zero values and report such information + to the system administrator. + + +- ``size_mb`` - Total memory managed by this csrow attribute file + + This attribute file displays, in count of megabytes, the memory + that this csrow contains. + + +- ``mem_type`` - Memory Type attribute file + + This attribute file will display what type of memory is currently + on this csrow. Normally, either buffered or unbuffered memory. + Examples: + + - Registered-DDR + - Unbuffered-DDR + + +- ``edac_mode`` - EDAC Mode of operation attribute file + + This attribute file will display what type of Error detection + and correction is being utilized. + + +- ``dev_type`` - Device type attribute file + + This attribute file will display what type of DRAM device is + being utilized on this DIMM. + Examples: + + - x1 + - x2 + - x4 + - x8 + + +- ``ch0_ce_count`` - Channel 0 CE Count attribute file + + This attribute file will display the count of CEs on this + DIMM located in channel 0. 
+ + +- ``ch0_ue_count`` - Channel 0 UE Count attribute file + + This attribute file will display the count of UEs on this + DIMM located in channel 0. + + +- ``ch0_dimm_label`` - Channel 0 DIMM Label control file + + + This control file allows this DIMM to have a label assigned + to it. With this label in the module, when errors occur + the output can provide the DIMM label in the system log. + This becomes vital for panic events to isolate the + cause of the UE event. + + DIMM Labels must be assigned after booting, with information + that correctly identifies the physical slot with its + silk screen label. This information is currently very + motherboard specific and determination of this information + must occur in userland at this time. + + +- ``ch1_ce_count`` - Channel 1 CE Count attribute file + + + This attribute file will display the count of CEs on this + DIMM located in channel 1. + + +- ``ch1_ue_count`` - Channel 1 UE Count attribute file + + + This attribute file will display the count of UEs on this + DIMM located in channel 1. + + +- ``ch1_dimm_label`` - Channel 1 DIMM Label control file + + This control file allows this DIMM to have a label assigned + to it. With this label in the module, when errors occur + the output can provide the DIMM label in the system log. + This becomes vital for panic events to isolate the + cause of the UE event. + + DIMM Labels must be assigned after booting, with information + that correctly identifies the physical slot with its + silk screen label. This information is currently very + motherboard specific and determination of this information + must occur in userland at this time. + + +System Logging +-------------- + +If logging for UEs and CEs is enabled, then system logs will contain +information indicating that errors have been detected:: + + EDAC MC0: CE page 0x283, offset 0xce0, grain 8, syndrome 0x6ec3, row 0, channel 1 "DIMM_B1": amd76x_edac + EDAC MC0: CE page 0x1e5, offset 0xfb0, grain 8, syndrome 0xb741, row 0, channel 1 "DIMM_B1": amd76x_edac + + +The structure of the message is: + + +---------------------------------------+-------------+ + | Content | Example | + +=======================================+=============+ + | The memory controller | MC0 | + +---------------------------------------+-------------+ + | Error type | CE | + +---------------------------------------+-------------+ + | Memory page | 0x283 | + +---------------------------------------+-------------+ + | Offset in the page | 0xce0 | + +---------------------------------------+-------------+ + | The byte granularity | grain 8 | + | or resolution of the error | | + +---------------------------------------+-------------+ + | The error syndrome | 0xb741 | + +---------------------------------------+-------------+ + | Memory row | row 0 | + +---------------------------------------+-------------+ + | Memory channel | channel 1 | + +---------------------------------------+-------------+ + | DIMM label, if set prior | DIMM_B1 | + +---------------------------------------+-------------+ + | And then an optional, driver-specific | | + | message that may have additional | | + | information. | | + +---------------------------------------+-------------+ + +Both UEs and CEs with no info will lack all but the memory controller, error +type, a notice of "no info" and then an optional, driver-specific error +message.
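When checking a system by hand, the same information can be pulled from the
kernel log and from the counters described earlier; a minimal sketch (the
``mc*`` paths follow the sysfs layout shown in this document, and ``dmesg``
only shows messages still present in the kernel ring buffer)::

    # recent EDAC messages, if any
    dmesg | grep 'EDAC MC'

    # running CE/UE totals per memory controller
    grep . /sys/devices/system/edac/mc/mc*/ce_count \
           /sys/devices/system/edac/mc/mc*/ue_count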
+ + +PCI Bus Parity Detection +------------------------ + +On Header Type 00 devices, the primary status is looked at for any +parity error regardless of whether parity is enabled on the device or +not. (The spec indicates parity is generated in some cases). On Header +Type 01 bridges, the secondary status register is also looked at to see +if parity occurred on the bus on the other side of the bridge. + + +Sysfs configuration +------------------- + +Under ``/sys/devices/system/edac/pci`` are control and attribute files as +follows: + + +- ``check_pci_parity`` - Enable/Disable PCI Parity checking control file + + This control file enables or disables the PCI Bus Parity scanning + operation. Writing a 1 to this file enables the scanning. Writing + a 0 to this file disables the scanning. + + Enable:: + + echo "1" >/sys/devices/system/edac/pci/check_pci_parity + + Disable:: + + echo "0" >/sys/devices/system/edac/pci/check_pci_parity + + +- ``pci_parity_count`` - Parity Count + + This attribute file will display the number of parity errors that + have been detected. + + +Module parameters +----------------- + +- ``edac_mc_panic_on_ue`` - Panic on UE control file + + An uncorrectable error will cause a machine panic. This is usually + desirable. It is a bad idea to continue when an uncorrectable error + occurs - it is indeterminate what was uncorrected and the operating + system context might be so mangled that continuing will lead to further + corruption. If the kernel has MCE configured, then EDAC will never + notice the UE. + + LOAD TIME:: + + module/kernel parameter: edac_mc_panic_on_ue=[0|1] + + RUN TIME:: + + echo "1" > /sys/module/edac_core/parameters/edac_mc_panic_on_ue + + +- ``edac_mc_log_ue`` - Log UE control file + + + Generate kernel messages describing uncorrectable errors. These errors + are reported through the system message log system. UE statistics + will be accumulated even when UE logging is disabled. + + LOAD TIME:: + + module/kernel parameter: edac_mc_log_ue=[0|1] + + RUN TIME:: + + echo "1" > /sys/module/edac_core/parameters/edac_mc_log_ue + + +- ``edac_mc_log_ce`` - Log CE control file + + + Generate kernel messages describing correctable errors. These + errors are reported through the system message log system. + CE statistics will be accumulated even when CE logging is disabled. + + LOAD TIME:: + + module/kernel parameter: edac_mc_log_ce=[0|1] + + RUN TIME:: + + echo "1" > /sys/module/edac_core/parameters/edac_mc_log_ce + + +- ``edac_mc_poll_msec`` - Polling period control file + + + The time period, in milliseconds, for polling for error information. + Too small a value wastes resources. Too large a value might delay + necessary handling of errors and might loose valuable information for + locating the error. 1000 milliseconds (once each second) is the current + default. Systems which require all the bandwidth they can get, may + increase this. + + LOAD TIME:: + + module/kernel parameter: edac_mc_poll_msec=[0|1] + + RUN TIME:: + + echo "1000" > /sys/module/edac_core/parameters/edac_mc_poll_msec + + +- ``panic_on_pci_parity`` - Panic on PCI PARITY Error + + + This control file enables or disables panicking when a parity + error has been detected. 
+ + + module/kernel parameter:: + + edac_panic_on_pci_pe=[0|1] + + Enable:: + + echo "1" > /sys/module/edac_core/parameters/edac_panic_on_pci_pe + + Disable:: + + echo "0" > /sys/module/edac_core/parameters/edac_panic_on_pci_pe + + + +EDAC device type +---------------- + +In the header file, edac_pci.h, there is a series of edac_device structures +and APIs for the EDAC_DEVICE. + +User space access to an edac_device is through the sysfs interface. + +At the location ``/sys/devices/system/edac`` (sysfs) new edac_device devices +will appear. + +There is a three level tree beneath the above ``edac`` directory. For example, +the ``test_device_edac`` device (found at the http://bluesmoke.sourceforget.net +website) installs itself as:: + + /sys/devices/system/edac/test-instance + +in this directory are various controls, a symlink and one or more ``instance`` +directories. + +The standard default controls are: + + ============== ======================================================= + log_ce boolean to log CE events + log_ue boolean to log UE events + panic_on_ue boolean to ``panic`` the system if an UE is encountered + (default off, can be set true via startup script) + poll_msec time period between POLL cycles for events + ============== ======================================================= + +The test_device_edac device adds at least one of its own custom control: + + ============== ================================================== + test_bits which in the current test driver does nothing but + show how it is installed. A ported driver can + add one or more such controls and/or attributes + for specific uses. + One out-of-tree driver uses controls here to allow + for ERROR INJECTION operations to hardware + injection registers + ============== ================================================== + +The symlink points to the 'struct dev' that is registered for this edac_device. + +Instances +--------- + +One or more instance directories are present. For the ``test_device_edac`` +case: + + +----------------+ + | test-instance0 | + +----------------+ + + +In this directory there are two default counter attributes, which are totals of +counter in deeper subdirectories. + + ============== ==================================== + ce_count total of CE events of subdirectories + ue_count total of UE events of subdirectories + ============== ==================================== + +Blocks +------ + +At the lowest directory level is the ``block`` directory. 
There can be 0, 1 +or more blocks specified in each instance: + + +-------------+ + | test-block0 | + +-------------+ + +In this directory the default attributes are: + + ============== ================================================ + ce_count the counter of CE events for this ``block`` + of hardware being monitored + ue_count the counter of UE events for this ``block`` + of hardware being monitored + ============== ================================================ + + +The ``test_device_edac`` device adds 4 attributes and 1 control: + + ================== ==================================================== + test-block-bits-0 for every POLL cycle this counter + is incremented + test-block-bits-1 every 10 cycles, this counter is bumped once, + and test-block-bits-0 is set to 0 + test-block-bits-2 every 100 cycles, this counter is bumped once, + and test-block-bits-1 is set to 0 + test-block-bits-3 every 1000 cycles, this counter is bumped once, + and test-block-bits-2 is set to 0 + ================== ==================================================== + + + ================== ==================================================== + reset-counters writing anything to this control will + reset all the above counters. + ================== ==================================================== + + +Use of the ``test_device_edac`` driver should enable any others to create their own +unique drivers for their hardware systems. + +The ``test_device_edac`` sample driver is located at the +http://bluesmoke.sourceforge.net project site for EDAC. + + +Usage of EDAC APIs on Nehalem and newer Intel CPUs +-------------------------------------------------- + +On older Intel architectures, the memory controller was part of the North +Bridge chipset. Nehalem, Sandy Bridge, Ivy Bridge, Haswell, Sky Lake and +newer Intel architectures integrated an enhanced version of the memory +controller (MC) inside the CPUs. + +This chapter will cover the differences of the enhanced memory controllers +found on newer Intel CPUs, such as the ``i7core_edac``, ``sb_edac`` and +``sbx_edac`` drivers. + +.. note:: + + The Xeon E7 processor families use a separate chip for the memory + controller, called Intel Scalable Memory Buffer. This section doesn't + apply to such families. + +1) There is one Memory Controller per Quick Path Interconnect + (QPI). In the driver, the term "socket" means one QPI. This is + associated with a physical CPU socket. + + Each MC has 3 physical read channels, 3 physical write channels and + 3 logic channels. The driver currently sees it as just 3 channels. + Each channel can have up to 3 DIMMs. + + The minimum known unit is the DIMM. There is no information about csrows. + As the minimum unit that the EDAC API maps is the csrow, the driver + sequentially maps each channel/DIMM pair into a different csrow. + + For example, supposing the following layout:: + + Ch0 phy rd0, wr0 (0x063f4031): 2 ranks, UDIMMs + dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400 + dimm 1 1024 Mb offset: 4, bank: 8, rank: 1, row: 0x4000, col: 0x400 + Ch1 phy rd1, wr1 (0x063f4031): 2 ranks, UDIMMs + dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400 + Ch2 phy rd3, wr3 (0x063f4031): 2 ranks, UDIMMs + dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400 + + The driver will map it as:: + + csrow0: channel 0, dimm0 + csrow1: channel 0, dimm1 + csrow2: channel 1, dimm0 + csrow3: channel 2, dimm0 + + and will export one DIMM per csrow. + + Each QPI is exported as a different memory controller.
+ +2) The MC has the ability to inject errors to test drivers. The drivers + implement this functionality via some error injection nodes: + + For injecting a memory error, there are some sysfs nodes, under + ``/sys/devices/system/edac/mc/mc?/``: + + - ``inject_addrmatch/*``: + Controls the error injection mask register. It is possible to specify + several characteristics of the address to match an error code:: + + dimm = the affected dimm. Numbers are relative to a channel; + rank = the memory rank; + channel = the channel that will generate an error; + bank = the affected bank; + page = the page address; + column (or col) = the address column. + + each of the above values can be set to "any" to match any valid value. + + At driver init, all values are set to any. + + For example, to generate an error at rank 1 of dimm 2, for any channel, + any bank, any page, any column:: + + echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm + echo 1 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank + + To return to the default behaviour of matching any, you can do:: + + echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm + echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank + + - ``inject_eccmask``: + specifies what bits will have troubles, + + - ``inject_section``: + specifies what ECC cache section will get the error:: + + 3 for both + 2 for the highest + 1 for the lowest + + - ``inject_type``: + specifies the type of error, being a combination of the following bits:: + + bit 0 - repeat + bit 1 - ecc + bit 2 - parity + + - ``inject_enable``: + starts the error generation when something different than 0 is written. + + All inject vars can be read. root permission is needed for write. + + Datasheet states that the error will only be generated after a write on an + address that matches inject_addrmatch. It seems, however, that reading will + also produce an error. + + For example, the following code will generate an error for any write access + at socket 0, on any DIMM/address on channel 2:: + + echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/channel + echo 2 >/sys/devices/system/edac/mc/mc0/inject_type + echo 64 >/sys/devices/system/edac/mc/mc0/inject_eccmask + echo 3 >/sys/devices/system/edac/mc/mc0/inject_section + echo 1 >/sys/devices/system/edac/mc/mc0/inject_enable + dd if=/dev/mem of=/dev/null seek=16k bs=4k count=1 >& /dev/null + + For socket 1, it is needed to replace "mc0" by "mc1" at the above + commands. + + The generated error message will look like:: + + EDAC MC0: UE row 0, channel-a= 0 channel-b= 0 labels "-": NON_FATAL (addr = 0x0075b980, socket=0, Dimm=0, Channel=2, syndrome=0x00000040, count=1, Err=8c0000400001009f:4000080482 (read error: read ECC error)) + +3) Corrected Error memory register counters + + Those newer MCs have some registers to count memory errors. The driver + uses those registers to report Corrected Errors on devices with Registered + DIMMs. + + However, those counters don't work with Unregistered DIMM. As the chipset + offers some counters that also work with UDIMMs (but with a worse level of + granularity than the default ones), the driver exposes those registers for + UDIMM memories. 
+ + They can be read by looking at the contents of ``all_channel_counts/``:: + + $ for i in /sys/devices/system/edac/mc/mc0/all_channel_counts/*; do echo $i; cat $i; done + /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm0 + 0 + /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm1 + 0 + /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm2 + 0 + + What happens here is that errors on different csrows, but at the same + dimm number will increment the same counter. + So, in this memory mapping:: + + csrow0: channel 0, dimm0 + csrow1: channel 0, dimm1 + csrow2: channel 1, dimm0 + csrow3: channel 2, dimm0 + + The hardware will increment udimm0 for an error at the first dimm at either + csrow0, csrow2 or csrow3; + + The hardware will increment udimm1 for an error at the second dimm at either + csrow0, csrow2 or csrow3; + + The hardware will increment udimm2 for an error at the third dimm at either + csrow0, csrow2 or csrow3; + +4) Standard error counters + + The standard error counters are generated when an mcelog error is received + by the driver. Since, with UDIMM, this is counted by software, it is + possible that some errors could be lost. With RDIMM's, they display the + contents of the registers + +Reference documents used on ``amd64_edac`` +------------------------------------------ + +``amd64_edac`` module is based on the following documents +(available from http://support.amd.com/en-us/search/tech-docs): + +1. :Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD + Opteron Processors + :AMD publication #: 26094 + :Revision: 3.26 + :Link: http://support.amd.com/TechDocs/26094.PDF + +2. :Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh + Processors + :AMD publication #: 32559 + :Revision: 3.00 + :Issue Date: May 2006 + :Link: http://support.amd.com/TechDocs/32559.pdf + +3. :Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h + Processors + :AMD publication #: 31116 + :Revision: 3.00 + :Issue Date: September 07, 2007 + :Link: http://support.amd.com/TechDocs/31116.pdf + +4. :Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h + Models 30h-3Fh Processors + :AMD publication #: 49125 + :Revision: 3.06 + :Issue Date: 2/12/2015 (latest release) + :Link: http://support.amd.com/TechDocs/49125_15h_Models_30h-3Fh_BKDG.pdf + +5. :Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h + Models 60h-6Fh Processors + :AMD publication #: 50742 + :Revision: 3.01 + :Issue Date: 7/23/2015 (latest release) + :Link: http://support.amd.com/TechDocs/50742_15h_Models_60h-6Fh_BKDG.pdf + +6. 
:Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 16h + Models 00h-0Fh Processors + :AMD publication #: 48751 + :Revision: 3.03 + :Issue Date: 2/23/2015 (latest release) + :Link: http://support.amd.com/TechDocs/48751_16h_bkdg.pdf + +Credits +======= + +* Written by Doug Thompson + + - 7 Dec 2005 + - 17 Jul 2007 Updated + +* |copy| Mauro Carvalho Chehab + + - 05 Aug 2009 Nehalem interface + - 26 Oct 2016 Converted to ReST and cleanups at the Nehalem section + +* EDAC authors/maintainers: + + - Doug Thompson, Dave Jiang, Dave Peterson et al, + - Mauro Carvalho Chehab + - Borislav Petkov + - original author: Thayne Harbaugh diff --git a/Documentation/admin-guide/vga-softcursor.rst b/Documentation/admin-guide/vga-softcursor.rst index a663a745cff4bffd827ba8a06f9e9302c481b4c8..f52175457e60ed5313dd61de46504a50472a4903 100644 --- a/Documentation/admin-guide/vga-softcursor.rst +++ b/Documentation/admin-guide/vga-softcursor.rst @@ -4,15 +4,13 @@ Software cursor for VGA by Pavel Machek and Martin Mares -Linux now has some ability to manipulate cursor appearance. Normally, you -can set the size of hardware cursor (and also work around some ugly bugs in -those miserable Trident cards [#f1]_. You can now play a few new tricks: -you can make your cursor look - -like a non-blinking red block, make it inverse background of the character it's -over or to highlight that character and still choose whether the original -hardware cursor should remain visible or not. There may be other things I have -never thought of. +Linux now has some ability to manipulate cursor appearance. Normally, +you can set the size of hardware cursor. You can now play a few new +tricks: you can make your cursor look like a non-blinking red block, +make it inverse background of the character it's over or to highlight +that character and still choose whether the original hardware cursor +should remain visible or not. There may be other things I have never +thought of. The cursor appearance is controlled by a ``[?1;2;3c`` escape sequence where 1, 2 and 3 are parameters described below. If you omit any of them, @@ -48,8 +46,6 @@ third parameter Bit setting takes place before bit toggling, so you can simply clear a bit by including it in both the set mask and the toggle mask. -.. [#f1] see ``#define TRIDENT_GLITCH`` in ``drivers/video/vgacon.c``. - Examples -------- diff --git a/Documentation/arm/stm32/overview.txt b/Documentation/arm/stm32/overview.txt index 09aed5588d7c5c71ab895f202136fc7c51751b20..a03b0357c0170aec8d28ff6e0ab24661967ff92d 100644 --- a/Documentation/arm/stm32/overview.txt +++ b/Documentation/arm/stm32/overview.txt @@ -5,7 +5,8 @@ Introduction ------------ The STMicroelectronics family of Cortex-M based MCUs are supported by the - 'STM32' platform of ARM Linux. Currently only the STM32F429 is supported. + 'STM32' platform of ARM Linux. Currently only the STM32F429 (Cortex-M4) + and STM32F746 (Cortex-M7) are supported. Configuration diff --git a/Documentation/arm/stm32/stm32f746-overview.txt b/Documentation/arm/stm32/stm32f746-overview.txt new file mode 100644 index 0000000000000000000000000000000000000000..cffd2b1ccd6f82c4e319eeee71234e57117bc4b6 --- /dev/null +++ b/Documentation/arm/stm32/stm32f746-overview.txt @@ -0,0 +1,34 @@ + STM32F746 Overview + ================== + + Introduction + ------------ + The STM32F746 is a Cortex-M7 MCU aimed at various applications. 
+ It features: + - Cortex-M7 core running up to @216MHz + - 1MB internal flash, 320KBytes internal RAM (+4KB of backup SRAM) + - FMC controller to connect SDRAM, NOR and NAND memories + - Dual mode QSPI + - SD/MMC/SDIO support + - Ethernet controller + - USB OTFG FS & HS controllers + - I2C, SPI, CAN busses support + - Several 16 & 32 bits general purpose timers + - Serial Audio interface + - LCD controller + - HDMI-CEC + - SPDIFRX + + Resources + --------- + Datasheet and reference manual are publicly available on ST website: + - http://www.st.com/content/st_com/en/products/microcontrollers/stm32-32-bit-arm-cortex-mcus/stm32f7-series/stm32f7x6/stm32f746ng.html + + Document Author + --------------- + Alexandre Torgue + + + + + diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 918e1e0d0e78b2b84847081b0d562e188919c0d8..01ddeaf64b0f6cdba051a66c84e70918198e6e1e 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -348,7 +348,7 @@ Drivers can now specify a request prepare function (q->prep_rq_fn) that the block layer would invoke to pre-build device commands for a given request, or perform other preparatory processing for the request. This is routine is called by elv_next_request(), i.e. typically just before servicing a request. -(The prepare function would not be called for requests that have REQ_DONTPREP +(The prepare function would not be called for requests that have RQF_DONTPREP enabled) Aside: @@ -553,8 +553,8 @@ struct request { struct request_list *rl; } -See the rq_flag_bits definitions for an explanation of the various flags -available. Some bits are used by the block layer or i/o scheduler. +See the req_ops and req_flag_bits definitions for an explanation of the various +flags available. Some bits are used by the block layer or i/o scheduler. The behaviour of the various sector counts are almost the same as before, except that since we have multi-segment bios, current_nr_sectors refers diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt index 1e4f835a659db7b1740bdc8ecedf8e2ccf7b2ea8..895bd3813115f6b9aa1706616bcc72cb1d76fd37 100644 --- a/Documentation/block/cfq-iosched.txt +++ b/Documentation/block/cfq-iosched.txt @@ -240,11 +240,11 @@ All cfq queues doing synchronous sequential IO go on to sync-idle tree. On this tree we idle on each queue individually. All synchronous non-sequential queues go on sync-noidle tree. Also any -request which are marked with REQ_NOIDLE go on this service tree. On this -tree we do not idle on individual queues instead idle on the whole group -of queues or the tree. So if there are 4 queues waiting for IO to dispatch -we will idle only once last queue has dispatched the IO and there is -no more IO on this service tree. +synchronous write request which is not marked with REQ_IDLE goes on this +service tree. On this tree we do not idle on individual queues instead idle +on the whole group of queues or the tree. So if there are 4 queues waiting +for IO to dispatch we will idle only once last queue has dispatched the IO +and there is no more IO on this service tree. All async writes go on async service tree. There is no idling on async queues. @@ -257,17 +257,17 @@ tree idling provides isolation with buffered write queues on async tree. FAQ === -Q1. Why to idle at all on queues marked with REQ_NOIDLE. +Q1. Why to idle at all on queues not marked with REQ_IDLE. -A1. We only do tree idle (all queues on sync-noidle tree) on queues marked - with REQ_NOIDLE. 
This helps in providing isolation with all the sync-idle +A1. We only do tree idle (all queues on sync-noidle tree) on queues not marked + with REQ_IDLE. This helps in providing isolation with all the sync-idle queues. Otherwise in presence of many sequential readers, other synchronous IO might not get fair share of disk. For example, if there are 10 sequential readers doing IO and they get - 100ms each. If a REQ_NOIDLE request comes in, it will be scheduled - roughly after 1 second. If after completion of REQ_NOIDLE request we - do not idle, and after a couple of milli seconds a another REQ_NOIDLE + 100ms each. If a !REQ_IDLE request comes in, it will be scheduled + roughly after 1 second. If after completion of !REQ_IDLE request we + do not idle, and after a couple of milli seconds a another !REQ_IDLE request comes in, again it will be scheduled after 1second. Repeat it and notice how a workload can lose its disk share and suffer due to multiple sequential readers. @@ -276,16 +276,16 @@ A1. We only do tree idle (all queues on sync-noidle tree) on queues marked context of fsync, and later some journaling data is written. Journaling data comes in only after fsync has finished its IO (atleast for ext4 that seemed to be the case). Now if one decides not to idle on fsync - thread due to REQ_NOIDLE, then next journaling write will not get + thread due to !REQ_IDLE, then next journaling write will not get scheduled for another second. A process doing small fsync, will suffer badly in presence of multiple sequential readers. - Hence doing tree idling on threads using REQ_NOIDLE flag on requests + Hence doing tree idling on threads using !REQ_IDLE flag on requests provides isolation from multiple sequential readers and at the same time we do not idle on individual threads. -Q2. When to specify REQ_NOIDLE -A2. I would think whenever one is doing synchronous write and not expecting +Q2. When to specify REQ_IDLE +A2. I would think whenever one is doing synchronous write and expecting more writes to be dispatched from same context soon, should be able - to specify REQ_NOIDLE on writes and that probably should work well for + to specify REQ_IDLE on writes and that probably should work well for most of the cases. diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt index d8880ca30af4c35d562c0f77b1b3a56c3ff6e1d7..3140dbd860d8c72c341b4559264c532d062a6f35 100644 --- a/Documentation/block/null_blk.txt +++ b/Documentation/block/null_blk.txt @@ -72,4 +72,4 @@ use_per_node_hctx=[0/1]: Default: 0 queue for each CPU node in the system. use_lightnvm=[0/1]: Default: 0 - Register device with LightNVM. Requires blk-mq to be used. + Register device with LightNVM. Requires blk-mq and CONFIG_NVM to be enabled. diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt index 2a3904030dea5d287f6e1dc2f8f93461707ae018..51642159aedbbc405d1bb90fa89402c2143f8310 100644 --- a/Documentation/block/queue-sysfs.txt +++ b/Documentation/block/queue-sysfs.txt @@ -58,6 +58,20 @@ When read, this file shows the total number of block IO polls and how many returned success. Writing '0' to this file will disable polling for this device. Writing any non-zero value will enable this feature. +io_poll_delay (RW) +------------------ +If polling is enabled, this controls what kind of polling will be +performed. It defaults to -1, which is classic polling. In this mode, +the CPU will repeatedly ask for completions without giving up any time. 
+If set to 0, a hybrid polling mode is used, where the kernel will attempt +to make an educated guess at when the IO will complete. Based on this +guess, the kernel will put the process issuing IO to sleep for an amount +of time, before entering a classic poll loop. This mode might be a +little slower than pure classic polling, but it will be more efficient. +If set to a value larger than 0, the kernel will put the process issuing +IO to sleep for this amount of microseconds before entering classic +polling. + iostats (RW) ------------- This file is used to control (on/off) the iostats accounting of the @@ -169,5 +183,14 @@ This is the number of bytes the device can write in a single write-same command. A value of '0' means write-same is not supported by this device. +wb_lat_usec (RW) +---------------- +If the device is registered for writeback throttling, then this file shows +the target minimum read latency. If this latency is exceeded in a given +window of time (see wb_window_usec), then the writeback throttling will start +scaling back writes. Writing a value of '0' to this file disables the +feature. Writing a value of '-1' to this file resets the value to the +default setting. + Jens Axboe <jens.axboe@oracle.com>, February 2009 diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt index 8d9773f23550cbc4e8d97ebecb79d55a161e6d58..3c355f6ad83494e6de96a99f1a39ebf7e413a670 100644 --- a/Documentation/cpu-freq/cpufreq-stats.txt +++ b/Documentation/cpu-freq/cpufreq-stats.txt @@ -44,11 +44,17 @@ the stats driver insertion. total 0 drwxr-xr-x 2 root root 0 May 14 16:06 . drwxr-xr-x 3 root root 0 May 14 15:58 .. +--w------- 1 root root 4096 May 14 16:06 reset -r--r--r-- 1 root root 4096 May 14 16:06 time_in_state -r--r--r-- 1 root root 4096 May 14 16:06 total_trans -r--r--r-- 1 root root 4096 May 14 16:06 trans_table -------------------------------------------------------------------------------- +- reset +Write-only attribute that can be used to reset the stat counters. This can be +useful for evaluating system behaviour under different governors without the +need for a reboot. + - time_in_state This gives the amount of time spent in each of the frequencies supported by this CPU. The cat output will have "<frequency> <time>" pairs.
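
As a rough illustration only (not part of these patches): the io_poll_delay and wb_lat_usec queue attributes documented above are ordinary sysfs files, so they can be driven from a small C helper just as well as from the shell. The device name nvme0n1 and the values used below are assumptions made for the example.

	/* Hypothetical helper: write a block-queue sysfs attribute. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int set_queue_attr(const char *disk, const char *attr, const char *val)
	{
		char path[128];
		int fd;
		ssize_t n;

		snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		n = write(fd, val, strlen(val));
		close(fd);
		return n < 0 ? -1 : 0;
	}

	int main(void)
	{
		/* 0 selects the hybrid polling mode described above. */
		set_queue_attr("nvme0n1", "io_poll_delay", "0");
		/* Target a 2000 usec (2 ms) minimum read latency for writeback throttling. */
		set_queue_attr("nvme0n1", "wb_lat_usec", "2000");
		return 0;
	}
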

echo r <address> 0 > /sys/kernel/debug/mwifiex/mlan0/memrw + cat /sys/kernel/debug/mwifiex/mlan0/memrw + 2) For writing value to firmware memory location. + echo w <address>
[value] > /sys/kernel/debug/mwifiex/mlan0/memrw + + where the parameters are, + <address>
: memory address + [value]: value to be written + + Examples: + echo r 0x4cf70 0 > /sys/kernel/debug/mwifiex/mlan0/memrw + cat /sys/kernel/debug/mwifiex/mlan0/memrw + : Read memory address 0x4cf70 + iwpriv mlan0 memrdwr -0x7fff6000 -0x40000000 + echo w 0x8000a000 0xc0000000 > /sys/kernel/debug/mwifiex/mlan0/memrw + : Write 0xc0000000 to memory address 0x8000a000 + rdeeprom This command is used to read the EEPROM contents of the card. diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 39ce76ad00bc052d9dc99d5f1162842dc2dc2f3a..145cc4b5103b80bc2a0b0f5c94987a2d1349b39d 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1203,6 +1203,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, priv->adapter->curr_iface_comb.p2p_intf--; priv->adapter->curr_iface_comb.sta_intf++; dev->ieee80211_ptr->iftype = type; + if (mwifiex_deinit_priv_params(priv)) + return -1; + if (mwifiex_init_new_priv_params(priv, dev, type)) + return -1; + if (mwifiex_sta_init_cmd(priv, false, false)) + return -1; break; case NL80211_IFTYPE_ADHOC: if (mwifiex_cfg80211_deinit_p2p(priv)) @@ -2137,6 +2143,16 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1); if (mode == NL80211_IFTYPE_ADHOC) { + u16 enable = true; + + /* set ibss coalescing_status */ + ret = mwifiex_send_cmd( + priv, + HostCmd_CMD_802_11_IBSS_COALESCING_STATUS, + HostCmd_ACT_GEN_SET, 0, &enable, true); + if (ret) + return ret; + /* "privacy" is set only for ad-hoc mode */ if (privacy) { /* @@ -2222,8 +2238,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, is_scanning_required = 1; } else { mwifiex_dbg(priv->adapter, MSG, - "info: trying to associate to '%s' bssid %pM\n", - (char *)req_ssid.ssid, bss->bssid); + "info: trying to associate to '%.*s' bssid %pM\n", + req_ssid.ssid_len, (char *)req_ssid.ssid, + bss->bssid); memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); break; } @@ -2283,8 +2300,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } mwifiex_dbg(adapter, INFO, - "info: Trying to associate to %s and bssid %pM\n", - (char *)sme->ssid, sme->bssid); + "info: Trying to associate to %.*s and bssid %pM\n", + (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); if (!mwifiex_stop_bg_scan(priv)) cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); @@ -2417,8 +2434,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, } mwifiex_dbg(priv->adapter, MSG, - "info: trying to join to %s and bssid %pM\n", - (char *)params->ssid, params->bssid); + "info: trying to join to %.*s and bssid %pM\n", + params->ssid_len, (char *)params->ssid, params->bssid); mwifiex_set_ibss_params(priv, params); @@ -3016,6 +3033,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->netdev = NULL; memset(&priv->wdev, 0, sizeof(priv->wdev)); priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; + destroy_workqueue(priv->dfs_cac_workqueue); + priv->dfs_cac_workqueue = NULL; return ERR_PTR(-ENOMEM); } @@ -3070,8 +3089,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) mwifiex_stop_net_dev_queue(priv->netdev, adapter); - skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) + skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) { + skb_unlink(skb, &priv->bypass_txq); mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + } if (netif_carrier_ok(priv->netdev)) 
netif_carrier_off(priv->netdev); @@ -3970,13 +3991,11 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); struct mwifiex_ds_misc_cmd *hostcmd; struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1]; - struct mwifiex_adapter *adapter; struct sk_buff *skb; int err; if (!priv) return -EINVAL; - adapter = priv->adapter; err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len, mwifiex_tm_policy); diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 53477280f39c21d6daab9effa2c31b474fd78461..25a7475702f7fa9166098e9ca97e9cdd268682a8 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -1118,13 +1118,14 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter) { - if (!adapter->cmd_sent && + if (!adapter->cmd_sent && !atomic_read(&adapter->tx_hw_pending) && !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter)) mwifiex_dnld_sleep_confirm_cmd(adapter); else mwifiex_dbg(adapter, CMD, - "cmd: Delay Sleep Confirm (%s%s%s)\n", + "cmd: Delay Sleep Confirm (%s%s%s%s)\n", (adapter->cmd_sent) ? "D" : "", + atomic_read(&adapter->tx_hw_pending) ? "T" : "", (adapter->curr_cmd) ? "C" : "", (IS_CARD_RX_RCVD(adapter)) ? "R" : ""); } diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 4b1894b4757ffe654d409828c665e1df76668ca3..ea455948a68aaddc69a96a9454b697e66605cb12 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -181,6 +181,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) #define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) #define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176) +#define TLV_TYPE_PS_PARAMS_IN_HS (PROPRIETARY_TLV_BASE_ID + 181) #define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183) #define TLV_TYPE_MC_GROUP_INFO (PROPRIETARY_TLV_BASE_ID + 184) #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) @@ -218,6 +219,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14)) #define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15)) #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16)) +#define ISSUPP_ADHOC_ENABLED(FwCapInfo) (FwCapInfo & BIT(25)) #define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \ @@ -986,6 +988,15 @@ struct mwifiex_ps_param { __le16 delay_to_ps; }; +#define HS_DEF_WAKE_INTERVAL 100 +#define HS_DEF_INACTIVITY_TIMEOUT 50 + +struct mwifiex_ps_param_in_hs { + struct mwifiex_ie_types_header header; + __le32 hs_wake_int; + __le32 hs_inact_timeout; +}; + #define BITMAP_AUTO_DS 0x01 #define BITMAP_STA_PS 0x10 diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 82839d9f079fd5bba41effb4b7c8bf067feba09a..b36cb3fef35881112e7ff1246f9fb085e393bdb1 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -270,6 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->adhoc_11n_enabled = false; mwifiex_wmm_init(adapter); + atomic_set(&adapter->tx_hw_pending, 0); sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) adapter->sleep_cfm->data; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c 
b/drivers/net/wireless/marvell/mwifiex/main.c index 2478ccd6f2d936c9e962f5cc6cdeeb9a5908219a..e5c3a8aa3929ac8e53f228f0e26671e872ac1cff 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -308,6 +308,9 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) /* We have tried to wakeup the card already */ if (adapter->pm_wakeup_fw_try) break; + if (adapter->ps_state == PS_STATE_PRE_SLEEP) + mwifiex_check_ps_cond(adapter); + if (adapter->ps_state != PS_STATE_AWAKE) break; if (adapter->tx_lock_flag) { @@ -355,10 +358,8 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) /* Check if we need to confirm Sleep Request received previously */ - if (adapter->ps_state == PS_STATE_PRE_SLEEP) { - if (!adapter->cmd_sent && !adapter->curr_cmd) - mwifiex_check_ps_cond(adapter); - } + if (adapter->ps_state == PS_STATE_PRE_SLEEP) + mwifiex_check_ps_cond(adapter); /* * The ps_state may have been changed during processing of * Sleep Request event. @@ -517,12 +518,11 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) { int ret; char fmt[64]; - struct mwifiex_private *priv; struct mwifiex_adapter *adapter = context; struct mwifiex_fw_image fw; - struct semaphore *sem = adapter->card_sem; bool init_failed = false; struct wireless_dev *wdev; + struct completion *fw_done = adapter->fw_done; if (!firmware) { mwifiex_dbg(adapter, ERROR, @@ -575,8 +575,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) goto err_init_fw; } - priv = adapter->priv[MWIFIEX_BSS_ROLE_STA]; - if (!adapter->wiphy) { if (mwifiex_register_cfg80211(adapter)) { mwifiex_dbg(adapter, ERROR, @@ -669,7 +667,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) } if (init_failed) mwifiex_free_adapter(adapter); - up(sem); + /* Tell all current and future waiters we're finished */ + complete_all(fw_done); return; } @@ -1364,7 +1363,7 @@ static void mwifiex_main_work_queue(struct work_struct *work) * code is extracted from mwifiex_remove_card() */ static int -mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem) +mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; int i; @@ -1372,8 +1371,9 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem) if (!adapter) goto exit_return; - if (down_interruptible(sem)) - goto exit_sem_err; + wait_for_completion(adapter->fw_done); + /* Caller should ensure we aren't suspending while this happens */ + reinit_completion(adapter->fw_done); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); mwifiex_deauthenticate(priv, NULL); @@ -1430,8 +1430,6 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem) rtnl_unlock(); } - up(sem); -exit_sem_err: mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); exit_return: return 0; @@ -1441,21 +1439,18 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem) * code is extracted from mwifiex_add_card() */ static int -mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem, +mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done, struct mwifiex_if_ops *if_ops, u8 iface_type) { char fw_name[32]; struct pcie_service_card *card = adapter->card; - if (down_interruptible(sem)) - goto exit_sem_err; - mwifiex_init_lock_list(adapter); if (adapter->if_ops.up_dev) adapter->if_ops.up_dev(adapter); adapter->iface_type = iface_type; - adapter->card_sem = sem; + adapter->fw_done = 
fw_done; adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; adapter->surprise_removed = false; @@ -1506,7 +1501,8 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem, } strcpy(adapter->fw_name, fw_name); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); - up(sem); + + complete_all(adapter->fw_done); return 0; err_init_fw: @@ -1526,8 +1522,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem, err_kmalloc: mwifiex_terminate_workqueue(adapter); adapter->surprise_removed = true; - up(sem); -exit_sem_err: + complete_all(adapter->fw_done); mwifiex_dbg(adapter, INFO, "%s, error\n", __func__); return -1; @@ -1542,16 +1537,67 @@ void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare) struct mwifiex_if_ops if_ops; if (!prepare) { - mwifiex_reinit_sw(adapter, adapter->card_sem, &if_ops, + mwifiex_reinit_sw(adapter, adapter->fw_done, &if_ops, adapter->iface_type); } else { memcpy(&if_ops, &adapter->if_ops, sizeof(struct mwifiex_if_ops)); - mwifiex_shutdown_sw(adapter, adapter->card_sem); + mwifiex_shutdown_sw(adapter); } } EXPORT_SYMBOL_GPL(mwifiex_do_flr); +static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv) +{ + struct mwifiex_adapter *adapter = priv; + + if (adapter->irq_wakeup >= 0) { + dev_dbg(adapter->dev, "%s: wake by wifi", __func__); + adapter->wake_by_wifi = true; + disable_irq_nosync(irq); + } + + /* Notify PM core we are wakeup source */ + pm_wakeup_event(adapter->dev, 0); + + return IRQ_HANDLED; +} + +static void mwifiex_probe_of(struct mwifiex_adapter *adapter) +{ + int ret; + struct device *dev = adapter->dev; + + if (!dev->of_node) + return; + + adapter->dt_node = dev->of_node; + adapter->irq_wakeup = irq_of_parse_and_map(adapter->dt_node, 0); + if (!adapter->irq_wakeup) { + dev_info(dev, "fail to parse irq_wakeup from device tree\n"); + return; + } + + ret = devm_request_irq(dev, adapter->irq_wakeup, + mwifiex_irq_wakeup_handler, IRQF_TRIGGER_LOW, + "wifi_wake", adapter); + if (ret) { + dev_err(dev, "Failed to request irq_wakeup %d (%d)\n", + adapter->irq_wakeup, ret); + goto err_exit; + } + + disable_irq(adapter->irq_wakeup); + if (device_init_wakeup(dev, true)) { + dev_err(dev, "fail to init wakeup for mwifiex\n"); + goto err_exit; + } + return; + +err_exit: + adapter->irq_wakeup = 0; +} + /* * This function adds the card. 
* @@ -1566,21 +1612,22 @@ EXPORT_SYMBOL_GPL(mwifiex_do_flr); * - Add logical interfaces */ int -mwifiex_add_card(void *card, struct semaphore *sem, - struct mwifiex_if_ops *if_ops, u8 iface_type) +mwifiex_add_card(void *card, struct completion *fw_done, + struct mwifiex_if_ops *if_ops, u8 iface_type, + struct device *dev) { struct mwifiex_adapter *adapter; - if (down_interruptible(sem)) - goto exit_sem_err; - if (mwifiex_register(card, if_ops, (void **)&adapter)) { pr_err("%s: software init failed\n", __func__); goto err_init_sw; } + adapter->dev = dev; + mwifiex_probe_of(adapter); + adapter->iface_type = iface_type; - adapter->card_sem = sem; + adapter->fw_done = fw_done; adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; adapter->surprise_removed = false; @@ -1649,9 +1696,7 @@ mwifiex_add_card(void *card, struct semaphore *sem, mwifiex_free_adapter(adapter); err_init_sw: - up(sem); -exit_sem_err: return -1; } EXPORT_SYMBOL_GPL(mwifiex_add_card); @@ -1667,14 +1712,11 @@ EXPORT_SYMBOL_GPL(mwifiex_add_card); * - Unregister the device * - Free the adapter structure */ -int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) +int mwifiex_remove_card(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv = NULL; int i; - if (down_trylock(sem)) - goto exit_sem_err; - if (!adapter) goto exit_remove; @@ -1744,8 +1786,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) mwifiex_free_adapter(adapter); exit_remove: - up(sem); -exit_sem_err: return 0; } EXPORT_SYMBOL_GPL(mwifiex_remove_card); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 26df28f4bfb290a36f30c33097f10a3bf12257a4..5c9bd944b6ea9a6c476aed143f306d4e17744c86 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -20,6 +20,7 @@ #ifndef _MWIFIEX_MAIN_H_ #define _MWIFIEX_MAIN_H_ +#include #include #include #include @@ -315,6 +316,7 @@ struct mwifiex_tid_tbl { #define WMM_HIGHEST_PRIORITY 7 #define HIGH_PRIO_TID 7 #define LOW_PRIO_TID 0 +#define NO_PKT_PRIO_TID -1 #define MWIFIEX_WMM_DRV_DELAY_MAX 510 struct mwifiex_wmm_desc { @@ -856,6 +858,7 @@ struct mwifiex_adapter { atomic_t rx_pending; atomic_t tx_pending; atomic_t cmd_pending; + atomic_t tx_hw_pending; struct workqueue_struct *workqueue; struct work_struct main_work; struct workqueue_struct *rx_workqueue; @@ -983,7 +986,10 @@ struct mwifiex_adapter { u32 usr_dot_11ac_mcs_support; atomic_t pending_bridged_pkts; - struct semaphore *card_sem; + + /* For synchronizing FW initialization with device lifecycle. */ + struct completion *fw_done; + bool ext_scan; u8 fw_api_ver; u8 key_api_major_ver, key_api_minor_ver; @@ -1010,6 +1016,10 @@ struct mwifiex_adapter { bool usb_mc_setup; struct cfg80211_wowlan_nd_info *nd_info; struct ieee80211_regdomain *regd; + + /* Wake-on-WLAN (WoWLAN) */ + int irq_wakeup; + bool wake_by_wifi; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); @@ -1409,10 +1419,39 @@ static inline u8 mwifiex_is_tdls_link_setup(u8 status) return false; } +/* Disable platform specific wakeup interrupt */ +static inline void mwifiex_disable_wake(struct mwifiex_adapter *adapter) +{ + if (adapter->irq_wakeup >= 0) { + disable_irq_wake(adapter->irq_wakeup); + disable_irq(adapter->irq_wakeup); + if (adapter->wake_by_wifi) + /* Undo our disable, since interrupt handler already + * did this. 
+ */ + enable_irq(adapter->irq_wakeup); + + } +} + +/* Enable platform specific wakeup interrupt */ +static inline void mwifiex_enable_wake(struct mwifiex_adapter *adapter) +{ + /* Enable platform specific wakeup interrupt */ + if (adapter->irq_wakeup >= 0) { + adapter->wake_by_wifi = false; + enable_irq(adapter->irq_wakeup); + enable_irq_wake(adapter->irq_wakeup); + } +} + int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, u32 func_init_shutdown); -int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8); -int mwifiex_remove_card(struct mwifiex_adapter *, struct semaphore *); + +int mwifiex_add_card(void *card, struct completion *fw_done, + struct mwifiex_if_ops *if_ops, u8 iface_type, + struct device *dev); +int mwifiex_remove_card(struct mwifiex_adapter *adapter); void mwifiex_get_version(struct mwifiex_adapter *adapter, char *version, int maxlen); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 3c3c4f197da867abd2cce54fc9818feaec28f5b7..4db07da81d8daaa862a64f660d710cfd323c3fd0 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -35,7 +35,21 @@ static u8 user_rmmod; static struct mwifiex_if_ops pcie_ops; -static struct semaphore add_remove_card_sem; +static const struct of_device_id mwifiex_pcie_of_match_table[] = { + { .compatible = "pci11ab,2b42" }, + { .compatible = "pci1b4b,2b42" }, + { } +}; + +static int mwifiex_pcie_probe_of(struct device *dev) +{ + if (!of_match_node(mwifiex_pcie_of_match_table, dev->of_node)) { + dev_err(dev, "required compatible string missing\n"); + return -EINVAL; + } + + return 0; +} static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, @@ -101,23 +115,31 @@ static int mwifiex_pcie_suspend(struct device *dev) { struct mwifiex_adapter *adapter; struct pcie_service_card *card; - int hs_actived; struct pci_dev *pdev = to_pci_dev(dev); - if (pdev) { - card = pci_get_drvdata(pdev); - if (!card || !card->adapter) { - pr_err("Card or adapter structure is not valid\n"); - return 0; - } - } else { - pr_err("PCIE device is not specified\n"); + card = pci_get_drvdata(pdev); + + /* Might still be loading firmware */ + wait_for_completion(&card->fw_done); + + adapter = card->adapter; + if (!adapter) { + dev_err(dev, "adapter is not valid\n"); return 0; } - adapter = card->adapter; + mwifiex_enable_wake(adapter); + + /* Enable the Host Sleep */ + if (!mwifiex_enable_hs(adapter)) { + mwifiex_dbg(adapter, ERROR, + "cmd: failed to suspend\n"); + adapter->hs_enabling = false; + mwifiex_disable_wake(adapter); + return -EFAULT; + } - hs_actived = mwifiex_enable_hs(adapter); + flush_workqueue(adapter->workqueue); /* Indicate device suspended */ adapter->is_suspended = true; @@ -140,14 +162,10 @@ static int mwifiex_pcie_resume(struct device *dev) struct pcie_service_card *card; struct pci_dev *pdev = to_pci_dev(dev); - if (pdev) { - card = pci_get_drvdata(pdev); - if (!card || !card->adapter) { - pr_err("Card or adapter structure is not valid\n"); - return 0; - } - } else { - pr_err("PCIE device is not specified\n"); + card = pci_get_drvdata(pdev); + + if (!card->adapter) { + dev_err(dev, "adapter structure is not valid\n"); return 0; } @@ -163,6 +181,7 @@ static int mwifiex_pcie_resume(struct device *dev) mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), MWIFIEX_ASYNC_CMD); + mwifiex_disable_wake(adapter); return 0; } @@ -178,14 +197,17 @@ static int mwifiex_pcie_probe(struct pci_dev 
*pdev, const struct pci_device_id *ent) { struct pcie_service_card *card; + int ret; pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n", pdev->vendor, pdev->device, pdev->revision); - card = kzalloc(sizeof(struct pcie_service_card), GFP_KERNEL); + card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; + init_completion(&card->fw_done); + card->dev = pdev; if (ent->driver_data) { @@ -199,8 +221,15 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, card->pcie.can_ext_scan = data->can_ext_scan; } - if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops, - MWIFIEX_PCIE)) { + /* device tree node parsing and platform specific configuration*/ + if (pdev->dev.of_node) { + ret = mwifiex_pcie_probe_of(&pdev->dev); + if (ret) + return ret; + } + + if (mwifiex_add_card(card, &card->fw_done, &pcie_ops, + MWIFIEX_PCIE, &pdev->dev)) { pr_err("%s failed\n", __func__); return -1; } @@ -218,19 +247,14 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) struct mwifiex_private *priv; card = pci_get_drvdata(pdev); - if (!card) - return; + + wait_for_completion(&card->fw_done); adapter = card->adapter; if (!adapter || !adapter->priv_num) return; if (user_rmmod && !adapter->mfg_mode) { -#ifdef CONFIG_PM_SLEEP - if (adapter->is_suspended) - mwifiex_pcie_resume(&pdev->dev); -#endif - mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); @@ -240,7 +264,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN); } - mwifiex_remove_card(card->adapter, &add_remove_card_sem); + mwifiex_remove_card(adapter); } static void mwifiex_pcie_shutdown(struct pci_dev *pdev) @@ -483,6 +507,7 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) } } + atomic_set(&adapter->tx_hw_pending, 0); return 0; } @@ -682,6 +707,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter) card->tx_buf_list[i] = NULL; } + atomic_set(&adapter->tx_hw_pending, 0); return; } @@ -1119,6 +1145,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) -1); else mwifiex_write_data_complete(adapter, skb, 0, 0); + atomic_dec(&adapter->tx_hw_pending); } card->tx_buf_list[wrdoneidx] = NULL; @@ -1211,6 +1238,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr; buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); card->tx_buf_list[wrindx] = skb; + atomic_inc(&adapter->tx_hw_pending); if (reg->pfu_enabled) { desc2 = card->txbd_ring[wrindx]; @@ -1288,6 +1316,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, done_unmap: mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); card->tx_buf_list[wrindx] = NULL; + atomic_dec(&adapter->tx_hw_pending); if (reg->pfu_enabled) memset(desc2, 0, sizeof(*desc2)); else @@ -1669,9 +1698,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) if (!adapter->curr_cmd) { if (adapter->ps_state == PS_STATE_SLEEP_CFM) { - mwifiex_process_sleep_confirm_resp(adapter, skb->data, - skb->len); - mwifiex_pcie_enable_host_int(adapter); if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_SLEEP_CFM_DONE)) { @@ -1684,6 +1710,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) while (reg->sleep_cookie && (count++ < 10) && mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); + mwifiex_pcie_enable_host_int(adapter); + mwifiex_process_sleep_confirm_resp(adapter, 
skb->data, + skb->len); } else { mwifiex_dbg(adapter, ERROR, "There is no command but got cmdrsp\n"); @@ -2022,7 +2051,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, } /* Wait for the command done interrupt */ - do { + for (tries = 0; tries < MAX_POLL_TRIES; tries++) { if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS, &ireg_intr)) { mwifiex_dbg(adapter, ERROR, @@ -2034,8 +2063,18 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = -1; goto done; } - } while ((ireg_intr & CPU_INTR_DOOR_BELL) == - CPU_INTR_DOOR_BELL); + if (!(ireg_intr & CPU_INTR_DOOR_BELL)) + break; + usleep_range(10, 20); + } + if (ireg_intr & CPU_INTR_DOOR_BELL) { + mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n", + __func__); + mwifiex_unmap_pci_memory(adapter, skb, + PCI_DMA_TODEVICE); + ret = -1; + goto done; + } mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); @@ -2210,7 +2249,8 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) } card = pci_get_drvdata(pdev); - if (!card || !card->adapter) { + + if (!card->adapter) { pr_err("info: %s: card=%p adapter=%p\n", __func__, card, card ? card->adapter : NULL); goto exit; @@ -2322,6 +2362,8 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) ret = mwifiex_pcie_process_cmd_complete(adapter); if (ret) return ret; + if (adapter->hs_activated) + return ret; } if (card->msi_enable) { @@ -2806,7 +2848,6 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) err_set_dma_mask: pci_disable_device(pdev); err_enable_dev: - pci_set_drvdata(pdev, NULL); return ret; } @@ -2840,9 +2881,7 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) pci_disable_device(pdev); pci_release_region(pdev, 2); pci_release_region(pdev, 0); - pci_set_drvdata(pdev, NULL); } - kfree(card); } static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) @@ -2962,11 +3001,9 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) static int mwifiex_register_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - struct pci_dev *pdev = card->dev; /* save adapter pointer in card */ card->adapter = adapter; - adapter->dev = &pdev->dev; if (mwifiex_pcie_request_irq(adapter)) return -1; @@ -2989,30 +3026,28 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - struct pci_dev *pdev; + struct pci_dev *pdev = card->dev; int i; - if (card) { - pdev = card->dev; - if (card->msix_enable) { - for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) - synchronize_irq(card->msix_entries[i].vector); + if (card->msix_enable) { + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) + synchronize_irq(card->msix_entries[i].vector); - for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) - free_irq(card->msix_entries[i].vector, - &card->msix_ctx[i]); + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) + free_irq(card->msix_entries[i].vector, + &card->msix_ctx[i]); - card->msix_enable = 0; - pci_disable_msix(pdev); - } else { - mwifiex_dbg(adapter, INFO, - "%s(): calling free_irq()\n", __func__); - free_irq(card->dev->irq, &card->share_irq_ctx); + card->msix_enable = 0; + pci_disable_msix(pdev); + } else { + mwifiex_dbg(adapter, INFO, + "%s(): calling free_irq()\n", __func__); + free_irq(card->dev->irq, &card->share_irq_ctx); - if (card->msi_enable) - pci_disable_msi(pdev); - } + if (card->msi_enable) + pci_disable_msi(pdev); } + 
card->adapter = NULL; } /* This function initializes the PCI-E host memory space, WCB rings, etc. @@ -3095,18 +3130,14 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) adapter->seq_num = 0; adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; - if (card) { - if (reg->sleep_cookie) - mwifiex_pcie_delete_sleep_cookie_buf(adapter); - - mwifiex_pcie_delete_cmdrsp_buf(adapter); - mwifiex_pcie_delete_evtbd_ring(adapter); - mwifiex_pcie_delete_rxbd_ring(adapter); - mwifiex_pcie_delete_txbd_ring(adapter); - card->cmdrsp_buf = NULL; - } + if (reg->sleep_cookie) + mwifiex_pcie_delete_sleep_cookie_buf(adapter); - return; + mwifiex_pcie_delete_cmdrsp_buf(adapter); + mwifiex_pcie_delete_evtbd_ring(adapter); + mwifiex_pcie_delete_rxbd_ring(adapter); + mwifiex_pcie_delete_txbd_ring(adapter); + card->cmdrsp_buf = NULL; } static struct mwifiex_if_ops pcie_ops = { @@ -3140,8 +3171,7 @@ static struct mwifiex_if_ops pcie_ops = { /* * This function initializes the PCIE driver module. * - * This initiates the semaphore and registers the device with - * PCIE bus. + * This registers the device with PCIE bus. */ static int mwifiex_pcie_init_module(void) { @@ -3149,8 +3179,6 @@ static int mwifiex_pcie_init_module(void) pr_debug("Marvell PCIe Driver\n"); - sema_init(&add_remove_card_sem, 1); - /* Clear the flag in case user removes the card. */ user_rmmod = 0; @@ -3174,9 +3202,6 @@ static int mwifiex_pcie_init_module(void) */ static void mwifiex_pcie_cleanup_module(void) { - if (!down_interruptible(&add_remove_card_sem)) - up(&add_remove_card_sem); - /* Set the flag as user is removing this module. */ user_rmmod = 1; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 46f99cae9399bd7e946b545cf9e51f76f8bd1121..ae3365d1c34e8acc8d882c56b8972c9c17dd5a85 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -22,6 +22,7 @@ #ifndef _MWIFIEX_PCIE_H #define _MWIFIEX_PCIE_H +#include #include #include @@ -345,6 +346,7 @@ struct pcie_service_card { struct pci_dev *dev; struct mwifiex_adapter *adapter; struct mwifiex_pcie_device pcie; + struct completion fw_done; u8 txbd_flush; u32 txbd_wrptr; diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 97c9765b5bc6a33396085fa18286c5969867ea63..181691684a08f0a1bfc6eca17292ba5fe3f1a778 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -827,7 +827,6 @@ mwifiex_config_scan(struct mwifiex_private *priv, u32 num_probes; u32 ssid_len; u32 chan_idx; - u32 chan_num; u32 scan_type; u16 scan_dur; u8 channel; @@ -1105,13 +1104,12 @@ mwifiex_config_scan(struct mwifiex_private *priv, mwifiex_dbg(adapter, INFO, "info: Scan: Scanning current channel only\n"); } - chan_num = chan_idx; } else { mwifiex_dbg(adapter, INFO, "info: Scan: Creating full region channel list\n"); - chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in, - scan_chan_list, - *filtered_scan); + mwifiex_scan_create_channel_list(priv, user_scan_in, + scan_chan_list, + *filtered_scan); } } @@ -1671,6 +1669,10 @@ static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv, } done: + /* beacon_ie buffer was allocated in function + * mwifiex_fill_new_bss_desc(). Free it now. 
+ */ + kfree(bss_desc->beacon_buf); kfree(bss_desc); return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 8718950004f3f6e5bda50b38c9fbf42d7cf474a6..740d79cd91fa1722f74530f0ae3c0a9271d3b1bc 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -49,8 +49,6 @@ static u8 user_rmmod; static struct mwifiex_if_ops sdio_ops; static unsigned long iface_work_flags; -static struct semaphore add_remove_card_sem; - static struct memory_type_mapping generic_mem_type_map[] = { {"DUMP", NULL, 0, 0xDD}, }; @@ -79,59 +77,18 @@ static const struct of_device_id mwifiex_sdio_of_match_table[] = { { } }; -static irqreturn_t mwifiex_wake_irq_wifi(int irq, void *priv) -{ - struct mwifiex_plt_wake_cfg *cfg = priv; - - if (cfg->irq_wifi >= 0) { - pr_info("%s: wake by wifi", __func__); - cfg->wake_by_wifi = true; - disable_irq_nosync(irq); - } - - return IRQ_HANDLED; -} - /* This function parse device tree node using mmc subnode devicetree API. * The device node is saved in card->plt_of_node. * if the device tree node exist and include interrupts attributes, this * function will also request platform specific wakeup interrupt. */ -static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card) +static int mwifiex_sdio_probe_of(struct device *dev) { - struct mwifiex_plt_wake_cfg *cfg; - int ret; - if (!of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) { dev_err(dev, "required compatible string missing\n"); return -EINVAL; } - card->plt_of_node = dev->of_node; - card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg), - GFP_KERNEL); - cfg = card->plt_wake_cfg; - if (cfg && card->plt_of_node) { - cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0); - if (!cfg->irq_wifi) { - dev_dbg(dev, - "fail to parse irq_wifi from device tree\n"); - } else { - ret = devm_request_irq(dev, cfg->irq_wifi, - mwifiex_wake_irq_wifi, - IRQF_TRIGGER_LOW, - "wifi_wake", cfg); - if (ret) { - dev_dbg(dev, - "Failed to request irq_wifi %d (%d)\n", - cfg->irq_wifi, ret); - card->plt_wake_cfg = NULL; - return 0; - } - disable_irq(cfg->irq_wifi); - } - } - return 0; } @@ -152,10 +109,12 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) pr_debug("info: vendor=0x%4.04X device=0x%4.04X class=%d function=%d\n", func->vendor, func->device, func->class, func->num); - card = kzalloc(sizeof(struct sdio_mmc_card), GFP_KERNEL); + card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; + init_completion(&card->fw_done); + card->func = func; card->device_id = id; @@ -185,20 +144,18 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) if (ret) { dev_err(&func->dev, "failed to enable function\n"); - goto err_free; + return ret; } /* device tree node parsing and platform specific configuration*/ if (func->dev.of_node) { - ret = mwifiex_sdio_probe_of(&func->dev, card); - if (ret) { - dev_err(&func->dev, "SDIO dt node parse failed\n"); + ret = mwifiex_sdio_probe_of(&func->dev); + if (ret) goto err_disable; - } } - ret = mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops, - MWIFIEX_SDIO); + ret = mwifiex_add_card(card, &card->fw_done, &sdio_ops, + MWIFIEX_SDIO, &func->dev); if (ret) { dev_err(&func->dev, "add card failed\n"); goto err_disable; @@ -210,8 +167,6 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); 
-err_free: - kfree(card); return ret; } @@ -231,17 +186,10 @@ static int mwifiex_sdio_resume(struct device *dev) struct sdio_func *func = dev_to_sdio_func(dev); struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; - mmc_pm_flag_t pm_flag = 0; - if (func) { - pm_flag = sdio_get_host_pm_caps(func); - card = sdio_get_drvdata(func); - if (!card || !card->adapter) { - pr_err("resume: invalid card or adapter\n"); - return 0; - } - } else { - pr_err("resume: sdio_func is not specified\n"); + card = sdio_get_drvdata(func); + if (!card || !card->adapter) { + dev_err(dev, "resume: invalid card or adapter\n"); return 0; } @@ -259,12 +207,7 @@ static int mwifiex_sdio_resume(struct device *dev) mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), MWIFIEX_SYNC_CMD); - /* Disable platform specific wakeup interrupt */ - if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) { - disable_irq_wake(card->plt_wake_cfg->irq_wifi); - if (!card->plt_wake_cfg->wake_by_wifi) - disable_irq(card->plt_wake_cfg->irq_wifi); - } + mwifiex_disable_wake(adapter); return 0; } @@ -285,6 +228,8 @@ mwifiex_sdio_remove(struct sdio_func *func) if (!card) return; + wait_for_completion(&card->fw_done); + adapter = card->adapter; if (!adapter || !adapter->priv_num) return; @@ -292,9 +237,6 @@ mwifiex_sdio_remove(struct sdio_func *func) mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); if (user_rmmod && !adapter->mfg_mode) { - if (adapter->is_suspended) - mwifiex_sdio_resume(adapter->dev); - mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); @@ -302,7 +244,7 @@ mwifiex_sdio_remove(struct sdio_func *func) mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN); } - mwifiex_remove_card(card->adapter, &add_remove_card_sem); + mwifiex_remove_card(adapter); } /* @@ -323,40 +265,38 @@ static int mwifiex_sdio_suspend(struct device *dev) mmc_pm_flag_t pm_flag = 0; int ret = 0; - if (func) { - pm_flag = sdio_get_host_pm_caps(func); - pr_debug("cmd: %s: suspend: PM flag = 0x%x\n", - sdio_func_id(func), pm_flag); - if (!(pm_flag & MMC_PM_KEEP_POWER)) { - pr_err("%s: cannot remain alive while host is" - " suspended\n", sdio_func_id(func)); - return -ENOSYS; - } + pm_flag = sdio_get_host_pm_caps(func); + pr_debug("cmd: %s: suspend: PM flag = 0x%x\n", + sdio_func_id(func), pm_flag); + if (!(pm_flag & MMC_PM_KEEP_POWER)) { + dev_err(dev, "%s: cannot remain alive while host is" + " suspended\n", sdio_func_id(func)); + return -ENOSYS; + } - card = sdio_get_drvdata(func); - if (!card || !card->adapter) { - pr_err("suspend: invalid card or adapter\n"); - return 0; - } - } else { - pr_err("suspend: sdio_func is not specified\n"); + card = sdio_get_drvdata(func); + if (!card) { + dev_err(dev, "suspend: invalid card\n"); return 0; } - adapter = card->adapter; + /* Might still be loading firmware */ + wait_for_completion(&card->fw_done); - /* Enable platform specific wakeup interrupt */ - if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) { - card->plt_wake_cfg->wake_by_wifi = false; - enable_irq(card->plt_wake_cfg->irq_wifi); - enable_irq_wake(card->plt_wake_cfg->irq_wifi); + adapter = card->adapter; + if (!adapter) { + dev_err(dev, "adapter is not valid\n"); + return 0; } + mwifiex_enable_wake(adapter); + /* Enable the Host Sleep */ if (!mwifiex_enable_hs(adapter)) { mwifiex_dbg(adapter, ERROR, "cmd: failed to suspend\n"); adapter->hs_enabling = false; + mwifiex_disable_wake(adapter); return -EFAULT; } @@ -1195,7 +1135,6 @@ static void 
mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter, { u32 total_pkt_len, pkt_len; struct sk_buff *skb_deaggr; - u32 pkt_type; u16 blk_size; u8 blk_num; u8 *data; @@ -1216,8 +1155,6 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter, break; } pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET)); - pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET + - 2)); if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) { mwifiex_dbg(adapter, ERROR, "%s: error in pkt_len,\t" @@ -2066,6 +2003,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter) struct sdio_mmc_card *card = adapter->card; if (adapter->card) { + card->adapter = NULL; sdio_claim_host(card->func); sdio_disable_func(card->func); sdio_release_host(card->func); @@ -2098,9 +2036,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) return ret; } - - adapter->dev = &func->dev; - strcpy(adapter->fw_name, card->firmware); if (card->fw_dump_enh) { adapter->mem_type_mapping_tbl = generic_mem_type_map; @@ -2240,8 +2175,6 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter) kfree(card->mpa_rx.len_arr); kfree(card->mpa_tx.buf); kfree(card->mpa_rx.buf); - sdio_set_drvdata(card->func, NULL); - kfree(card); } /* @@ -2291,6 +2224,14 @@ static void mwifiex_recreate_adapter(struct sdio_mmc_card *card) mwifiex_sdio_remove(func); + /* + * Normally, we would let the driver core take care of releasing these. + * But we're not letting the driver core handle this one. See above + * TODO. + */ + sdio_set_drvdata(func, NULL); + devm_kfree(&func->dev, card); + /* power cycle the adapter */ sdio_claim_host(func); mmc_hw_reset(func->card->host); @@ -2767,14 +2708,11 @@ static struct mwifiex_if_ops sdio_ops = { /* * This function initializes the SDIO driver. * - * This initiates the semaphore and registers the device with - * SDIO bus. + * This registers the device with SDIO bus. */ static int mwifiex_sdio_init_module(void) { - sema_init(&add_remove_card_sem, 1); - /* Clear the flag in case user removes the card. */ user_rmmod = 0; @@ -2793,9 +2731,6 @@ mwifiex_sdio_init_module(void) static void mwifiex_sdio_cleanup_module(void) { - if (!down_interruptible(&add_remove_card_sem)) - up(&add_remove_card_sem); - /* Set the flag as user is removing this module. 
*/ user_rmmod = 1; cancel_work_sync(&sdio_work); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index db837f12c547946d1f76310e9b4c4377052eb904..cdbf3a3ac7f994fbb56ced7df60b501199976202 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -21,6 +21,7 @@ #define _MWIFIEX_SDIO_H +#include #include #include #include @@ -154,11 +155,6 @@ a->mpa_rx.start_port = 0; \ } while (0) -struct mwifiex_plt_wake_cfg { - int irq_wifi; - bool wake_by_wifi; -}; - /* data structure for SDIO MPA TX */ struct mwifiex_sdio_mpa_tx { /* multiport tx aggregation buffer pointer */ @@ -242,9 +238,8 @@ struct mwifiex_sdio_card_reg { struct sdio_mmc_card { struct sdio_func *func; struct mwifiex_adapter *adapter; - struct device_node *plt_of_node; - struct mwifiex_plt_wake_cfg *plt_wake_cfg; + struct completion fw_done; const char *firmware; const struct mwifiex_sdio_card_reg *reg; u8 max_ports; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 2a162c33d271a58adf0c54b935de9063c3f6ddd3..125e448712ddb5aadd43c5d16c4eda87856485a8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -368,7 +368,10 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv, { struct mwifiex_adapter *adapter = priv->adapter; struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg; + u8 *tlv = (u8 *)hs_cfg + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh); + struct mwifiex_ps_param_in_hs *psparam_tlv = NULL; bool hs_activate = false; + u16 size; if (!hscfg_param) /* New Activate command */ @@ -385,13 +388,14 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv, memcpy(((u8 *) hs_cfg) + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh), adapter->arp_filter, adapter->arp_filter_size); - cmd->size = cpu_to_le16 - (adapter->arp_filter_size + - sizeof(struct host_cmd_ds_802_11_hs_cfg_enh) - + S_DS_GEN); + size = adapter->arp_filter_size + + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh) + + S_DS_GEN; + tlv = (u8 *)hs_cfg + + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh) + + adapter->arp_filter_size; } else { - cmd->size = cpu_to_le16(S_DS_GEN + sizeof(struct - host_cmd_ds_802_11_hs_cfg_enh)); + size = S_DS_GEN + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh); } if (hs_activate) { hs_cfg->action = cpu_to_le16(HS_ACTIVATE); @@ -401,12 +405,25 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv, hs_cfg->params.hs_config.conditions = hscfg_param->conditions; hs_cfg->params.hs_config.gpio = hscfg_param->gpio; hs_cfg->params.hs_config.gap = hscfg_param->gap; + + size += sizeof(struct mwifiex_ps_param_in_hs); + psparam_tlv = (struct mwifiex_ps_param_in_hs *)tlv; + psparam_tlv->header.type = + cpu_to_le16(TLV_TYPE_PS_PARAMS_IN_HS); + psparam_tlv->header.len = + cpu_to_le16(sizeof(struct mwifiex_ps_param_in_hs) + - sizeof(struct mwifiex_ie_types_header)); + psparam_tlv->hs_wake_int = cpu_to_le32(HS_DEF_WAKE_INTERVAL); + psparam_tlv->hs_inact_timeout = + cpu_to_le32(HS_DEF_INACTIVITY_TIMEOUT); + mwifiex_dbg(adapter, CMD, "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n", hs_cfg->params.hs_config.conditions, hs_cfg->params.hs_config.gpio, hs_cfg->params.hs_config.gap); } + cmd->size = cpu_to_le16(size); return 0; } @@ -1729,7 +1746,6 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, { struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper; struct mwifiex_ds_tdls_oper *oper = data_buf; - 
struct mwifiex_sta_node *sta_ptr; struct host_cmd_tlv_rates *tlv_rates; struct mwifiex_ie_types_htcap *ht_capab; struct mwifiex_ie_types_qos_info *wmm_qos_info; @@ -1747,7 +1763,6 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, tdls_oper->reason = 0; memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN); - sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac); pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper); @@ -1885,6 +1900,24 @@ static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv, return 0; } +/* This function check if the command is supported by firmware */ +static int mwifiex_is_cmd_supported(struct mwifiex_private *priv, u16 cmd_no) +{ + if (!ISSUPP_ADHOC_ENABLED(priv->adapter->fw_cap_info)) { + switch (cmd_no) { + case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS: + case HostCmd_CMD_802_11_AD_HOC_START: + case HostCmd_CMD_802_11_AD_HOC_JOIN: + case HostCmd_CMD_802_11_AD_HOC_STOP: + return -EOPNOTSUPP; + default: + break; + } + } + + return 0; +} + /* * This function prepares the commands before sending them to the firmware. * @@ -1898,6 +1931,13 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, struct host_cmd_ds_command *cmd_ptr = cmd_buf; int ret = 0; + if (mwifiex_is_cmd_supported(priv, cmd_no)) { + mwifiex_dbg(priv->adapter, ERROR, + "0x%x command not supported by firmware\n", + cmd_no); + return -EOPNOTSUPP; + } + /* Prepare command */ switch (cmd_no) { case HostCmd_CMD_GET_HW_SPEC: @@ -2191,7 +2231,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) { struct mwifiex_adapter *adapter = priv->adapter; int ret; - u16 enable = true; struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl; struct mwifiex_ds_auto_ds auto_ds; enum state_11d_t state_11d; @@ -2218,9 +2257,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) * The cal-data can be read from device tree and/or * a configuration file and downloaded to firmware. 
*/ - if (priv->adapter->iface_type == MWIFIEX_SDIO && - adapter->dev->of_node) { - adapter->dt_node = adapter->dev->of_node; + if (adapter->dt_node) { if (of_property_read_u32(adapter->dt_node, "marvell,wakeup-pin", &data) == 0) { @@ -2228,19 +2265,13 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) adapter->hs_cfg.gpio = data; } - ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node, - "marvell,caldata"); - if (ret) - return -1; + mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node, + "marvell,caldata"); } - if (adapter->cal_data) { - ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA, - HostCmd_ACT_GEN_SET, 0, NULL, - true); - if (ret) - return -1; - } + if (adapter->cal_data) + mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA, + HostCmd_ACT_GEN_SET, 0, NULL, true); /* Read MAC address from HW */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC, @@ -2312,16 +2343,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) if (ret) return -1; - if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) { - /* set ibss coalescing_status */ - ret = mwifiex_send_cmd( - priv, - HostCmd_CMD_802_11_IBSS_COALESCING_STATUS, - HostCmd_ACT_GEN_SET, 0, &enable, true); - if (ret) - return -1; - } - memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl)); amsdu_aggr_ctrl.enable = true; /* Send request to firmware */ diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index a7e9f544f219d717a13ed29de537383c312c2573..35d8636bdb91d14c50b70e72c32e7ab0e10be3c0 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -404,7 +404,7 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv, struct cfg80211_ap_settings *params) { const u8 *vendor_ie; - struct ieee_types_header *wmm_ie; + const u8 *wmm_ie; u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02}; vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, @@ -412,9 +412,9 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv, params->beacon.tail, params->beacon.tail_len); if (vendor_ie) { - wmm_ie = (struct ieee_types_header *)vendor_ie; - memcpy(&bss_cfg->wmm_info, wmm_ie + 1, - sizeof(bss_cfg->wmm_info)); + wmm_ie = vendor_ie; + memcpy(&bss_cfg->wmm_info, wmm_ie + + sizeof(struct ieee_types_header), *(wmm_ie + 1)); priv->wmm_enabled = 1; } else { memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info)); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 73eb0846db210be16476e94f0d7f872992154112..c563160b3b6be93f8dd3c96f317f289fbe1d9d7b 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -24,7 +24,6 @@ static u8 user_rmmod; static struct mwifiex_if_ops usb_ops; -static struct semaphore add_remove_card_sem; static struct usb_device_id mwifiex_usb_table[] = { /* 8766 */ @@ -380,16 +379,17 @@ static int mwifiex_usb_probe(struct usb_interface *intf, struct usb_endpoint_descriptor *epd; int ret, i; struct usb_card_rec *card; - u16 id_vendor, id_product, bcd_device, bcd_usb; + u16 id_vendor, id_product, bcd_device; - card = kzalloc(sizeof(struct usb_card_rec), GFP_KERNEL); + card = devm_kzalloc(&intf->dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; + init_completion(&card->fw_done); + id_vendor = le16_to_cpu(udev->descriptor.idVendor); id_product = le16_to_cpu(udev->descriptor.idProduct); bcd_device = le16_to_cpu(udev->descriptor.bcdDevice); - bcd_usb = le16_to_cpu(udev->descriptor.bcdUSB); 
pr_debug("info: VID/PID = %X/%X, Boot2 version = %X\n", id_vendor, id_product, bcd_device); @@ -475,12 +475,11 @@ static int mwifiex_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, card); - ret = mwifiex_add_card(card, &add_remove_card_sem, &usb_ops, - MWIFIEX_USB); + ret = mwifiex_add_card(card, &card->fw_done, &usb_ops, + MWIFIEX_USB, &card->udev->dev); if (ret) { pr_err("%s: mwifiex_add_card failed: %d\n", __func__, ret); usb_reset_device(udev); - kfree(card); return ret; } @@ -503,17 +502,27 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message) struct usb_tx_data_port *port; int i, j; - if (!card || !card->adapter) { - pr_err("%s: card or card->adapter is NULL\n", __func__); + /* Might still be loading firmware */ + wait_for_completion(&card->fw_done); + + adapter = card->adapter; + if (!adapter) { + dev_err(&intf->dev, "card is not valid\n"); return 0; } - adapter = card->adapter; if (unlikely(adapter->is_suspended)) mwifiex_dbg(adapter, WARN, "Device already suspended\n"); - mwifiex_enable_hs(adapter); + /* Enable the Host Sleep */ + if (!mwifiex_enable_hs(adapter)) { + mwifiex_dbg(adapter, ERROR, + "cmd: failed to suspend\n"); + adapter->hs_enabling = false; + return -EFAULT; + } + /* 'is_suspended' flag indicates device is suspended. * It must be set here before the usb_kill_urb() calls. Reason @@ -559,8 +568,9 @@ static int mwifiex_usb_resume(struct usb_interface *intf) struct mwifiex_adapter *adapter; int i; - if (!card || !card->adapter) { - pr_err("%s: card or card->adapter is NULL\n", __func__); + if (!card->adapter) { + dev_err(&intf->dev, "%s: card->adapter is NULL\n", + __func__); return 0; } adapter = card->adapter; @@ -602,21 +612,13 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) struct usb_card_rec *card = usb_get_intfdata(intf); struct mwifiex_adapter *adapter; - if (!card || !card->adapter) { - pr_err("%s: card or card->adapter is NULL\n", __func__); - return; - } + wait_for_completion(&card->fw_done); adapter = card->adapter; - if (!adapter->priv_num) + if (!adapter || !adapter->priv_num) return; if (user_rmmod && !adapter->mfg_mode) { -#ifdef CONFIG_PM - if (adapter->is_suspended) - mwifiex_usb_resume(intf); -#endif - mwifiex_deauthenticate_all(adapter); mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter, @@ -628,13 +630,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) mwifiex_dbg(adapter, FATAL, "%s: removing card\n", __func__); - mwifiex_remove_card(adapter, &add_remove_card_sem); + mwifiex_remove_card(adapter); - usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); - kfree(card); - - return; } static struct usb_driver mwifiex_usb_driver = { @@ -932,7 +930,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; card->adapter = adapter; - adapter->dev = &card->udev->dev; switch (le16_to_cpu(card->udev->descriptor.idProduct)) { case USB8997_PID_1: @@ -1206,8 +1203,7 @@ static struct mwifiex_if_ops usb_ops = { /* This function initializes the USB driver module. * - * This initiates the semaphore and registers the device with - * USB bus. + * This registers the device with USB bus. 
*/ static int mwifiex_usb_init_module(void) { @@ -1215,8 +1211,6 @@ static int mwifiex_usb_init_module(void) pr_debug("Marvell USB8797 Driver\n"); - sema_init(&add_remove_card_sem, 1); - ret = usb_register(&mwifiex_usb_driver); if (ret) pr_err("Driver register failed!\n"); @@ -1236,9 +1230,6 @@ static int mwifiex_usb_init_module(void) */ static void mwifiex_usb_cleanup_module(void) { - if (!down_interruptible(&add_remove_card_sem)) - up(&add_remove_card_sem); - /* set the flag as user is removing this module */ user_rmmod = 1; diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h index 30e8eb8c259d1586b7d29393b875db70826d94f9..e5f204ea018bd8022b5da4f71f8416fadff6d410 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.h +++ b/drivers/net/wireless/marvell/mwifiex/usb.h @@ -20,6 +20,7 @@ #ifndef _MWIFIEX_USB_H #define _MWIFIEX_USB_H +#include #include #define USB8XXX_VID 0x1286 @@ -75,6 +76,7 @@ struct usb_card_rec { struct mwifiex_adapter *adapter; struct usb_device *udev; struct usb_interface *intf; + struct completion fw_done; u8 rx_cmd_ep; struct urb_context rx_cmd; atomic_t rx_cmd_urb_pending; diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index 0eb246502e1d119b03d83874cba3030ac4849092..28c2f6fae3e644d0787f8a1f9117e7e867f04e2b 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -503,8 +503,10 @@ mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; struct sk_buff *skb, *tmp; - skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) + skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) { + skb_unlink(skb, &ra_list->skb_head); mwifiex_write_data_complete(adapter, skb, 0, -1); + } } /* @@ -600,11 +602,15 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) priv->adapter->if_ops.clean_pcie_ring(priv->adapter); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); - skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) + skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) { + skb_unlink(skb, &priv->tdls_txq); mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + } - skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) + skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) { + skb_unlink(skb, &priv->bypass_txq); mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + } atomic_set(&priv->adapter->bypass_tx_pending, 0); idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL); @@ -1099,6 +1105,7 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, &adapter->bss_prio_tbl[j].bss_prio_head, list) { +try_again: priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv; if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) && @@ -1134,8 +1141,18 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, ra_list_spinlock, flags_ra); } - } + if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) { + atomic_set(&priv_tmp->wmm.highest_queued_prio, + HIGH_PRIO_TID); + /* Iterate current private once more, since + * there still exist packets in data queue + */ + goto try_again; + } else + atomic_set(&priv_tmp->wmm.highest_queued_prio, + NO_PKT_PRIO_TID); + } } return NULL; @@ -1328,9 +1345,11 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, skb = skb_dequeue(&ptr->skb_head); if (adapter->data_sent || adapter->tx_lock_flag) { + ptr->total_pkt_count--; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); skb_queue_tail(&adapter->tx_data_q, 
skb); + atomic_dec(&priv->wmm.tx_pkts_queued); atomic_inc(&adapter->tx_queued); return; } @@ -1388,6 +1407,10 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, if (ret != -EBUSY) { mwifiex_rotate_priolists(priv, ptr, ptr_index); atomic_dec(&priv->wmm.tx_pkts_queued); + spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); + ptr->total_pkt_count--; + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, + ra_list_flags); } } diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile index ea9ed8a5db4d3c5169734287946444c980b68838..08fc802ead4b56c423091a3856c295e4caa7c3b2 100644 --- a/drivers/net/wireless/mediatek/mt7601u/Makefile +++ b/drivers/net/wireless/mediatek/mt7601u/Makefile @@ -1,5 +1,3 @@ -ccflags-y += -D__CHECK_ENDIAN__ - obj-$(CONFIG_MT7601U) += mt7601u.o mt7601u-objs = \ diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c index 44d46e25db80ba6bd4aa70d387d1de404f2559c4..a6e9017662269ee06b0d6f1be0d039f83542dbf5 100644 --- a/drivers/net/wireless/mediatek/mt7601u/init.c +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -293,13 +293,13 @@ static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev) ok = 0; i = 200; while (i--) { - if ((mt76_rr(dev, 0x0430) & 0x00ff0000) || - (mt76_rr(dev, 0x0a30) & 0xffffffff) || - (mt76_rr(dev, 0x0a34) & 0xffffffff)) - ok++; - if (ok > 6) - break; - + if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) && + !mt76_rr(dev, 0x0a30) && + !mt76_rr(dev, 0x0a34)) { + if (ok++ > 5) + break; + continue; + } msleep(1); } diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h index 27a429d90cec02f6cdaec64194ccca7f9ea1bd28..2a8837002f0070a965c0f23b0209f4e7b78617af 100644 --- a/drivers/net/wireless/mediatek/mt7601u/regs.h +++ b/drivers/net/wireless/mediatek/mt7601u/regs.h @@ -192,6 +192,9 @@ #define MT_BCN_OFFSET_BASE 0x041c #define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2)) +#define MT_RXQ_STA 0x0430 +#define MT_TXQ_STA 0x0434 + #define MT_RF_CSR_CFG 0x0500 #define MT_RF_CSR_CFG_DATA GENMASK(7, 0) #define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 155f343981fe29679d278b0621d3cf7ce64b2a77..085c5b423bdfa8e4992be6b12047b3b86191a375 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -1459,10 +1459,7 @@ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) * Start validation of the data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); - if (!is_valid_ether_addr(mac)) { - eth_random_addr(mac); - rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac); - } + rt2x00lib_set_mac_address(rt2x00dev, mac); rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); if (word == 0xffff) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 2553cdd7406623cde30a99531e4d86a74ae4836e..9832fd50c7935e39c27653b1ad935ab6dd0176fc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -1585,10 +1585,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) * Start validation of the data that has been read. 
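The mt7601u_mac_stop_hw() hunk above replaces a loop that counted busy register reads with one that only finishes once the TX/RX status registers have read back idle more than five times, sleeping 1 ms after every busy poll. Below is a commented sketch of that logic, assuming the driver's own mt76_rr() accessor, register offsets and struct mt7601u_dev (plus <linux/delay.h> for msleep()); my_mac_stop_poll is a hypothetical name.

static void my_mac_stop_poll(struct mt7601u_dev *dev)
{
        int idle_reads = 0;
        int tries = 200;

        while (tries--) {
                bool busy = (mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) ||
                            mt76_rr(dev, 0x0a30) ||
                            mt76_rr(dev, 0x0a34);

                if (!busy) {
                        /* Stop after more than five idle reads; note the
                         * count is not reset by a later busy read, which
                         * matches the hunk above.
                         */
                        if (idle_reads++ > 5)
                                break;
                        continue;
                }
                msleep(1);      /* busy: back off before polling again */
        }
}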
*/ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); - if (!is_valid_ether_addr(mac)) { - eth_random_addr(mac); - rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac); - } + rt2x00lib_set_mac_address(rt2x00dev, mac); rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); if (word == 0xffff) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index 2d64611de300146d98bcc254171f961e3af26831..cd3ab5a9e98da6d25d8d978b28d593afe0033818 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -1349,10 +1349,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) * Start validation of the data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); - if (!is_valid_ether_addr(mac)) { - eth_random_addr(mac); - rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac); - } + rt2x00lib_set_mac_address(rt2x00dev, mac); rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); if (word == 0xffff) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index bf3f0a39908c816efa508db2458ec276a15679a1..4fb79e05078ffe87f2d64ca480dc1ff3a6a5ae0f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1621,7 +1621,7 @@ static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev, * => Protect all HT40 transmissions. */ mm20_mode = gf20_mode = 0; - mm40_mode = gf40_mode = 2; + mm40_mode = gf40_mode = 1; break; case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: @@ -1644,7 +1644,7 @@ static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev, * Legacy STAs are present * => Protect all HT transmissions. */ - mm20_mode = mm40_mode = gf20_mode = gf40_mode = 2; + mm20_mode = mm40_mode = gf20_mode = gf40_mode = 1; /* * If erp protection is needed we have to protect HT @@ -1660,7 +1660,7 @@ static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev, /* check for STAs not supporting greenfield mode */ if (any_sta_nongf) - gf20_mode = gf40_mode = 2; + gf20_mode = gf40_mode = 1; /* Update HT protection config */ rt2800_register_read(rt2x00dev, MM20_PROT_CFG, ®); @@ -1691,8 +1691,6 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, if (changed & BSS_CHANGED_ERP_PREAMBLE) { rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, ®); - rt2x00_set_field32(®, AUTO_RSP_CFG_BAC_ACK_POLICY, - !!erp->short_preamble); rt2x00_set_field32(®, AUTO_RSP_CFG_AR_PREAMBLE, !!erp->short_preamble); rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); @@ -1707,7 +1705,7 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, if (changed & BSS_CHANGED_BASIC_RATES) { rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, - erp->basic_rates); + 0xff0 | erp->basic_rates); rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); } @@ -4672,11 +4670,14 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) 0x00000000); } } else if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392) || - rt2x00_rt(rt2x00dev, RT5592)) { + rt2x00_rt(rt2x00dev, RT5392)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); + } else if (rt2x00_rt(rt2x00dev, RT5592)) { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); + 
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); @@ -4735,9 +4736,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, ®); rt2x00_set_field32(®, AUTO_RSP_CFG_AUTORESPONDER, 1); rt2x00_set_field32(®, AUTO_RSP_CFG_BAC_ACK_POLICY, 1); - rt2x00_set_field32(®, AUTO_RSP_CFG_CTS_40_MMODE, 0); + rt2x00_set_field32(®, AUTO_RSP_CFG_CTS_40_MMODE, 1); rt2x00_set_field32(®, AUTO_RSP_CFG_CTS_40_MREF, 0); - rt2x00_set_field32(®, AUTO_RSP_CFG_AR_PREAMBLE, 1); + rt2x00_set_field32(®, AUTO_RSP_CFG_AR_PREAMBLE, 0); rt2x00_set_field32(®, AUTO_RSP_CFG_DUAL_CTS_EN, 0); rt2x00_set_field32(®, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0); rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); @@ -4770,9 +4771,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, MM20_PROT_CFG, ®); rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_RATE, 0x4004); - rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_CTRL, 1); rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1); - rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 0); rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0); @@ -4783,9 +4784,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, MM40_PROT_CFG, ®); rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_RATE, 0x4084); - rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_CTRL, 1); rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1); - rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 0); rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1); @@ -4796,9 +4797,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, GF20_PROT_CFG, ®); rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_RATE, 0x4004); - rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_CTRL, 1); rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1); - rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 0); rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0); @@ -4809,9 +4810,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, GF40_PROT_CFG, ®); rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_RATE, 0x4084); - rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_CTRL, 1); rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1); - rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 0); rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1); @@ -6756,7 +6757,6 @@ int rt2800_enable_radio(struct rt2x00_dev 
*rt2x00dev) rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1); - rt2x00_set_field32(®, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2); rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); @@ -6919,10 +6919,7 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) * Start validation of the data that has been read. */ mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); - if (!is_valid_ether_addr(mac)) { - eth_random_addr(mac); - rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac); - } + rt2x00lib_set_mac_address(rt2x00dev, mac); rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word); if (word == 0xffff) { @@ -7464,7 +7461,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) char *default_power1; char *default_power2; char *default_power3; - unsigned int i; + unsigned int i, tx_chains, rx_chains; u32 reg; /* @@ -7475,7 +7472,6 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) /* * Initialize all hw fields. */ - ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(rt2x00dev->hw, AMPDU_AGGREGATION); ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); @@ -7589,21 +7585,24 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40; - if (rt2x00dev->default_ant.tx_chain_num >= 2) + tx_chains = rt2x00dev->default_ant.tx_chain_num; + rx_chains = rt2x00dev->default_ant.rx_chain_num; + + if (tx_chains >= 2) spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC; - spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num << - IEEE80211_HT_CAP_RX_STBC_SHIFT; + spec->ht.cap |= rx_chains << IEEE80211_HT_CAP_RX_STBC_SHIFT; spec->ht.ampdu_factor = 3; spec->ht.ampdu_density = 4; - spec->ht.mcs.tx_params = - IEEE80211_HT_MCS_TX_DEFINED | - IEEE80211_HT_MCS_TX_RX_DIFF | - ((rt2x00dev->default_ant.tx_chain_num - 1) << - IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + spec->ht.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains != rx_chains) { + spec->ht.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + spec->ht.mcs.tx_params |= + (tx_chains - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + } - switch (rt2x00dev->default_ant.rx_chain_num) { + switch (rx_chains) { case 3: spec->ht.mcs.rx_mask[2] = 0xff; case 2: diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 4b0bb6b4f6f11d4b2c924d1d77ab8bbcc4d80e98..f38c44061b5ba664f832d62b4a899b8a80568b1f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -177,7 +177,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev, if (rt2800usb_txstatus_pending(rt2x00dev)) { /* Read register after 1 ms */ hrtimer_start(&rt2x00dev->txstatus_timer, - ktime_set(0, TXSTATUS_READ_INTERVAL), + TXSTATUS_READ_INTERVAL, HRTIMER_MODE_REL); return false; } @@ -204,7 +204,7 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev) /* Read TX_STA_FIFO register after 2 ms */ hrtimer_start(&rt2x00dev->txstatus_timer, - ktime_set(0, 2*TXSTATUS_READ_INTERVAL), + 2 * TXSTATUS_READ_INTERVAL, HRTIMER_MODE_REL); } @@ -341,8 +341,6 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_BBP, 1); rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); - 
rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); - rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_RESET, REGISTER_TIMEOUT); @@ -353,12 +351,11 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev) static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) { - u32 reg; + u32 reg = 0; if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev))) return -EIO; - rt2x00usb_register_read(rt2x00dev, USB_DMA_CFG, ®); rt2x00_set_field32(®, USB_DMA_CFG_PHY_CLEAR, 0); rt2x00_set_field32(®, USB_DMA_CFG_RX_BULK_AGG_EN, 0); rt2x00_set_field32(®, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index f68d492129c6f7189e5116486ffbd2c97c866ec2..aa3d4ceef4adf57bc1dde39099c90cca01c39067 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1403,6 +1403,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, */ u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif); +void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr); /* * Interrupt context handlers. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 4e0c5653054bbdc9d313872760c2994f681fd191..eb7b714436577b1abc64f096608d3c3b80acba24 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include "rt2x00.h" #include "rt2x00lib.h" @@ -931,6 +933,21 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry, entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; } +void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) +{ + const char *mac_addr; + + mac_addr = of_get_mac_address(rt2x00dev->dev->of_node); + if (mac_addr) + ether_addr_copy(eeprom_mac_addr, mac_addr); + + if (!is_valid_ether_addr(eeprom_mac_addr)) { + eth_random_addr(eeprom_mac_addr); + rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", eeprom_mac_addr); + } +} +EXPORT_SYMBOL_GPL(rt2x00lib_set_mac_address); + static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, struct hw_mode_spec *spec) { @@ -1362,11 +1379,13 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) if (rt2x00dev->bcn->limit > 0) rt2x00dev->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_AP) | #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif - BIT(NL80211_IFTYPE_WDS); +#ifdef CONFIG_WIRELESS_WDS + BIT(NL80211_IFTYPE_WDS) | +#endif + BIT(NL80211_IFTYPE_AP); rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -1422,7 +1441,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) cancel_work_sync(&rt2x00dev->intf_work); cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); cancel_work_sync(&rt2x00dev->sleep_work); -#ifdef CONFIG_RT2X00_LIB_USB +#if IS_ENABLED(CONFIG_RT2X00_LIB_USB) if (rt2x00_is_usb(rt2x00dev)) { usb_kill_anchored_urbs(rt2x00dev->anchor); hrtimer_cancel(&rt2x00dev->txstatus_timer); diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 03013eb2f6429d89d96c01013d4ac4a5213b1550..5306a3b2622d0233b2114ab1b1c14217cd2e11fd 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -2413,10 +2413,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) * Start validation of the 
data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); - if (!is_valid_ether_addr(mac)) { - eth_random_addr(mac); - rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac); - } + rt2x00lib_set_mac_address(rt2x00dev, mac); rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); if (word == 0xffff) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index c1397a6d3cee06b7df0fb216a5f7047adfd0caa1..1a29c4d205a5ec6272371940185efe07848eb7d2 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -1766,10 +1766,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) * Start validation of the data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); - if (!is_valid_ether_addr(mac)) { - eth_random_addr(mac); - rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac); - } + rt2x00lib_set_mac_address(rt2x00dev, mac); rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); if (word == 0xffff) { diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 0881ba8535f4e11bec61d029dc58f29a7f0c4500..b94479441b0c77f41dada030834c8c4b64a86210 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -53,7 +53,7 @@ #include #include -#include +#include /* Warning : these stuff will slow down the driver... */ #define WIRELESS_SPY /* Enable spying addresses */ @@ -272,7 +272,6 @@ static const struct net_device_ops ray_netdev_ops = { .ndo_set_config = ray_dev_config, .ndo_get_stats = ray_get_stats, .ndo_set_rx_mode = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 08d587a342d32c8e2111e10955519009d9718ca6..df551b2b56ebc9ca0cea244e2dcac22153cf7129 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1337,10 +1337,11 @@ struct rtl8xxxu_fileops { u32 ramask, int sgi); void (*report_connect) (struct rtl8xxxu_priv *priv, u8 macid, bool connect); - void (*fill_txdesc) (struct ieee80211_hdr *hdr, - struct rtl8xxxu_txdesc32 *tx_desc, u32 rate, - u16 rate_flag, bool sgi, bool short_preamble, - bool ampdu_enable); + void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *tx_info, + struct rtl8xxxu_txdesc32 *tx_desc, bool sgi, + bool short_preamble, bool ampdu_enable, + u32 rts_rate); int writeN_block_size; int rx_agg_buf_size; char tx_desc_size; @@ -1434,14 +1435,16 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb); int rtl8xxxu_gen2_channel_to_group(int channel); bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv, int result[][8], int c1, int c2); -void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr, - struct rtl8xxxu_txdesc32 *tx_desc, u32 rate, - u16 rate_flag, bool sgi, bool short_preamble, - bool ampdu_enable); -void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr, - struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate, - u16 rate_flag, bool sgi, bool short_preamble, - bool ampdu_enable); +void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *tx_info, + struct rtl8xxxu_txdesc32 *tx_desc, bool sgi, + bool short_preamble, bool ampdu_enable, + u32 rts_rate); +void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, 
struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *tx_info, + struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi, + bool short_preamble, bool ampdu_enable, + u32 rts_rate); extern struct rtl8xxxu_fileops rtl8192cu_fops; extern struct rtl8xxxu_fileops rtl8192eu_fops; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index a793fedc3654626ece17de0eaec4dab57a2872eb..a1178c5d6ad8d4a55d0642b73f94396bb2e69a13 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1556,7 +1556,7 @@ static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv) return ret; } -void rtl8192eu_power_off(struct rtl8xxxu_priv *priv) +static void rtl8192eu_power_off(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index a5e6ec2152bff8808b38bd8f96dfe0ff37a19918..3a86675020a210f4bd325b2d8376822e19203d59 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4372,6 +4372,13 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv, void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, u8 macid, bool connect) { +#ifdef RTL8XXXU_GEN2_REPORT_CONNECT + /* + * Barry Day reports this causes issues with 8192eu and 8723bu + * devices reconnecting. The reason for this is unclear, but + * until it is better understood, leave the code in place but + * disabled, so it is not lost. + */ struct h2c_cmd h2c; memset(&h2c, 0, sizeof(struct h2c_cmd)); @@ -4383,6 +4390,7 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, h2c.media_status_rpt.parm &= ~BIT(0); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt)); +#endif } void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) @@ -4759,13 +4767,28 @@ static void rtl8xxxu_dump_action(struct device *dev, * This format is used on 8188cu/8192cu/8723au */ void -rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr, - struct rtl8xxxu_txdesc32 *tx_desc, u32 rate, - u16 rate_flag, bool sgi, bool short_preamble, - bool ampdu_enable) +rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *tx_info, + struct rtl8xxxu_txdesc32 *tx_desc, bool sgi, + bool short_preamble, bool ampdu_enable, u32 rts_rate) { + struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; + u32 rate; + u16 rate_flags = tx_info->control.rates[0].flags; u16 seq_number; + if (rate_flags & IEEE80211_TX_RC_MCS && + !ieee80211_is_mgmt(hdr->frame_control)) + rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0; + else + rate = tx_rate->hw_value; + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX) + dev_info(dev, "%s: TX rate: %d, pkt size %d\n", + __func__, rate, cpu_to_le16(tx_desc->pkt_size)); + seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); tx_desc->txdw5 = cpu_to_le32(rate); @@ -4796,15 +4819,16 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr, if (sgi) tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI); - if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { - /* - * Use RTS rate 24M - does the mac80211 tell - * us which to use? 
- */ - tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M << - TXDESC32_RTS_RATE_SHIFT); + /* + * rts_rate is zero if RTS/CTS or CTS to SELF are not enabled + */ + tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT); + if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE); tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE); + } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + tx_desc->txdw4 |= cpu_to_le32(TXDESC32_CTS_SELF_ENABLE); + tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE); } } @@ -4813,16 +4837,31 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr, * This format is used on 8192eu/8723bu */ void -rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr, - struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate, - u16 rate_flag, bool sgi, bool short_preamble, - bool ampdu_enable) +rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *tx_info, + struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi, + bool short_preamble, bool ampdu_enable, u32 rts_rate) { + struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; struct rtl8xxxu_txdesc40 *tx_desc40; + u32 rate; + u16 rate_flags = tx_info->control.rates[0].flags; u16 seq_number; tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc32; + if (rate_flags & IEEE80211_TX_RC_MCS && + !ieee80211_is_mgmt(hdr->frame_control)) + rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0; + else + rate = tx_rate->hw_value; + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX) + dev_info(dev, "%s: TX rate: %d, pkt size %d\n", + __func__, rate, cpu_to_le16(tx_desc40->pkt_size)); + seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); tx_desc40->txdw4 = cpu_to_le32(rate); @@ -4849,15 +4888,19 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr, if (short_preamble) tx_desc40->txdw5 |= cpu_to_le32(TXDESC40_SHORT_PREAMBLE); - if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { - /* - * Use RTS rate 24M - does the mac80211 tell - * us which to use? 
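Both fill_txdesc variants stop hard-coding DESC_RATE_24M for protection frames; the transmit path now derives the protection rate from mac80211 and passes it down as rts_rate, leaving it at zero when neither RTS/CTS nor CTS-to-self was requested (the rtl8xxxu_tx() hunk further down does exactly this). A hedged sketch of that selection follows; my_pick_rts_rate is a hypothetical helper name, while ieee80211_get_rts_cts_rate() is the mac80211 helper the patch itself uses.

#include <net/mac80211.h>

/* Returns the hardware rate index for protection frames, or 0 when
 * mac80211 requested no protection for this frame.
 */
static u32 my_pick_rts_rate(struct ieee80211_hw *hw,
                            struct ieee80211_tx_info *tx_info)
{
        u16 rate_flag = tx_info->control.rates[0].flags;

        if (rate_flag & (IEEE80211_TX_RC_USE_RTS_CTS |
                         IEEE80211_TX_RC_USE_CTS_PROTECT))
                return ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value;

        return 0;
}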
- */ - tx_desc40->txdw4 |= cpu_to_le32(DESC_RATE_24M << - TXDESC40_RTS_RATE_SHIFT); + tx_desc40->txdw4 |= cpu_to_le32(rts_rate << TXDESC40_RTS_RATE_SHIFT); + /* + * rts_rate is zero if RTS/CTS or CTS to SELF are not enabled + */ + if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE); tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE); + } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + /* + * For some reason the vendor driver doesn't set + * TXDESC40_HW_RTS_ENABLE for CTS to SELF + */ + tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_CTS_SELF_ENABLE); } } @@ -4867,14 +4910,13 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); struct rtl8xxxu_priv *priv = hw->priv; struct rtl8xxxu_txdesc32 *tx_desc; struct rtl8xxxu_tx_urb *tx_urb; struct ieee80211_sta *sta = NULL; struct ieee80211_vif *vif = tx_info->control.vif; struct device *dev = &priv->udev->dev; - u32 queue, rate; + u32 queue, rts_rate; u16 pktlen = skb->len; u16 seq_number; u16 rate_flag = tx_info->control.rates[0].flags; @@ -4901,10 +4943,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, goto error; } - if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX) - dev_info(dev, "%s: TX rate: %d (%d), pkt size %d\n", - __func__, tx_rate->bitrate, tx_rate->hw_value, pktlen); - if (ieee80211_is_action(hdr->frame_control)) rtl8xxxu_dump_action(dev, hdr); @@ -4958,12 +4996,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, } } - if (rate_flag & IEEE80211_TX_RC_MCS && - !ieee80211_is_mgmt(hdr->frame_control)) - rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0; - else - rate = tx_rate->hw_value; - if (rate_flag & IEEE80211_TX_RC_SHORT_GI || (ieee80211_is_data_qos(hdr->frame_control) && sta && sta->ht_cap.cap & @@ -4974,10 +5006,17 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, (sta && vif && vif->bss_conf.use_short_preamble)) short_preamble = true; + if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) + rts_rate = ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value; + else if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) + rts_rate = ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value; + else + rts_rate = 0; + seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - priv->fops->fill_txdesc(hdr, tx_desc, rate, rate_flag, - sgi, short_preamble, ampdu_enable); + priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble, + ampdu_enable, rts_rate); rtl8xxxu_calc_tx_desc_csum(tx_desc); diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile index ad6d3c52ec572247067544a4a5ca81b09d020d68..84c2e826fa1d48724a7a28e4c3f1ffd2225ca2a9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/Makefile @@ -30,5 +30,3 @@ obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/ obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/ obj-$(CONFIG_RTL8821AE) += rtl8821ae/ obj-$(CONFIG_RTL8192EE) += rtl8192ee/ - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 264466f59c57029e71c64109b58c0b359557f5fe..4ac928bf1f8e6c77f68386c353c20f26168c77e4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1303,12 +1303,13 @@ EXPORT_SYMBOL_GPL(rtl_action_proc); static void setup_arp_tx(struct 
rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc) { + struct ieee80211_hw *hw = rtlpriv->hw; + rtlpriv->ra.is_special_data = true; if (rtlpriv->cfg->ops->get_btc_status()) rtlpriv->btcoexist.btc_ops->btc_special_packet_notify( rtlpriv, 1); - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); + rtl_lps_leave(hw); ppsc->last_delaylps_stamp_jiffies = jiffies; } @@ -1381,8 +1382,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx, if (is_tx) { rtlpriv->ra.is_special_data = true; - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); + rtl_lps_leave(hw); ppsc->last_delaylps_stamp_jiffies = jiffies; } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile index 47ceecfcb7dc1059777d31b6ce1d5224a7930f6b..d1454d4f08a556998e5e7ea96f886ae7aaedfe4c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile @@ -3,5 +3,3 @@ btcoexist-objs := halbtc8723b2ant.o \ rtl_btc.o obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 8e7f23c11680a5fa98352e5052f02fa9ae091de8..ded1493fee9c975742ede341b991b4446a48e3d7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1150,10 +1150,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, } else { mstatus = RT_MEDIA_DISCONNECT; - if (mac->link_state == MAC80211_LINKED) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - } + if (mac->link_state == MAC80211_LINKED) + rtl_lps_leave(hw); if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); mac->link_state = MAC80211_NOLINK; @@ -1431,8 +1429,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw, } if (mac->link_state == MAC80211_LINKED) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); + rtl_lps_leave(hw); mac->link_state = MAC80211_LINKED_SCANNING; } else { rtl_ips_nic_on(hw); @@ -1832,7 +1829,8 @@ bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); pskb = __skb_dequeue(&ring->queue); - kfree_skb(pskb); + if (pskb) + dev_kfree_skb_irq(pskb); /*this is wrong, fill_tx_cmddesc needs update*/ pdesc = &ring->desc[0]; diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 0dfa9eac3926dd488262529cd536c23c7eea08be..8bfe020edd3a24aaa9f8ec7c194f5b8fa7093d1f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -663,11 +659,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) } if (((rtlpriv->link_info.num_rx_inperiod + - rtlpriv->link_info.num_tx_inperiod) > 8) || - (rtlpriv->link_info.num_rx_inperiod > 2)) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - } + rtlpriv->link_info.num_tx_inperiod) > 8) || + (rtlpriv->link_info.num_rx_inperiod > 2)) + rtl_lps_leave(hw); } static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, @@ -918,10 +912,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) } if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || - (rtlpriv->link_info.num_rx_inperiod > 2)) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - } + (rtlpriv->link_info.num_rx_inperiod > 2)) + rtl_lps_leave(hw); skb = new_skb; no_new: if (rtlpriv->use_new_trx_flow) { diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h index b951ebac15eafa75e03a9124a5a1f130d6047310..578b1d900bfbcd9f7a4a8f39b73a9248651e4a7c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.h +++ b/drivers/net/wireless/realtek/rtlwifi/pci.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 18d979affc1861ebc97dad9fe083d7159d40b746..d0ffc4d508cff2df70606c9a88845a22884c7bc9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -407,8 +407,8 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) } } -/*Enter the leisure power save mode.*/ -void rtl_lps_enter(struct ieee80211_hw *hw) +/* Interrupt safe routine to enter the leisure power save mode.*/ +static void rtl_lps_enter_core(struct ieee80211_hw *hw) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); @@ -444,10 +444,9 @@ void rtl_lps_enter(struct ieee80211_hw *hw) spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } -EXPORT_SYMBOL(rtl_lps_enter); -/*Leave the leisure power save mode.*/ -void rtl_lps_leave(struct ieee80211_hw *hw) +/* Interrupt safe routine to leave the leisure power save mode.*/ +static void rtl_lps_leave_core(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); @@ -477,7 +476,6 @@ void rtl_lps_leave(struct ieee80211_hw *hw) } spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } -EXPORT_SYMBOL(rtl_lps_leave); /* For sw LPS*/ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len) @@ -670,12 +668,34 @@ void rtl_lps_change_work_callback(struct work_struct *work) struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->enter_ps) - rtl_lps_enter(hw); + rtl_lps_enter_core(hw); else - rtl_lps_leave(hw); + rtl_lps_leave_core(hw); } EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback); +void rtl_lps_enter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (!in_interrupt()) + return rtl_lps_enter_core(hw); + rtlpriv->enter_ps = true; + schedule_work(&rtlpriv->works.lps_change_work); +} 
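The ps.c rework keeps the old enter/leave bodies as *_core routines and adds interrupt-safe wrappers (rtl_lps_enter() just above, rtl_lps_leave() just below): from process context the core routine runs directly, from interrupt context the request is recorded and handed off to the existing lps_change work. Below is a generic sketch of that defer-when-atomic pattern under hypothetical names (my_dev, my_request_powersave); it assumes ps_work was set up with INIT_WORK().

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
        bool want_ps;
        struct work_struct ps_work;     /* INIT_WORK(&ps_work, my_ps_work_fn) */
};

static void my_do_powersave(struct my_dev *dev, bool enter)
{
        /* May sleep or take locks -- not callable from IRQ context. */
}

static void my_ps_work_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, ps_work);

        my_do_powersave(dev, dev->want_ps);
}

static void my_request_powersave(struct my_dev *dev, bool enter)
{
        if (!in_interrupt()) {
                my_do_powersave(dev, enter);
                return;
        }
        /* Interrupt context: record the request and defer it. */
        dev->want_ps = enter;
        schedule_work(&dev->ps_work);
}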
+EXPORT_SYMBOL_GPL(rtl_lps_enter); + +void rtl_lps_leave(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (!in_interrupt()) + return rtl_lps_leave_core(hw); + rtlpriv->enter_ps = false; + schedule_work(&rtlpriv->works.lps_change_work); +} +EXPORT_SYMBOL_GPL(rtl_lps_leave); + void rtl_swlps_wq_callback(void *data) { struct rtl_works *rtlworks = container_of_dwork_rtl(data, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile index 676e7de27f278d16f8dcbb420d379775962cafbb..dae4f0f19cd3198b6cf5e4136b4c4ddb4c96b5a6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile @@ -11,5 +11,3 @@ rtl8188ee-objs := \ trx.o obj-$(CONFIG_RTL8188EE) += rtl8188ee.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h index 071ccee69eae6b69a4f117c9afd059be527dc83f..0fd2bac14db611162cadeaa339bdbc4885abcb9e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h index 21bd4a5337abc633686b1d56e749f70a3b42c944..b884c30c7b37ce1bf3a0f09ce132c20ed9baeb8b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h index 1850fde881b587c6bbb15a630a58dfaaa16ece07..d38dbca3c19e8f4da02949d351ce28cd12f32e3a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c index f05c2c674165baded2af82c03ca2a84a27f3fe86..6ea7fd7bb527c511a88ae0ec280dc92ae689f423 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile index aee42d7ae8a2307c85b6b6dd8aea998b10c18ac6..0546b755625950acea7fab0c08106d50e2d2850c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile @@ -5,5 +5,3 @@ rtl8192c-common-objs := \ phy_common.o obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c index 316be5ff69ca80fd0a988ab732d2f596a933e0ec..bdc132bef822b4b5a7e65b25663d18b65ca00615 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h index 6a72d0c8afa073573f886b7455f90ffbc5ff773b..441604ff5858b228de53cd5deb0dc60396fcaeac 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h index 864806c19ca7b21043de97cdbbe79c9763b289b6..c5fa14bda38704aac4d122c05b45ba39fa3fa959 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c index 918b1d129e77cc48e0b936b207bfcd0d861489c5..889bd130115413ff2d5d98ccf7e07c301aad8b00 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c index 27e3d5f9ca3424bd75c7ae3d430b25c2e3700275..94dd25cf1ca86a6a3fe6a4ff55efb5b8fc9074a5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h index 202412577bf0cf522f0293e97fcd1a6bcb71c57d..d11261e05a2e822cb51ff8064f5431a6e1144339 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile index c0cb0cfe7d370ec0a65abb65d521b5ff03e8f424..577c7adbc3225d7e9298523aee07ca4ca34455d4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile @@ -9,5 +9,3 @@ rtl8192ce-objs := \ trx.o obj-$(CONFIG_RTL8192CE) += rtl8192ce.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h index 690a7a1675e2019c16932e08b4cfa86731e03566..b90aaf12807229ac2b34360575114b67fe782668 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c index 09898cf2e07ab4859c7ff733bf3a94d94b127356..2c8205e46be430568839c592f9b054c1a0cf228b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h index 38ba707015f52a0da261da7bc2d5cb4b443dd39f..9761d0ca31b0eaedf61b48ec28dd682be7168b40 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index a47be73a0980247269859d14b90031b53a765266..4483d40ecad1b7adec14077caec71fbeb2f83545 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h index 98a086822aaceb5172e1d040352ab2d3f4bce283..877f138a0cb93b184514be2b79896ccba4e808fa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c index 24e483ba3fa45440c51d093a50fdcb6d2d813af4..833193b751f73673cf3c0460a79a3427e3a6ee04 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h index c5761066d3835118ddf9f69a5bf4130e2bef4bea..f6edb9cd9b6764e78622da9c6940d8f7b5bfc36c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c index 46d0d945f283a3c7fcdbb13cf360d83b17969703..d1b6a8fe7b6a5c64094da84efafe5f85003d1f5f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h index dadc02b5de0bba0c66fb29722e3eccbb0b312ae9..93f3bc0197b4de8979cd850f102054fb27b53a43 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h index dc8460c0b32f44f60b8644cb04409c7d4235926a..1bb7ed35812d7dcfdd571da594834c7e66a44a31 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c index a9c406f33d0a57ffd818b95b03e61a28cb4ca6bb..7cae6350437c72395a5a9bed876329a474a7bb55 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h index ebd72cae10b6ecf680deb765a9ce26e779d7c6fc..22c5e6f51331f48bbf8aec5dfbf93e2e5909cf47 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index 8b6e37ce3f6690ae499d88bfef81ca2913ecc48f..691ddef1ae28eab7d2a193fca3a72baa7871e480 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h index d2367a5d0cf5152ff9595328eac103b4923811a2..9a1c89cbbda101508be5a12657aa881368d6cd47 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c index 752f943a84ae10bf7f2f466bcc35078fcf0bc5dc..98b06d48a2dd8bbddd241f38211bedd772ff9c77 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h index 8b79161f71be25764db207edea9634127aab340f..51e4e07396a6821668c48e071c3839027a7b1260 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 781af1b99eb5f77c1124d8534e25a0be596fb665..2ab4a00246cca14fa77af6ba98a176f29917884f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h index 607304586c030caf92b7a194853e65e53f2e2163..66291fc341e73122df27d144430792b69035005b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile index ad2de6b839efa077cce86e61b8ae8e1b51c7aef5..97437dadc287f4344c91eafc83329411d009a2ae 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile @@ -10,5 +10,3 @@ rtl8192cu-objs := \ trx.o obj-$(CONFIG_RTL8192CU) += rtl8192cu.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h index 74a479ac323d53716e5e9b02f7d179d4370ae5c5..316fe9990b6dd707fba7524acbc29e510df153de 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c index c16209a336eac66c6b7ad78afa6ac3d24b197796..00fc0685317a7a1995207ffbaaf1af685033181c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h index fafa6bac2a3f5062b98911dc5802dd9e82fc2b95..ce71433792e398a60b16145e0fc4e8d6f151cb0b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index ae8f055483facb429618afb07284953ec6fd9d77..5c7da0cfc684eb6039705479bda3690f5347016b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h index 67588083e6cc7ae9009c57a1fd8b07bd178c6e4d..932f056f7ef872576485d7767c73b13b93497a4f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c index 8514ab652520ac6b86742130f901eb03830eb3bd..c6240813ff7b3034a81a58aee10d88b3127ae554 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h index 0f372278b7af84e4d310ecc7fb2a608f127c4385..551deb8afb6f7e2a48b9b9781027dcae70b7b6a2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index 68ca734853c15fd521c516cb06b317994ec8666f..cf212f694db5011f0ef45f25a21c2c582a58cca6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h index 20a49ec8459b4e740d07b501ffdf90db83b33d2a..8573b7e257d98c8602170a1f89587f0079f0ca4d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c index 4b2976465905e3130511f1affc5ef00e1fa6e4e3..f35f435c094eea2cc397748b0b0f207369f748b3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h index 42b0686604833d87310e9d1d21295585ecc0b9c7..a422c4db1a41b055cd4914691b717abb872509a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h index 8b81465c629b9abfae5f337d709c9f45d3d99317..8185886daa8ef1c8b376ff1ff55936fec0bef9bf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index ec2ea56f7933162b1d131f0b251a36e3bb2470fa..5e3183024aa01d56f99171b359b4cb5c7e7e6797 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h index 6f987de5b4413ee74b71fbb7ddef9cb39929954a..07aec0b20cc9daaf4e0313b2d561580dab2b0fd5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index f953320f0e23a86661d141a67fe109f29c09a80c..b84e13ac6ead554b28b25f36b4d2e0508157cfd0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h index a1310abd0d54605e6bbd1a1cc9f9200bc3ecbe43..4ea2cb225580045f655743bbbb233fda86cf7633 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c index 7903c154de00d2cd193e54ed0d33a27d68ca9c1c..b3ac981d88c6de5fb25053ad0ec6049937ce2ee2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h index 4b020e9e30b1eb8a2f559b41c5906e10276c5e9a..851bf53d246c7765908828e50f7965bf88f51aa5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 95880fe4106ef5da88cffa38995a7e2e4a55326b..1ea878fa7901266ace020a0ff683d6e36b359ec1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index fd8051dcd98a0295c4838750c6e4a3aded6920cb..df88e39301c2dabf089a3089b49cf710fb98eff1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile index e3213c8264b6d6a56a37449bc1fab4647d56bd61..d0703f20d30c1edc904c5b138a0e7094f6ca454e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile @@ -10,5 +10,3 @@ rtl8192de-objs := \ trx.o obj-$(CONFIG_RTL8192DE) += rtl8192de.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h index 0a443ed17cf4760dcc38475741450e6328196cce..cb7b9b727e3a2c93dfe0cdd4765ec354d45e0aec 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c index 7c1db7e7572dc3fa44d6f7849250f537206475e1..ac6d554b67c839b81218f66bed58adbe4ef128e4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h index f2d318ceeb284f6be9b47d08e0d5a8b336fbfbe0..5d346ec366ce9971d4aed9479a59ad6a2d2e323b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 8de29cc3ced0763f4b7ff2b18f7dbd5d0ff5b4e2..17f6903c14bbe6f33067b83529a6f124a0b0270d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h index 8a38daa316cb502164634e9d58d8af507e2eaef7..6b435236a28e387f57278738a45240076281fbb6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index d91f8bbfe7a0799aba518625725a102305d7d219..fcb14c5db172b889ed5039f04bc5226758097336 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h index 1bc7b1a96d4aebe5577067460e56fa698405b3a8..24b03b9999bed70b31f5affcffb379e20364f6c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c index 811ba57eb9bb6cab9e2a903860fdc97639b9c172..c22b8a215c877c99c656c7db7b054c2a8ace60a3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h index a29df30c30255414bfa522ce61ebe469cfc9fae0..9874519704d3edfbe6f2826dd79d40c46dde2fd7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 2a1edfd21b9663c87e72ff1d07d4ed37959b225a..424f54babd03e8cf22e1c7c30b67959331abcf57 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h index 8115bf4ac68313570d0277d3e3db56260c30678e..58b56b523dbe38f4e037329aae944a884fe93ebb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h index 315a298bab06a756525ef81ea14f66105e426f03..b354b95936e29129184231286e41570e2ee687b8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c index 2f479d3976445fba66736ce4dffa2f332754cbd9..9dc9e915513ea985a7ade3aac3dc9668779a51fc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h index 7303d12c266fbe2ed44d55aa7e46f689e1acc2c2..c650a8dcdb260833c8dadf02612a18bec1128643 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 1ebfee18882fb4f1bf8cd31ed303bd98e96542ab..2d65e40952921924b42ecfbc5daed49471ba8b3b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h index 0e6035b8fd86faec14d07dd64f22b0acd4037356..fd7d036e9abc67431963b4a7007a82924687c5e3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c index 8ea6f528dfa6d72c4b460ce8bcf60b8555cd8c3e..4badb183cf353c50e74b28b995e950cb5ef96931 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h index 8b724a86117a5f1130d0d7e83fe87e466182676f..7fefc483ec2861a7470cc794c5f4128d2d3a3951 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index e998e98d74cb220f981abcff483f664f13b313bc..5fb37564957cd9831c6c14d33908ef369e4cf5e3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index 194d99f8bacf1504312efa2dd0b6ab1580f66f15..9bb6cc648590a9d334c2ddacf01ab34c7b9d8710 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile index 0315eeda9b600aab5a28fa6d27d8c63fe80e27fb..f254b9f64326284027a2e9255f80e1aa9d4392f5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile @@ -12,5 +12,3 @@ rtl8192ee-objs := \ obj-$(CONFIG_RTL8192EE) += rtl8192ee.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile index b7eb13819cbcbd9d36df32fc502b02eaa37409b1..dfa9dbbe2cdff5c84dc089a82fb3991efda2dff8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile @@ -11,5 +11,3 @@ rtl8192se-objs := \ obj-$(CONFIG_RTL8192SE) += rtl8192se.o -ccflags-y += -D__CHECK_ENDIAN__ - diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h index 41466f957cdc055b4f00588ab95f1281c8ea2d77..b5ba0554a0cd571e85ebda7d4007f35f6e1d5339 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c index 9bae5a92e30fba4b50f2d67408882d1e4b61720d..2c073a77b194814c0e07864d652466f4a2bad83d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h index de6ac796c74d43a8a432aff4888459a5574c5858..3af07efed73abba10612e295575633bc44540c76 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c index 331b1584a1a2d27aae6871b0be096f58c45e318c..32f9207b5cf5d7eb45180a5fb51321c4b70c501b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h index b1e44b86e8ed3617d0d11e627ef30f31d311b552..5827aa32cef0444a1dec3346af7d2f47d45182d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 52e4430edb545624f3d15b612917d65b14cf67aa..26e06b2837c3a97d7d74e2b84aee047d820766de 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h index 4cacee10f31eb81dd3a34bf8c2cbfd4cab22d60b..86bce1be83ce8412572969f71854057721ad74e7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c index 9849cb988186808584ddc89c80e65e1adcc2f2e4..870007801f6b41eb9caf4767191ed7980b6c546b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h index 2182dbeb5f32b56b8b2e64824f19159d4a63ce32..90e265d9ffc6938b4acb9c90a789ce77f88e6c4f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c index 4bb75581ab38b649bfee11041db76839fec0fa2e..fcb9216af82d1731863a604d0bee03ade5be6720 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h index 8acf4765a7a6bad91269cc4471e1f120a13561e7..7a3b6b6238729d957d7378c9cbf91bb144f8d4e5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h index e13043479b71a91fa8343f8124d3a29e4e08252c..5d445c2afcf3fdfb238aa1bd86973003bb634bb4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c index 34e88a3f6abedcdefd6d4986e674542b0cb12ceb..bd2fa7735866486b280d1c8f8e8f0274b21b98f0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h index 8a29eb94ab174dbfef5a2cac60c1f61f2a206437..e9ba283d05ada04cfccf31eaca434eb9a24cf02b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 3e1eaeac4fdce859cb853b5d820ae5075aa83b8a..998cefbd7e89104b7797d5bf419fe567dea35ff2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h index 2eb88862ebe462e701ea4c33ccad756e7be190b8..af449d6714e64b823b55c7886ec00ddeaf456d47 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c index f1a73f75127eb0dc76086976a9357faf02636412..162578f05c858598f96467ac7388a5e87dafe99a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h index 2feb73b71a4f6d1e560c7a06a487839dcd1ae6a9..aa3c7687d22676153d2f85faa03b8bfc1c70efda 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h @@ -6,10 +6,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index d53bbf6bef818d54a7d024604dcc9f1a69432550..9a5a113992211d5641af7cae2d4d1412b06f9319 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h index 5a13f17e3b41c7603e92646f7205af989607c59c..72858913807217fa5800395a73bf90eea30f9c5b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile index 6220672a96f44ffc539ccdc2325876b96dc6895c..e7607d2cb2ef280ea13d90857005362f371d8214 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile @@ -14,5 +14,3 @@ rtl8723ae-objs := \ obj-$(CONFIG_RTL8723AE) += rtl8723ae.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h index 57111052e86b649ab4f3248bb91838f1201dd49e..a113780af08a2ab496447c3977e0dedc97e23f3f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index 1186755e55b84447f3753af654f25b4d6d65b7ad..e5505387260b49269eb9b3b605ca5b61eac3ccaf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c @@ -134,7 +134,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, wait_h2c_limmit--; if (wait_h2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Wating too long for FW read clear HMEBox(%d)!\n", + "Waiting too long for FW read clear HMEBox(%d)!\n", boxnum); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h index 9d1fe25db9538e7bd521ef1e6cd4af3abb25a856..2e668fcfc5c23b080ef76dd1b2ef9787c01426fe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h index bcd64a22acc02285963e7fdf0fbd572f2b4b1037..45719fdcb067d775eadb70b6cac697de0251c43c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c index c7be9342136ce57f24193f7aa55214a71a3127cf..77c10047cb2070a7b7630394c6f48c32775982c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile index a77c3410279275320eef1195e7fd6c78556631da..a841cbd55d8e11004786f4eb47748a8bd3d59270 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile @@ -12,5 +12,3 @@ rtl8723be-objs := \ obj-$(CONFIG_RTL8723BE) += rtl8723be.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile index 345a68adcf3810ae926280f062ebc449fc05d641..73da75526e2a6feed668dbc9f2fd2013141bfa96 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile @@ -5,5 +5,3 @@ rtl8723-common-objs := \ phy_common.o obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile index f7a26f71197e01db7fe089dc6ba9de940986ba1b..8ca406b95f0298e5bd408ff465cad1ce9d4471fb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile @@ -12,5 +12,3 @@ rtl8821ae-objs := \ obj-$(CONFIG_RTL8821AE) += rtl8821ae.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 32aa5c1d070a07b429e97c03c9ff6f14c48cb91e..0a508649903d73fe4b9a643a5e7e43577abc22c9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h index 685273ca95612e7ff9c9fb393b48b9c65d1744aa..a6d43d2ecd369435b15b6d2dc7ac810afff0a44b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.h +++ b/drivers/net/wireless/realtek/rtlwifi/usb.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index dbb23899ddcb336827a3b0e5b255db9e16d2604d..dadaa73ab49d7f098d4056214252cf627c0cbd51 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -194,6 +194,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) void rsi_mac80211_detach(struct rsi_hw *adapter) { struct ieee80211_hw *hw = adapter->hw; + enum nl80211_band band; if (hw) { ieee80211_stop_queues(hw); @@ -201,7 +202,17 @@ void rsi_mac80211_detach(struct rsi_hw *adapter) ieee80211_free_hw(hw); } + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband = + &adapter->sbands[band]; + + kfree(sband->channels); + } + +#ifdef CONFIG_RSI_DEBUGFS rsi_remove_dbgfs(adapter); + kfree(adapter->dfsentry); +#endif } EXPORT_SYMBOL_GPL(rsi_mac80211_detach); @@ -264,6 +275,8 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw) common->iface_down = false; mutex_unlock(&common->mutex); + rsi_send_rx_filter_frame(common, 0); + return 0; } @@ -304,7 +317,9 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw, if (!adapter->sc_nvifs) { ++adapter->sc_nvifs; adapter->vifs[0] = vif; - ret = rsi_set_vap_capabilities(common, STA_OPMODE); + ret = rsi_set_vap_capabilities(common, + STA_OPMODE, + VAP_ADD); } break; default: @@ -332,8 +347,10 @@ static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw, struct rsi_common *common = adapter->priv; mutex_lock(&common->mutex); - if (vif->type == NL80211_IFTYPE_STATION) + if (vif->type == NL80211_IFTYPE_STATION) { adapter->sc_nvifs--; + rsi_set_vap_capabilities(common, STA_OPMODE, VAP_DELETE); + } if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif))) adapter->vifs[0] = NULL; @@ -373,7 +390,7 @@ static int rsi_channel_change(struct ieee80211_hw *hw) status = rsi_band_check(common); if (!status) - status = rsi_set_channel(adapter->priv, channel); + status = rsi_set_channel(adapter->priv, curchan); if (bss->assoc) { if (common->hw_data_qs_blocked && @@ -393,6 +410,34 @@ static int rsi_channel_change(struct ieee80211_hw *hw) return status; } +/** + * rsi_config_power() - This function configures tx power to device + * @hw: Pointer to the ieee80211_hw structure. + * + * Return: 0 on success, negative error code on failure. + */ +static int rsi_config_power(struct ieee80211_hw *hw) +{ + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + struct ieee80211_conf *conf = &hw->conf; + + if (adapter->sc_nvifs <= 0) { + rsi_dbg(ERR_ZONE, "%s: No virtual interface found\n", __func__); + return -EINVAL; + } + + rsi_dbg(INFO_ZONE, + "%s: Set tx power: %d dBM\n", __func__, conf->power_level); + + if (conf->power_level == common->tx_power) + return 0; + + common->tx_power = conf->power_level; + + return rsi_send_radio_params_update(common); +} + /** * rsi_mac80211_config() - This function is a handler for configuration * requests. 
The stack calls this function to @@ -414,6 +459,12 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, if (changed & IEEE80211_CONF_CHANGE_CHANNEL) status = rsi_channel_change(hw); + /* tx power */ + if (changed & IEEE80211_CONF_CHANGE_POWER) { + rsi_dbg(INFO_ZONE, "%s: Configuring Power\n", __func__); + status = rsi_config_power(hw); + } + mutex_unlock(&common->mutex); return status; @@ -456,11 +507,19 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + u16 rx_filter_word = 0; mutex_lock(&common->mutex); if (changed & BSS_CHANGED_ASSOC) { rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n", __func__, bss_conf->assoc); + if (bss_conf->assoc) { + /* Send the RX filter frame */ + rx_filter_word = (ALLOW_DATA_ASSOC_PEER | + ALLOW_CTRL_ASSOC_PEER | + ALLOW_MGMT_ASSOC_PEER); + rsi_send_rx_filter_frame(common, rx_filter_word); + } rsi_inform_bss_status(common, bss_conf->assoc, bss_conf->bssid, @@ -998,6 +1057,7 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw, struct rsi_common *common = adapter->priv; mutex_lock(&common->mutex); + /* Resetting all the fields to default values */ common->bitrate_mask[NL80211_BAND_2GHZ] = 0; common->bitrate_mask[NL80211_BAND_5GHZ] = 0; @@ -1007,9 +1067,114 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw, common->vif_info[0].seq_start = 0; common->secinfo.ptk_cipher = 0; common->secinfo.gtk_cipher = 0; + + rsi_send_rx_filter_frame(common, 0); + mutex_unlock(&common->mutex); + + return 0; +} +/** + * rsi_mac80211_set_antenna() - This function is used to configure + * tx and rx antennas. + * @hw: Pointer to the ieee80211_hw structure. + * @tx_ant: Bitmap for tx antenna + * @rx_ant: Bitmap for rx antenna + * + * Return: 0 on success, Negative error code on failure. + */ +static int rsi_mac80211_set_antenna(struct ieee80211_hw *hw, + u32 tx_ant, u32 rx_ant) +{ + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + u8 antenna = 0; + + if (tx_ant > 1 || rx_ant > 1) { + rsi_dbg(ERR_ZONE, + "Invalid antenna selection (tx: %d, rx:%d)\n", + tx_ant, rx_ant); + rsi_dbg(ERR_ZONE, + "Use 0 for int_ant, 1 for ext_ant\n"); + return -EINVAL; + } + + rsi_dbg(INFO_ZONE, "%s: Antenna map Tx %x Rx %d\n", + __func__, tx_ant, rx_ant); + + mutex_lock(&common->mutex); + + antenna = tx_ant ? ANTENNA_SEL_UFL : ANTENNA_SEL_INT; + if (common->ant_in_use != antenna) + if (rsi_set_antenna(common, antenna)) + goto fail_set_antenna; + + rsi_dbg(INFO_ZONE, "(%s) Antenna path configured successfully\n", + tx_ant ? "UFL" : "INT"); + + common->ant_in_use = antenna; + + mutex_unlock(&common->mutex); + return 0; + +fail_set_antenna: + rsi_dbg(ERR_ZONE, "%s: Failed.\n", __func__); + mutex_unlock(&common->mutex); + return -EINVAL; +} + +/** + * rsi_mac80211_get_antenna() - This function is used to configure + * tx and rx antennas. + * + * @hw: Pointer to the ieee80211_hw structure. + * @tx_ant: Bitmap for tx antenna + * @rx_ant: Bitmap for rx antenna + * + * Return: 0 on success, -1 on failure. + */ +static int rsi_mac80211_get_antenna(struct ieee80211_hw *hw, + u32 *tx_ant, u32 *rx_ant) +{ + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + + mutex_lock(&common->mutex); + + *tx_ant = (common->ant_in_use == ANTENNA_SEL_UFL) ? 
1 : 0; + *rx_ant = 0; + + mutex_unlock(&common->mutex); + + return 0; +} + +static void rsi_reg_notify(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_channel *ch; + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct rsi_hw * adapter = hw->priv; + int i; + + sband = wiphy->bands[NL80211_BAND_5GHZ]; + + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + if (ch->flags & IEEE80211_CHAN_DISABLED) + continue; + + if (ch->flags & IEEE80211_CHAN_RADAR) + ch->flags |= IEEE80211_CHAN_NO_IR; + } + + rsi_dbg(INFO_ZONE, + "country = %s dfs_region = %d\n", + request->alpha2, request->dfs_region); + adapter->dfs_region = request->dfs_region; } static struct ieee80211_ops mac80211_ops = { @@ -1028,6 +1193,8 @@ static struct ieee80211_ops mac80211_ops = { .ampdu_action = rsi_mac80211_ampdu_action, .sta_add = rsi_mac80211_sta_add, .sta_remove = rsi_mac80211_sta_remove, + .set_antenna = rsi_mac80211_set_antenna, + .get_antenna = rsi_mac80211_get_antenna, }; /** @@ -1092,6 +1259,8 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->bands[NL80211_BAND_5GHZ] = &adapter->sbands[NL80211_BAND_5GHZ]; + wiphy->reg_notifier = rsi_reg_notify; + status = ieee80211_register_hw(hw); if (status) return status; diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 35c14cc3f0d2d6ca006d3a934c33dbd7962fc883..fac87c06357b234fc30827dcc5e0ca157c0532c2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -617,7 +617,9 @@ static int rsi_program_bb_rf(struct rsi_common *common) * * Return: 0 on success, corresponding negative error code on failure. */ -int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode) +int rsi_set_vap_capabilities(struct rsi_common *common, + enum opmode mode, + u8 vap_status) { struct sk_buff *skb = NULL; struct rsi_vap_caps *vap_caps; @@ -642,6 +644,7 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode) FRAME_DESC_SZ) | (RSI_WIFI_MGMT_Q << 12)); vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES); + vap_caps->desc_word[2] = cpu_to_le16(vap_status << 8); vap_caps->desc_word[4] = cpu_to_le16(mode | (common->channel_width << 8)); vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) | @@ -910,7 +913,8 @@ int rsi_band_check(struct rsi_common *common) * * Return: 0 on success, corresponding error code on failure. 
*/ -int rsi_set_channel(struct rsi_common *common, u16 channel) +int rsi_set_channel(struct rsi_common *common, + struct ieee80211_channel *channel) { struct sk_buff *skb = NULL; struct rsi_mac_frame *mgmt_frame; @@ -925,24 +929,76 @@ int rsi_set_channel(struct rsi_common *common, u16 channel) return -ENOMEM; } + if (!channel) { + dev_kfree_skb(skb); + return 0; + } memset(skb->data, 0, FRAME_DESC_SZ); mgmt_frame = (struct rsi_mac_frame *)skb->data; mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST); - mgmt_frame->desc_word[4] = cpu_to_le16(channel); + mgmt_frame->desc_word[4] = cpu_to_le16(channel->hw_value); + + mgmt_frame->desc_word[4] |= + cpu_to_le16(((char)(channel->max_antenna_gain)) << 8); + mgmt_frame->desc_word[5] = + cpu_to_le16((char)(channel->max_antenna_gain)); mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET | BBP_REG_WRITE | (RSI_RF_TYPE << 4)); - mgmt_frame->desc_word[5] = cpu_to_le16(0x01); - mgmt_frame->desc_word[6] = cpu_to_le16(0x12); + if (!(channel->flags & IEEE80211_CHAN_NO_IR) && + !(channel->flags & IEEE80211_CHAN_RADAR)) { + if (common->tx_power < channel->max_power) + mgmt_frame->desc_word[6] = cpu_to_le16(common->tx_power); + else + mgmt_frame->desc_word[6] = cpu_to_le16(channel->max_power); + } + mgmt_frame->desc_word[7] = cpu_to_le16(common->priv->dfs_region); if (common->channel_width == BW_40MHZ) mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8); - common->channel = channel; + common->channel = channel->hw_value; + + skb_put(skb, FRAME_DESC_SZ); + + return rsi_send_internal_mgmt_frame(common, skb); +} + +/** + * rsi_send_radio_params_update() - This function sends the radio + * parameters update to device + * @common: Pointer to the driver private structure. + * @channel: Channel value to be set. + * + * Return: 0 on success, corresponding error code on failure. + */ +int rsi_send_radio_params_update(struct rsi_common *common) +{ + struct rsi_mac_frame *cmd_frame; + struct sk_buff *skb = NULL; + + rsi_dbg(MGMT_TX_ZONE, + "%s: Sending Radio Params update frame\n", __func__); + + skb = dev_alloc_skb(FRAME_DESC_SZ); + if (!skb) { + rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", + __func__); + return -ENOMEM; + } + + memset(skb->data, 0, FRAME_DESC_SZ); + cmd_frame = (struct rsi_mac_frame *)skb->data; + + cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); + cmd_frame->desc_word[1] = cpu_to_le16(RADIO_PARAMS_UPDATE); + cmd_frame->desc_word[3] = cpu_to_le16(BIT(0)); + + cmd_frame->desc_word[3] |= cpu_to_le16(common->tx_power << 8); skb_put(skb, FRAME_DESC_SZ); @@ -1240,6 +1296,72 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) } +/** + * rsi_send_rx_filter_frame() - Sends a frame to filter the RX packets + * + * @common: Pointer to the driver private structure. + * @rx_filter_word: Flags of filter packets + * + * @Return: 0 on success, -1 on failure. 
+ */ +int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word) +{ + struct rsi_mac_frame *cmd_frame; + struct sk_buff *skb; + + rsi_dbg(MGMT_TX_ZONE, "Sending RX filter frame\n"); + + skb = dev_alloc_skb(FRAME_DESC_SZ); + if (!skb) { + rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", + __func__); + return -ENOMEM; + } + + memset(skb->data, 0, FRAME_DESC_SZ); + cmd_frame = (struct rsi_mac_frame *)skb->data; + + cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); + cmd_frame->desc_word[1] = cpu_to_le16(SET_RX_FILTER); + cmd_frame->desc_word[4] = cpu_to_le16(rx_filter_word); + + skb_put(skb, FRAME_DESC_SZ); + + return rsi_send_internal_mgmt_frame(common, skb); +} + +/** + * rsi_set_antenna() - This function sends an antenna configuration request + * to the device + * + * @common: Pointer to the driver private structure. + * @antenna: bitmap for tx antenna selection + * + * Return: 0 on success, negative error code on failure + */ +int rsi_set_antenna(struct rsi_common *common, u8 antenna) +{ + struct rsi_mac_frame *cmd_frame; + struct sk_buff *skb; + + skb = dev_alloc_skb(FRAME_DESC_SZ); + if (!skb) { + rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", + __func__); + return -ENOMEM; + } + + memset(skb->data, 0, FRAME_DESC_SZ); + cmd_frame = (struct rsi_mac_frame *)skb->data; + + cmd_frame->desc_word[1] = cpu_to_le16(ANT_SEL_FRAME); + cmd_frame->desc_word[3] = cpu_to_le16(antenna & 0x00ff); + cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); + + skb_put(skb, FRAME_DESC_SZ); + + return rsi_send_internal_mgmt_frame(common, skb); +} /** * rsi_handle_ta_confirm_type() - This function handles the confirm frames. diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index dcd095787166e3aa0cf6de80054e2ee5e6134d77..1d5904bc2c7439459a87dc054f83b53857fb3ef3 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -204,6 +204,9 @@ struct rsi_common { struct cqm_info cqm_info; bool hw_data_qs_blocked; + + int tx_power; + u8 ant_in_use; }; struct rsi_hw { @@ -220,6 +223,7 @@ struct rsi_hw { struct rsi_debugfs *dfsentry; u8 num_debugfs_entries; #endif + u8 dfs_region; void *rsi_dev; int (*host_intf_read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len); int (*host_intf_write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len); diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 3741173fd3acea1132224caaeab02aba47d84b36..dfbf7a50269b16e9da4bc8bd78c3c4fa0afc669a 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -140,11 +140,30 @@ #define RSI_SUPP_FILTERS (FIF_ALLMULTI | FIF_PROBE_REQ |\ FIF_BCN_PRBRESP_PROMISC) + +#define ANTENNA_SEL_INT 0x02 /* RF_OUT_2 / Integrated */ +#define ANTENNA_SEL_UFL 0x03 /* RF_OUT_1 / U.FL */ + +/* Rx filter word definitions */ +#define PROMISCOUS_MODE BIT(0) +#define ALLOW_DATA_ASSOC_PEER BIT(1) +#define ALLOW_MGMT_ASSOC_PEER BIT(2) +#define ALLOW_CTRL_ASSOC_PEER BIT(3) +#define DISALLOW_BEACONS BIT(4) +#define ALLOW_CONN_PEER_MGMT_WHILE_BUF_FULL BIT(5) +#define DISALLOW_BROADCAST_DATA BIT(6) + enum opmode { STA_OPMODE = 1, AP_OPMODE = 2 }; +enum vap_status { + VAP_ADD = 1, + VAP_DELETE = 2, + VAP_UPDATE = 3 +}; + extern struct ieee80211_rate rsi_rates[12]; extern const u16 rsi_mcsrates[8]; @@ -184,7 +203,9 @@ enum cmd_frame_type { BG_SCAN_PARAMS, BG_SCAN_PROBE_REQ, CW_MODE_REQ, - PER_CMD_PKT + PER_CMD_PKT, + ANT_SEL_FRAME = 0x20, + RADIO_PARAMS_UPDATE = 0x29 }; struct rsi_mac_frame { @@
-287,12 +308,14 @@ static inline u8 rsi_get_channel(u8 *addr) } int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg); -int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode); +int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, + u8 vap_status); int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, u16 ssn, u8 buf_size, u8 event); int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len, u8 key_type, u8 key_id, u32 cipher); -int rsi_set_channel(struct rsi_common *common, u16 chno); +int rsi_set_channel(struct rsi_common *common, + struct ieee80211_channel *channel); int rsi_send_block_unblock_frame(struct rsi_common *common, bool event); void rsi_inform_bss_status(struct rsi_common *common, u8 status, const u8 *bssid, u8 qos_enable, u16 aid); @@ -306,4 +329,7 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb); int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb); int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb); int rsi_band_check(struct rsi_common *common); +int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word); +int rsi_send_radio_params_update(struct rsi_common *common); +int rsi_set_antenna(struct rsi_common *common, u8 antenna); #endif diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c index 680d60eabc75de4b50a44a7bb6395fb020fa4b83..be4c22e0d9021f99db69faef2bcfc41dbeaf8b9f 100644 --- a/drivers/net/wireless/st/cw1200/wsm.c +++ b/drivers/net/wireless/st/cw1200/wsm.c @@ -379,7 +379,6 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv, { int ret; int count; - int i; count = WSM_GET32(buf); if (WARN_ON(count <= 0)) @@ -395,11 +394,10 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv, } cw1200_debug_txed_multi(priv, count); - for (i = 0; i < count; ++i) { + do { ret = wsm_tx_confirm(priv, buf, link_id); - if (ret) - return ret; - } + } while (!ret && --count); + return ret; underflow: @@ -1807,16 +1805,18 @@ static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size) { size_t pos = buf->data - buf->begin; size_t size = pos + extra_size; + u8 *tmp; size = round_up(size, FWLOAD_BLOCK_SIZE); - buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA); - if (buf->begin) { - buf->data = &buf->begin[pos]; - buf->end = &buf->begin[size]; - return 0; - } else { - buf->end = buf->data = buf->begin; + tmp = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA); + if (!tmp) { + wsm_buf_deinit(buf); return -ENOMEM; } + + buf->begin = tmp; + buf->data = &buf->begin[pos]; + buf->end = &buf->begin[size]; + return 0; } diff --git a/drivers/net/wireless/ti/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile index a5c6328b5f725bd3e92e8292df48328f219b08f2..58b4f935a3f634f89157e32389e90cbf29898310 100644 --- a/drivers/net/wireless/ti/wl1251/Makefile +++ b/drivers/net/wireless/ti/wl1251/Makefile @@ -6,5 +6,3 @@ wl1251_sdio-objs += sdio.o obj-$(CONFIG_WL1251) += wl1251.o obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c index b36ce185c9f22740add5056aec16858ae1ee9783..86fa0fc690841df2606e4670947fbd637bc64c55 100644 --- a/drivers/net/wireless/ti/wl18xx/event.c +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -218,5 +218,33 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) if (vector & FW_LOGGER_INDICATION) 
wlcore_event_fw_logger(wl); + if (vector & RX_BA_WIN_SIZE_CHANGE_EVENT_ID) { + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + u8 link_id = mbox->rx_ba_link_id; + u8 win_size = mbox->rx_ba_win_size; + const u8 *addr; + + wlvif = wl->links[link_id].wlvif; + vif = wl12xx_wlvif_to_vif(wlvif); + + /* Update RX aggregation window size and call + * MAC routine to stop active RX aggregations for this link + */ + if (wlvif->bss_type != BSS_TYPE_AP_BSS) + addr = vif->bss_conf.bssid; + else + addr = wl->links[link_id].addr; + + sta = ieee80211_find_sta(vif, addr); + if (sta) { + sta->max_rx_aggregation_subframes = win_size; + ieee80211_stop_rx_ba_session(vif, + wl->links[link_id].ba_bitmap, + addr); + } + } + return 0; } diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h index ce8ea9c04052170da91b2266f2d2dbac561fa62d..4af297fbb529dc19045f4130388f46795a045371 100644 --- a/drivers/net/wireless/ti/wl18xx/event.h +++ b/drivers/net/wireless/ti/wl18xx/event.h @@ -38,6 +38,7 @@ enum { REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18), DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19), PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20), + RX_BA_WIN_SIZE_CHANGE_EVENT_ID = BIT(21), SMART_CONFIG_SYNC_EVENT_ID = BIT(22), SMART_CONFIG_DECODE_EVENT_ID = BIT(23), TIME_SYNC_EVENT_ID = BIT(24), diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 06d6943b257c28858167fd91dda3dd50cb0937c8..5bdf7a03e3ddb578023dda6969113199701fe0a0 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -1041,7 +1041,8 @@ static int wl18xx_boot(struct wl1271 *wl) SMART_CONFIG_SYNC_EVENT_ID | SMART_CONFIG_DECODE_EVENT_ID | TIME_SYNC_EVENT_ID | - FW_LOGGER_INDICATION; + FW_LOGGER_INDICATION | + RX_BA_WIN_SIZE_CHANGE_EVENT_ID; wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID; diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile index 0a69c1373643b07fffd482f6a011724d91d9e760..e286713b3c189c7579ea60db51963fceff656ae3 100644 --- a/drivers/net/wireless/ti/wlcore/Makefile +++ b/drivers/net/wireless/ti/wlcore/Makefile @@ -8,5 +8,3 @@ wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o obj-$(CONFIG_WLCORE) += wlcore.o obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c index 26cc23f3224148bd8a4909f242dfaffdd3c1c108..a4859993db3c600bb2cd4cadf2e30794da19f338 100644 --- a/drivers/net/wireless/ti/wlcore/acx.c +++ b/drivers/net/wireless/ti/wlcore/acx.c @@ -1419,7 +1419,8 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, /* setup BA session receiver setting in the FW. 
*/ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, - u16 ssn, bool enable, u8 peer_hlid) + u16 ssn, bool enable, u8 peer_hlid, + u8 win_size) { struct wl1271_acx_ba_receiver_setup *acx; int ret; @@ -1435,7 +1436,7 @@ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, acx->hlid = peer_hlid; acx->tid = tid_index; acx->enable = enable; - acx->win_size = wl->conf.ht.rx_ba_win_size; + acx->win_size = win_size; acx->ssn = ssn; ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx, diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h index 6321ed47289158f11af77fcc218518cdd70ff868..f46d7fdf9a0008bf1cae968fa8ab36ac09da00d5 100644 --- a/drivers/net/wireless/ti/wlcore/acx.h +++ b/drivers/net/wireless/ti/wlcore/acx.h @@ -1113,7 +1113,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl, int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, - u16 ssn, bool enable, u8 peer_hlid); + u16 ssn, bool enable, u8 peer_hlid, + u8 win_size); int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif, u64 *mactime); int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 471521a0db7b6634e0f2c36ef8a168116c336f4f..e536aa01b937a958684c81a8a8c228dc44975b42 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5285,7 +5285,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, } ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true, - hlid); + hlid, + params->buf_size); + if (!ret) { *ba_bitmap |= BIT(tid); wl->ba_rx_session_count++; @@ -5306,7 +5308,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, } ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false, - hlid); + hlid, 0); if (!ret) { *ba_bitmap &= ~BIT(tid); wl->ba_rx_session_count--; @@ -6086,6 +6088,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(wl->hw, SIGNAL_DBM); ieee80211_hw_set(wl->hw, SUPPORTS_PS); + ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG); wl->hw->wiphy->cipher_suites = cipher_suites; wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); @@ -6120,6 +6123,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) WIPHY_FLAG_SUPPORTS_SCHED_SCAN | WIPHY_FLAG_HAS_CHANNEL_SWITCH; + wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; + /* make sure all our channels fit in the scanned_ch bitmask */ BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + ARRAY_SIZE(wl1271_channels_5ghz) > diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index 932f3f81e8cf3f3cb6b6a51e0e4904ecfc7c23fc..acec0d9ec422b22ee0c92b26da3d99c9dd0c5878 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -51,7 +51,7 @@ #include #include -#include +#include #include "wl3501.h" @@ -1853,7 +1853,6 @@ static const struct net_device_ops wl3501_netdev_ops = { .ndo_stop = wl3501_close, .ndo_start_xmit = wl3501_hard_start_xmit, .ndo_tx_timeout = wl3501_tx_timeout, - .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index dea049b2556f337ed1f1504cef7e834882035707..de7ff395977a31820913eb1d43aa2509d979c136 100644 --- 
a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -1724,7 +1724,6 @@ static const struct net_device_ops zd1201_netdev_ops = { .ndo_tx_timeout = zd1201_tx_timeout, .ndo_set_rx_mode = zd1201_set_multicast, .ndo_set_mac_address = zd1201_set_mac_address, - .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 74dc2bf714280c935d0ba16511a87a863323b19a..e30ffd29b7e913f2514c4c925979ffd474190acc 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -302,7 +302,7 @@ static int xenvif_close(struct net_device *dev) static int xenvif_change_mtu(struct net_device *dev, int mtu) { struct xenvif *vif = netdev_priv(dev); - int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN; + int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; @@ -471,6 +471,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, dev->tx_queue_len = XENVIF_QUEUE_LENGTH; + dev->min_mtu = 0; + dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; + /* * Initialise a dummy MAC address. We choose the numerically * largest non-broadcast address to prevent the address getting diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 8674e188b697d91741cb8e5eaef96918eb50224d..3124eaec942745fe9eb6cd4a49ee8bc633d80e67 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -785,12 +785,9 @@ static void xen_mcast_ctrl_changed(struct xenbus_watch *watch, struct xenvif *vif = container_of(watch, struct xenvif, mcast_ctrl_watch); struct xenbus_device *dev = xenvif_to_xenbus_device(vif); - int val; - if (xenbus_scanf(XBT_NIL, dev->otherend, - "request-multicast-control", "%d", &val) < 0) - val = 0; - vif->multicast_control = !!val; + vif->multicast_control = !!xenbus_read_unsigned(dev->otherend, + "request-multicast-control", 0); } static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, @@ -889,16 +886,16 @@ static int connect_ctrl_ring(struct backend_info *be) unsigned int evtchn; int err; - err = xenbus_gather(XBT_NIL, dev->otherend, - "ctrl-ring-ref", "%u", &val, NULL); - if (err) + err = xenbus_scanf(XBT_NIL, dev->otherend, + "ctrl-ring-ref", "%u", &val); + if (err < 0) goto done; /* The frontend does not have a control ring */ ring_ref = val; - err = xenbus_gather(XBT_NIL, dev->otherend, - "event-channel-ctrl", "%u", &val, NULL); - if (err) { + err = xenbus_scanf(XBT_NIL, dev->otherend, + "event-channel-ctrl", "%u", &val); + if (err < 0) { xenbus_dev_fatal(dev, err, "reading %s/event-channel-ctrl", dev->otherend); @@ -934,14 +931,11 @@ static void connect(struct backend_info *be) /* Check whether the frontend requested multiple queues * and read the number requested. 
*/ - err = xenbus_scanf(XBT_NIL, dev->otherend, - "multi-queue-num-queues", - "%u", &requested_num_queues); - if (err < 0) { - requested_num_queues = 1; /* Fall back to single queue */ - } else if (requested_num_queues > xenvif_max_queues) { + requested_num_queues = xenbus_read_unsigned(dev->otherend, + "multi-queue-num-queues", 1); + if (requested_num_queues > xenvif_max_queues) { /* buggy or malicious guest */ - xenbus_dev_fatal(dev, err, + xenbus_dev_fatal(dev, -EINVAL, "guest requested %u queues, exceeding the maximum of %u.", requested_num_queues, xenvif_max_queues); return; @@ -1134,7 +1128,7 @@ static int read_xenbus_vif_flags(struct backend_info *be) struct xenvif *vif = be->vif; struct xenbus_device *dev = be->dev; unsigned int rx_copy; - int err, val; + int err; err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", &rx_copy); @@ -1150,10 +1144,7 @@ static int read_xenbus_vif_flags(struct backend_info *be) if (!rx_copy) return -EOPNOTSUPP; - if (xenbus_scanf(XBT_NIL, dev->otherend, - "feature-rx-notify", "%d", &val) < 0) - val = 0; - if (!val) { + if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) { /* - Reduce drain timeout to poll more frequently for * Rx requests. * - Disable Rx stall detection. @@ -1162,34 +1153,21 @@ static int read_xenbus_vif_flags(struct backend_info *be) be->vif->stall_timeout = 0; } - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", - "%d", &val) < 0) - val = 0; - vif->can_sg = !!val; + vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0); vif->gso_mask = 0; - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", - "%d", &val) < 0) - val = 0; - if (val) + if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0)) vif->gso_mask |= GSO_BIT(TCPV4); - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", - "%d", &val) < 0) - val = 0; - if (val) + if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0)) vif->gso_mask |= GSO_BIT(TCPV6); - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", - "%d", &val) < 0) - val = 0; - vif->ip_csum = !val; + vif->ip_csum = !xenbus_read_unsigned(dev->otherend, + "feature-no-csum-offload", 0); - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload", - "%d", &val) < 0) - val = 0; - vif->ipv6_csum = !!val; + vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend, + "feature-ipv6-csum-offload", 0); return 0; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index e17879dd5d5a18e3efcfe0d64a4fb992d2967b96..a479cd99911d8a06cdc37b92682b58b101500846 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) queue->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&queue->gref_rx_head); - BUG_ON((signed short)ref < 0); + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); queue->grant_rx_ref[id] = ref; page = skb_frag_page(&skb_shinfo(skb)->frags[0]); @@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); ref = gnttab_claim_grant_reference(&queue->gref_tx_head); - BUG_ON((signed short)ref < 0); + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, gfn, GNTMAP_readonly); @@ -1169,43 +1169,23 @@ static netdev_features_t xennet_fix_features(struct net_device *dev, 
netdev_features_t features) { struct netfront_info *np = netdev_priv(dev); - int val; - if (features & NETIF_F_SG) { - if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", - "%d", &val) < 0) - val = 0; + if (features & NETIF_F_SG && + !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0)) + features &= ~NETIF_F_SG; - if (!val) - features &= ~NETIF_F_SG; - } - - if (features & NETIF_F_IPV6_CSUM) { - if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, - "feature-ipv6-csum-offload", "%d", &val) < 0) - val = 0; - - if (!val) - features &= ~NETIF_F_IPV6_CSUM; - } - - if (features & NETIF_F_TSO) { - if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, - "feature-gso-tcpv4", "%d", &val) < 0) - val = 0; + if (features & NETIF_F_IPV6_CSUM && + !xenbus_read_unsigned(np->xbdev->otherend, + "feature-ipv6-csum-offload", 0)) + features &= ~NETIF_F_IPV6_CSUM; - if (!val) - features &= ~NETIF_F_TSO; - } + if (features & NETIF_F_TSO && + !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0)) + features &= ~NETIF_F_TSO; - if (features & NETIF_F_TSO6) { - if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, - "feature-gso-tcpv6", "%d", &val) < 0) - val = 0; - - if (!val) - features &= ~NETIF_F_TSO6; - } + if (features & NETIF_F_TSO6 && + !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0)) + features &= ~NETIF_F_TSO6; return features; } @@ -1329,6 +1309,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netdev->features |= netdev->hw_features; netdev->ethtool_ops = &xennet_ethtool_ops; + netdev->min_mtu = 0; + netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; @@ -1821,18 +1803,13 @@ static int talk_to_netback(struct xenbus_device *dev, info->netdev->irq = 0; /* Check if backend supports multiple queues */ - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, - "multi-queue-max-queues", "%u", &max_queues); - if (err < 0) - max_queues = 1; + max_queues = xenbus_read_unsigned(info->xbdev->otherend, + "multi-queue-max-queues", 1); num_queues = min(max_queues, xennet_max_queues); /* Check feature-split-event-channels */ - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, - "feature-split-event-channels", "%u", - &feature_split_evtchn); - if (err < 0) - feature_split_evtchn = 0; + feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend, + "feature-split-event-channels", 0); /* Read mac addr. 
*/ err = xen_net_read_mac(dev, info->netdev->dev_addr); @@ -1966,16 +1943,10 @@ static int xennet_connect(struct net_device *dev) struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = 0; int err; - unsigned int feature_rx_copy; unsigned int j = 0; struct netfront_queue *queue = NULL; - err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, - "feature-rx-copy", "%u", &feature_rx_copy); - if (err != 1) - feature_rx_copy = 0; - - if (!feature_rx_copy) { + if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) { dev_info(&dev->dev, "backend does not support copying receive path\n"); return -ENODEV; diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c index 83deda4bb4d6d52d4b529b71712f71114aac1445..8a04c5e029998504d5c07c1c327746ecf324bf78 100644 --- a/drivers/nfc/mei_phy.c +++ b/drivers/nfc/mei_phy.c @@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy) return -ENOMEM; bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { + if (bytes_recv < 0 || bytes_recv < if_version_length) { pr_err("Could not read IF version\n"); r = -EIO; goto err; @@ -297,35 +297,34 @@ static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length) } -static void nfc_mei_event_cb(struct mei_cl_device *cldev, u32 events, - void *context) +static void nfc_mei_rx_cb(struct mei_cl_device *cldev) { - struct nfc_mei_phy *phy = context; + struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev); + struct sk_buff *skb; + int reply_size; - if (phy->hard_fault != 0) + if (!phy) return; - if (events & BIT(MEI_CL_EVENT_RX)) { - struct sk_buff *skb; - int reply_size; + if (phy->hard_fault != 0) + return; - skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL); - if (!skb) - return; + skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL); + if (!skb) + return; - reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ); - if (reply_size < MEI_NFC_HEADER_SIZE) { - kfree_skb(skb); - return; - } + reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ); + if (reply_size < MEI_NFC_HEADER_SIZE) { + kfree_skb(skb); + return; + } - skb_put(skb, reply_size); - skb_pull(skb, MEI_NFC_HEADER_SIZE); + skb_put(skb, reply_size); + skb_pull(skb, MEI_NFC_HEADER_SIZE); - MEI_DUMP_SKB_IN("mei frame read", skb); + MEI_DUMP_SKB_IN("mei frame read", skb); - nfc_hci_recv_frame(phy->hdev, skb); - } + nfc_hci_recv_frame(phy->hdev, skb); } static int nfc_mei_phy_enable(void *phy_id) @@ -356,8 +355,7 @@ static int nfc_mei_phy_enable(void *phy_id) goto err; } - r = mei_cldev_register_event_cb(phy->cldev, BIT(MEI_CL_EVENT_RX), - nfc_mei_event_cb, phy); + r = mei_cldev_register_rx_cb(phy->cldev, nfc_mei_rx_cb); if (r) { pr_err("Event cb registration failed %d\n", r); goto err; diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index 3092501f26c4ffbff41162c68fc182b876530a2f..eb5eddf1794e21598eb27f0bd93df5cc65f91842 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c @@ -82,28 +82,7 @@ static struct mei_cl_driver microread_driver = { .remove = microread_mei_remove, }; -static int microread_mei_init(void) -{ - int r; - - pr_debug(DRIVER_DESC ": %s\n", __func__); - - r = mei_cldev_driver_register(µread_driver); - if (r) { - pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); - return r; - } - - return 0; -} - -static void microread_mei_exit(void) -{ - mei_cldev_driver_unregister(µread_driver); -} - -module_init(microread_mei_init); -module_exit(microread_mei_exit); 
+module_mei_cl_driver(microread_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c index 46d0eb24eef9e07878443a38af17ab10e27ed341..ad57a8ec00d654224d2be29ed65669d938b5c0b4 100644 --- a/drivers/nfc/pn544/mei.c +++ b/drivers/nfc/pn544/mei.c @@ -82,28 +82,7 @@ static struct mei_cl_driver pn544_driver = { .remove = pn544_mei_remove, }; -static int pn544_mei_init(void) -{ - int r; - - pr_debug(DRIVER_DESC ": %s\n", __func__); - - r = mei_cldev_driver_register(&pn544_driver); - if (r) { - pr_err(PN544_DRIVER_NAME ": driver registration failed\n"); - return r; - } - - return 0; -} - -static void pn544_mei_exit(void) -{ - mei_cldev_driver_unregister(&pn544_driver); -} - -module_init(pn544_mei_init); -module_exit(pn544_mei_exit); +module_mei_cl_driver(pn544_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 6ccba0d862df704877db1589fff036d40c10cf36..019a158e1128d7a27cdbea269a9149bc90b364dd 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -138,11 +138,11 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, base_addr = pci_resource_start(ndev->ntb.pdev, bar); if (bar != 1) { - xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 3); - limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 3); + xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2); + limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2); /* Set the limit if supported */ - limit = base_addr + size; + limit = size; /* set and verify setting the translation address */ write64(addr, peer_mmio + xlat_reg); @@ -164,14 +164,8 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, xlat_reg = AMD_BAR1XLAT_OFFSET; limit_reg = AMD_BAR1LMT_OFFSET; - /* split bar addr range must all be 32 bit */ - if (addr & (~0ull << 32)) - return -EINVAL; - if ((addr + size) & (~0ull << 32)) - return -EINVAL; - /* Set the limit if supported */ - limit = base_addr + size; + limit = size; /* set and verify setting the translation address */ write64(addr, peer_mmio + xlat_reg); @@ -199,6 +193,11 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev) if (!ndev->peer_sta) return NTB_LNK_STA_ACTIVE(ndev->cntl_sta); + if (ndev->peer_sta & AMD_LINK_UP_EVENT) { + ndev->peer_sta = 0; + return 1; + } + /* If peer_sta is reset or D0 event, the ISR has * started a timer to check link status of hardware. * So here just clear status bit. 
And if peer_sta is @@ -207,7 +206,7 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev) */ if (ndev->peer_sta & AMD_PEER_RESET_EVENT) ndev->peer_sta &= ~AMD_PEER_RESET_EVENT; - else if (ndev->peer_sta & AMD_PEER_D0_EVENT) + else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT)) ndev->peer_sta = 0; return 0; @@ -491,6 +490,8 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) break; case AMD_PEER_D3_EVENT: case AMD_PEER_PMETO_EVENT: + case AMD_LINK_UP_EVENT: + case AMD_LINK_DOWN_EVENT: amd_ack_smu(ndev, status); /* link down */ @@ -598,7 +599,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev, err_msix_request: while (i-- > 0) - free_irq(ndev->msix[i].vector, ndev); + free_irq(ndev->msix[i].vector, &ndev->vec[i]); pci_disable_msix(pdev); err_msix_enable: kfree(ndev->msix); diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h index 2eac3cd3e6469689ede8fe472f32993238817ad8..13d73ed94a52d55c05d94c91629c7e2e595f13fc 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.h +++ b/drivers/ntb/hw/amd/ntb_hw_amd.h @@ -148,9 +148,12 @@ enum { AMD_PEER_D3_EVENT = BIT(2), AMD_PEER_PMETO_EVENT = BIT(3), AMD_PEER_D0_EVENT = BIT(4), + AMD_LINK_UP_EVENT = BIT(5), + AMD_LINK_DOWN_EVENT = BIT(6), AMD_EVENT_INTMASK = (AMD_PEER_FLUSH_EVENT | AMD_PEER_RESET_EVENT | AMD_PEER_D3_EVENT | - AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT), + AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT | + AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT), AMD_PMESTAT_OFFSET = 0x480, AMD_PMSGTRIG_OFFSET = 0x490, diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index 0d5c29ae51def6735e21aafb6579cda7c56ebd6f..eca9688bf9d9fdff11ea091b9b2e3d9c115ab8e0 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c @@ -86,7 +86,12 @@ static const struct intel_ntb_xlat_reg xeon_pri_xlat; static const struct intel_ntb_xlat_reg xeon_sec_xlat; static struct intel_b2b_addr xeon_b2b_usd_addr; static struct intel_b2b_addr xeon_b2b_dsd_addr; +static const struct intel_ntb_reg skx_reg; +static const struct intel_ntb_alt_reg skx_pri_reg; +static const struct intel_ntb_alt_reg skx_b2b_reg; +static const struct intel_ntb_xlat_reg skx_sec_xlat; static const struct ntb_dev_ops intel_ntb_ops; +static const struct ntb_dev_ops intel_ntb3_ops; static const struct file_operations intel_ntb_debugfs_info; static struct dentry *debugfs_dir; @@ -112,17 +117,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, module_param_named(xeon_b2b_usd_bar4_addr64, xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64, "XEON B2B USD BAR 4 64-bit address"); module_param_named(xeon_b2b_usd_bar4_addr32, xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32, "XEON B2B USD split-BAR 4 32-bit address"); module_param_named(xeon_b2b_usd_bar5_addr32, xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32, "XEON B2B USD split-BAR 5 32-bit address"); module_param_named(xeon_b2b_dsd_bar2_addr64, @@ -132,19 +137,22 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, module_param_named(xeon_b2b_dsd_bar4_addr64, xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64, "XEON B2B DSD BAR 4 64-bit address"); module_param_named(xeon_b2b_dsd_bar4_addr32, xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); 
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32, "XEON B2B DSD split-BAR 4 32-bit address"); module_param_named(xeon_b2b_dsd_bar5_addr32, xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, "XEON B2B DSD split-BAR 5 32-bit address"); +static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd); +static int xeon_init_isr(struct intel_ntb_dev *ndev); + #ifndef ioread64 #ifdef readq #define ioread64 readq @@ -206,6 +214,14 @@ static inline int pdev_is_xeon(struct pci_dev *pdev) return 0; } +static inline int pdev_is_skx_xeon(struct pci_dev *pdev) +{ + if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX) + return 1; + + return 0; +} + static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev) { ndev->unsafe_flags = 0; @@ -390,6 +406,9 @@ static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec) vec_mask = ndev_vec_mask(ndev, vec); + if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31)) + vec_mask |= ndev->db_link_mask; + dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask); ndev->last_ts = jiffies; @@ -409,6 +428,9 @@ static irqreturn_t ndev_vec_isr(int irq, void *dev) { struct intel_ntb_vec *nvec = dev; + dev_dbg(ndev_dev(nvec->ndev), "irq: %d nvec->num: %d\n", + irq, nvec->num); + return ndev_interrupt(nvec->ndev, nvec->num); } @@ -465,14 +487,14 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev, goto err_msix_request; } - dev_dbg(ndev_dev(ndev), "Using msix interrupts\n"); + dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count); ndev->db_vec_count = msix_count; ndev->db_vec_shift = msix_shift; return 0; err_msix_request: while (i-- > 0) - free_irq(ndev->msix[i].vector, ndev); + free_irq(ndev->msix[i].vector, &ndev->vec[i]); pci_disable_msix(pdev); err_msix_enable: kfree(ndev->msix); @@ -547,8 +569,171 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev) } } -static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf, - size_t count, loff_t *offp) +static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct intel_ntb_dev *ndev; + void __iomem *mmio; + char *buf; + size_t buf_size; + ssize_t ret, off; + union { u64 v64; u32 v32; u16 v16; } u; + + ndev = filp->private_data; + mmio = ndev->self_mmio; + + buf_size = min(count, 0x800ul); + + buf = kmalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + off = 0; + + off += scnprintf(buf + off, buf_size - off, + "NTB Device Information:\n"); + + off += scnprintf(buf + off, buf_size - off, + "Connection Topology -\t%s\n", + ntb_topo_string(ndev->ntb.topo)); + + off += scnprintf(buf + off, buf_size - off, + "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); + off += scnprintf(buf + off, buf_size - off, + "LNK STA -\t\t%#06x\n", ndev->lnk_sta); + + if (!ndev->reg->link_is_up(ndev)) + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tDown\n"); + else { + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tUp\n"); + off += scnprintf(buf + off, buf_size - off, + "Link Speed -\t\tPCI-E Gen %u\n", + NTB_LNK_STA_SPEED(ndev->lnk_sta)); + off += scnprintf(buf + off, buf_size - off, + "Link Width -\t\tx%u\n", + NTB_LNK_STA_WIDTH(ndev->lnk_sta)); + } + + off += scnprintf(buf + off, buf_size - off, + "Memory Window Count -\t%u\n", ndev->mw_count); + off += scnprintf(buf + off, buf_size - off, + "Scratchpad Count -\t%u\n", ndev->spad_count); + off += 
scnprintf(buf + off, buf_size - off, + "Doorbell Count -\t%u\n", ndev->db_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); + + off += scnprintf(buf + off, buf_size - off, + "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); + + u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask -\t\t%#llx\n", u.v64); + + u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Bell -\t\t%#llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Incoming XLAT:\n"); + + u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IMBAR1XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IMBAR2XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64); + + if (ntb_topo_is_b2b(ndev->ntb.topo)) { + off += scnprintf(buf + off, buf_size - off, + "\nNTB Outgoing B2B XLAT:\n"); + + u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR1XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR2XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR1XLMT -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR2XLMT -\t\t%#018llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Secondary BAR:\n"); + + u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR0 -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR1 -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR2 -\t\t%#018llx\n", u.v64); + } + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Statistics:\n"); + + u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "Upstream Memory Miss -\t%u\n", u.v16); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Hardware Errors:\n"); + + if (!pci_read_config_word(ndev->ntb.pdev, + SKX_DEVSTS_OFFSET, &u.v16)) + off += scnprintf(buf + off, buf_size - off, + "DEVSTS -\t\t%#06x\n", u.v16); + + if (!pci_read_config_word(ndev->ntb.pdev, + SKX_LINK_STATUS_OFFSET, &u.v16)) + off += scnprintf(buf + off, buf_size - off, + "LNKSTS -\t\t%#06x\n", u.v16); + + if (!pci_read_config_dword(ndev->ntb.pdev, + SKX_UNCERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + off, buf_size - off, + "UNCERRSTS -\t\t%#06x\n", u.v32); + + if (!pci_read_config_dword(ndev->ntb.pdev, + SKX_CORERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + 
off, buf_size - off, + "CORERRSTS -\t\t%#06x\n", u.v32); + + ret = simple_read_from_buffer(ubuf, count, offp, buf, off); + kfree(buf); + return ret; +} + +static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) { struct intel_ntb_dev *ndev; struct pci_dev *pdev; @@ -813,6 +998,20 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf, return ret; } +static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct intel_ntb_dev *ndev = filp->private_data; + + if (pdev_is_xeon(ndev->ntb.pdev) || + pdev_is_atom(ndev->ntb.pdev)) + return ndev_ntb_debugfs_read(filp, ubuf, count, offp); + else if (pdev_is_skx_xeon(ndev->ntb.pdev)) + return ndev_ntb3_debugfs_read(filp, ubuf, count, offp); + + return -ENXIO; +} + static void ndev_init_debugfs(struct intel_ntb_dev *ndev) { if (!debugfs_dir) { @@ -1428,6 +1627,383 @@ static void atom_deinit_dev(struct intel_ntb_dev *ndev) atom_deinit_isr(ndev); } +/* Skylake Xeon NTB */ + +static u64 skx_db_ioread(void __iomem *mmio) +{ + return ioread64(mmio); +} + +static void skx_db_iowrite(u64 bits, void __iomem *mmio) +{ + iowrite64(bits, mmio); +} + +static int skx_init_isr(struct intel_ntb_dev *ndev) +{ + int i; + + /* + * The MSIX vectors and the interrupt status bits are not lined up + * on Skylake. By default the link status bit is bit 32, however it + * is by default MSIX vector0. We need to fixup to line them up. + * The vectors at reset is 1-32,0. We need to reprogram to 0-32. + */ + + for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++) + iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i); + + /* move link status down one as workaround */ + if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) { + iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2, + ndev->self_mmio + SKX_INTVEC_OFFSET + + (SKX_DB_MSIX_VECTOR_COUNT - 1)); + } + + return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT, + SKX_DB_MSIX_VECTOR_COUNT, + SKX_DB_MSIX_VECTOR_SHIFT, + SKX_DB_TOTAL_SHIFT); +} + +static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, + const struct intel_b2b_addr *addr, + const struct intel_b2b_addr *peer_addr) +{ + struct pci_dev *pdev; + void __iomem *mmio; + resource_size_t bar_size; + phys_addr_t bar_addr; + int b2b_bar; + u8 bar_sz; + + pdev = ndev_pdev(ndev); + mmio = ndev->self_mmio; + + if (ndev->b2b_idx == UINT_MAX) { + dev_dbg(ndev_dev(ndev), "not using b2b mw\n"); + b2b_bar = 0; + ndev->b2b_off = 0; + } else { + b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx); + if (b2b_bar < 0) + return -EIO; + + dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar); + + bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); + + dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size); + + if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) { + dev_dbg(ndev_dev(ndev), + "b2b using first half of bar\n"); + ndev->b2b_off = bar_size >> 1; + } else if (bar_size >= XEON_B2B_MIN_SIZE) { + dev_dbg(ndev_dev(ndev), + "b2b using whole bar\n"); + ndev->b2b_off = 0; + --ndev->mw_count; + } else { + dev_dbg(ndev_dev(ndev), + "b2b bar size is too small\n"); + return -EIO; + } + } + + /* + * Reset the secondary bar sizes to match the primary bar sizes, + * except disable or halve the size of the b2b secondary bar. 
+ */ + pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz); + dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz); + if (b2b_bar == 1) { + if (ndev->b2b_off) + bar_sz -= 1; + else + bar_sz = 0; + } + + pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz); + pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz); + dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz); + + pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz); + dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz); + if (b2b_bar == 2) { + if (ndev->b2b_off) + bar_sz -= 1; + else + bar_sz = 0; + } + + pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz); + pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz); + dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz); + + /* SBAR01 hit by first part of the b2b bar */ + if (b2b_bar == 0) + bar_addr = addr->bar0_addr; + else if (b2b_bar == 1) + bar_addr = addr->bar2_addr64; + else if (b2b_bar == 2) + bar_addr = addr->bar4_addr64; + else + return -EIO; + + /* setup incoming bar limits == base addrs (zero length windows) */ + bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0); + iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); + bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); + dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr); + + bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); + iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); + bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); + dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr); + + /* zero incoming translation addrs */ + iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET); + iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET); + + ndev->peer_mmio = ndev->self_mmio; + + return 0; +} + +static int skx_init_ntb(struct intel_ntb_dev *ndev) +{ + int rc; + + + ndev->mw_count = XEON_MW_COUNT; + ndev->spad_count = SKX_SPAD_COUNT; + ndev->db_count = SKX_DB_COUNT; + ndev->db_link_mask = SKX_DB_LINK_BIT; + + /* DB fixup for using 31 right now */ + if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) + ndev->db_link_mask |= BIT_ULL(31); + + switch (ndev->ntb.topo) { + case NTB_TOPO_B2B_USD: + case NTB_TOPO_B2B_DSD: + ndev->self_reg = &skx_pri_reg; + ndev->peer_reg = &skx_b2b_reg; + ndev->xlat_reg = &skx_sec_xlat; + + if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { + rc = skx_setup_b2b_mw(ndev, + &xeon_b2b_dsd_addr, + &xeon_b2b_usd_addr); + } else { + rc = skx_setup_b2b_mw(ndev, + &xeon_b2b_usd_addr, + &xeon_b2b_dsd_addr); + } + + if (rc) + return rc; + + /* Enable Bus Master and Memory Space on the secondary side */ + iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, + ndev->self_mmio + SKX_SPCICMD_OFFSET); + + break; + + default: + return -EINVAL; + } + + ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + + ndev->reg->db_iowrite(ndev->db_valid_mask, + ndev->self_mmio + + ndev->self_reg->db_mask); + + return 0; +} + +static int skx_init_dev(struct intel_ntb_dev *ndev) +{ + struct pci_dev *pdev; + u8 ppd; + int rc; + + pdev = ndev_pdev(ndev); + + ndev->reg = &skx_reg; + + rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd); + if (rc) + return -EIO; + + ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); + dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd, + ntb_topo_string(ndev->ntb.topo)); + if (ndev->ntb.topo == NTB_TOPO_NONE) + return -EINVAL; + + if (pdev_is_skx_xeon(pdev)) + ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD; + + rc = skx_init_ntb(ndev); + if (rc) + return rc; + + return skx_init_isr(ndev); +} + +static int intel_ntb3_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, 
+ enum ntb_width max_width) +{ + struct intel_ntb_dev *ndev; + u32 ntb_ctl; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + dev_dbg(ndev_dev(ndev), + "Enabling link with max_speed %d max_width %d\n", + max_speed, max_width); + + if (max_speed != NTB_SPEED_AUTO) + dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed); + if (max_width != NTB_WIDTH_AUTO) + dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width); + + ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); + ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); + ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP; + ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP; + iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); + + return 0; +} +static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + unsigned long xlat_reg, limit_reg; + resource_size_t bar_size, mw_size; + void __iomem *mmio; + u64 base, limit, reg_val; + int bar; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + bar_size = pci_resource_len(ndev->ntb.pdev, bar); + + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + /* hardware requires that addr is aligned to bar size */ + if (addr & (bar_size - 1)) + return -EINVAL; + + /* make sure the range fits in the usable mw size */ + if (size > mw_size) + return -EINVAL; + + mmio = ndev->self_mmio; + xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10); + limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10); + base = pci_resource_start(ndev->ntb.pdev, bar); + + /* Set the limit if supported, if size is not mw_size */ + if (limit_reg && size != mw_size) + limit = base + size; + else + limit = base + mw_size; + + /* set and verify setting the translation address */ + iowrite64(addr, mmio + xlat_reg); + reg_val = ioread64(mmio + xlat_reg); + if (reg_val != addr) { + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val); + + /* set and verify setting the limit */ + iowrite64(limit, mmio + limit_reg); + reg_val = ioread64(mmio + limit_reg); + if (reg_val != limit) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val); + + /* setup the EP */ + limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000; + base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx)); + base &= ~0xf; + + if (limit_reg && size != mw_size) + limit = base + size; + else + limit = base + mw_size; + + /* set and verify setting the limit */ + iowrite64(limit, mmio + limit_reg); + reg_val = ioread64(mmio + limit_reg); + if (reg_val != limit) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val); + + return 0; +} + +static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + int bit; + + if (db_bits & ~ndev->db_valid_mask) + return -EINVAL; + + while (db_bits) { + bit = __ffs(db_bits); + iowrite32(1, ndev->peer_mmio + + ndev->peer_reg->db_bell + (bit * 4)); + db_bits &= db_bits - 1; + } + + return 0; +} + +static u64 intel_ntb3_db_read(struct ntb_dev *ntb) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_read(ndev, + ndev->self_mmio + + 
ndev->self_reg->db_clear); +} + +static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_write(ndev, db_bits, + ndev->self_mmio + + ndev->self_reg->db_clear); +} + /* XEON */ static u64 xeon_db_ioread(void __iomem *mmio) @@ -1755,6 +2331,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, XEON_B2B_MIN_SIZE); if (!ndev->peer_mmio) return -EIO; + + ndev->peer_addr = pci_resource_start(pdev, b2b_bar); } return 0; @@ -2019,6 +2597,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) goto err_mmio; } ndev->peer_mmio = ndev->self_mmio; + ndev->peer_addr = pci_resource_start(pdev, 0); return 0; @@ -2117,6 +2696,24 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev, if (rc) goto err_init_dev; + } else if (pdev_is_skx_xeon(pdev)) { + ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); + if (!ndev) { + rc = -ENOMEM; + goto err_ndev; + } + + ndev_init_struct(ndev, pdev); + ndev->ntb.ops = &intel_ntb3_ops; + + rc = intel_ntb_init_pci(ndev, pdev); + if (rc) + goto err_init_pci; + + rc = skx_init_dev(ndev); + if (rc) + goto err_init_dev; + } else { rc = -EINVAL; goto err_ndev; @@ -2140,7 +2737,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev, ndev_deinit_debugfs(ndev); if (pdev_is_atom(pdev)) atom_deinit_dev(ndev); - else if (pdev_is_xeon(pdev)) + else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev)) xeon_deinit_dev(ndev); err_init_dev: intel_ntb_deinit_pci(ndev); @@ -2158,7 +2755,7 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev) ndev_deinit_debugfs(ndev); if (pdev_is_atom(pdev)) atom_deinit_dev(ndev); - else if (pdev_is_xeon(pdev)) + else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev)) xeon_deinit_dev(ndev); intel_ntb_deinit_pci(ndev); kfree(ndev); @@ -2254,6 +2851,36 @@ static struct intel_b2b_addr xeon_b2b_dsd_addr = { .bar5_addr32 = XEON_B2B_BAR5_ADDR32, }; +static const struct intel_ntb_reg skx_reg = { + .poll_link = xeon_poll_link, + .link_is_up = xeon_link_is_up, + .db_ioread = skx_db_ioread, + .db_iowrite = skx_db_iowrite, + .db_size = sizeof(u64), + .ntb_ctl = SKX_NTBCNTL_OFFSET, + .mw_bar = {2, 4}, +}; + +static const struct intel_ntb_alt_reg skx_pri_reg = { + .db_bell = SKX_EM_DOORBELL_OFFSET, + .db_clear = SKX_IM_INT_STATUS_OFFSET, + .db_mask = SKX_IM_INT_DISABLE_OFFSET, + .spad = SKX_IM_SPAD_OFFSET, +}; + +static const struct intel_ntb_alt_reg skx_b2b_reg = { + .db_bell = SKX_IM_DOORBELL_OFFSET, + .db_clear = SKX_EM_INT_STATUS_OFFSET, + .db_mask = SKX_EM_INT_DISABLE_OFFSET, + .spad = SKX_B2B_SPAD_OFFSET, +}; + +static const struct intel_ntb_xlat_reg skx_sec_xlat = { +/* .bar0_base = SKX_EMBAR0_OFFSET, */ + .bar2_limit = SKX_IMBAR1XLMT_OFFSET, + .bar2_xlat = SKX_IMBAR1XBASE_OFFSET, +}; + /* operations for primary side of local ntb */ static const struct ntb_dev_ops intel_ntb_ops = { .mw_count = intel_ntb_mw_count, @@ -2281,6 +2908,31 @@ static const struct ntb_dev_ops intel_ntb_ops = { .peer_spad_write = intel_ntb_peer_spad_write, }; +static const struct ntb_dev_ops intel_ntb3_ops = { + .mw_count = intel_ntb_mw_count, + .mw_get_range = intel_ntb_mw_get_range, + .mw_set_trans = intel_ntb3_mw_set_trans, + .link_is_up = intel_ntb_link_is_up, + .link_enable = intel_ntb3_link_enable, + .link_disable = intel_ntb_link_disable, + .db_valid_mask = intel_ntb_db_valid_mask, + .db_vector_count = intel_ntb_db_vector_count, + .db_vector_mask = intel_ntb_db_vector_mask, + .db_read = intel_ntb3_db_read, + .db_clear = intel_ntb3_db_clear, + .db_set_mask = 
intel_ntb_db_set_mask, + .db_clear_mask = intel_ntb_db_clear_mask, + .peer_db_addr = intel_ntb_peer_db_addr, + .peer_db_set = intel_ntb3_peer_db_set, + .spad_is_unsafe = intel_ntb_spad_is_unsafe, + .spad_count = intel_ntb_spad_count, + .spad_read = intel_ntb_spad_read, + .spad_write = intel_ntb_spad_write, + .peer_spad_addr = intel_ntb_peer_spad_addr, + .peer_spad_read = intel_ntb_peer_spad_read, + .peer_spad_write = intel_ntb_peer_spad_write, +}; + static const struct file_operations intel_ntb_debugfs_info = { .owner = THIS_MODULE, .open = simple_open, @@ -2304,6 +2956,7 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = { {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)}, {0} }; MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl); diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h index 3ec149cf656276dd362fad25b37661b69d5fb3d0..f2cf8a783f1eef7c64dc49896174ee4eafc78d01 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.h +++ b/drivers/ntb/hw/intel/ntb_hw_intel.h @@ -70,6 +70,7 @@ #define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX 0x6F0D #define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E #define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F +#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C /* Intel Xeon hardware */ @@ -150,6 +151,51 @@ #define XEON_DB_TOTAL_SHIFT 16 #define XEON_SPAD_COUNT 16 +/* Intel Skylake Xeon hardware */ +#define SKX_IMBAR1SZ_OFFSET 0x00d0 +#define SKX_IMBAR2SZ_OFFSET 0x00d1 +#define SKX_EMBAR1SZ_OFFSET 0x00d2 +#define SKX_EMBAR2SZ_OFFSET 0x00d3 +#define SKX_DEVCTRL_OFFSET 0x0098 +#define SKX_DEVSTS_OFFSET 0x009a +#define SKX_UNCERRSTS_OFFSET 0x014c +#define SKX_CORERRSTS_OFFSET 0x0158 +#define SKX_LINK_STATUS_OFFSET 0x01a2 + +#define SKX_NTBCNTL_OFFSET 0x0000 +#define SKX_IMBAR1XBASE_OFFSET 0x0010 /* SBAR2XLAT */ +#define SKX_IMBAR1XLMT_OFFSET 0x0018 /* SBAR2LMT */ +#define SKX_IMBAR2XBASE_OFFSET 0x0020 /* SBAR4XLAT */ +#define SKX_IMBAR2XLMT_OFFSET 0x0028 /* SBAR4LMT */ +#define SKX_IM_INT_STATUS_OFFSET 0x0040 +#define SKX_IM_INT_DISABLE_OFFSET 0x0048 +#define SKX_IM_SPAD_OFFSET 0x0080 /* SPAD */ +#define SKX_USMEMMISS_OFFSET 0x0070 +#define SKX_INTVEC_OFFSET 0x00d0 +#define SKX_IM_DOORBELL_OFFSET 0x0100 /* SDOORBELL0 */ +#define SKX_B2B_SPAD_OFFSET 0x0180 /* B2B SPAD */ +#define SKX_EMBAR0XBASE_OFFSET 0x4008 /* B2B_XLAT */ +#define SKX_EMBAR1XBASE_OFFSET 0x4010 /* PBAR2XLAT */ +#define SKX_EMBAR1XLMT_OFFSET 0x4018 /* PBAR2LMT */ +#define SKX_EMBAR2XBASE_OFFSET 0x4020 /* PBAR4XLAT */ +#define SKX_EMBAR2XLMT_OFFSET 0x4028 /* PBAR4LMT */ +#define SKX_EM_INT_STATUS_OFFSET 0x4040 +#define SKX_EM_INT_DISABLE_OFFSET 0x4048 +#define SKX_EM_SPAD_OFFSET 0x4080 /* remote SPAD */ +#define SKX_EM_DOORBELL_OFFSET 0x4100 /* PDOORBELL0 */ +#define SKX_SPCICMD_OFFSET 0x4504 /* SPCICMD */ +#define SKX_EMBAR0_OFFSET 0x4510 /* SBAR0BASE */ +#define SKX_EMBAR1_OFFSET 0x4518 /* SBAR23BASE */ +#define SKX_EMBAR2_OFFSET 0x4520 /* SBAR45BASE */ + +#define SKX_DB_COUNT 32 +#define SKX_DB_LINK 32 +#define SKX_DB_LINK_BIT BIT_ULL(SKX_DB_LINK) +#define SKX_DB_MSIX_VECTOR_COUNT 33 +#define SKX_DB_MSIX_VECTOR_SHIFT 1 +#define SKX_DB_TOTAL_SHIFT 33 +#define SKX_SPAD_COUNT 16 + /* Intel Atom hardware */ #define ATOM_SBAR2XLAT_OFFSET 0x0008 @@ -240,6 +286,7 @@ #define NTB_HWERR_SDOORBELL_LOCKUP BIT_ULL(0) #define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1) #define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2) +#define 
NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3) /* flags to indicate unsafe api */ #define NTB_UNSAFE_DB BIT_ULL(0) @@ -263,6 +310,7 @@ struct intel_ntb_reg { struct intel_ntb_alt_reg { unsigned long db_bell; unsigned long db_mask; + unsigned long db_clear; unsigned long spad; }; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 8601c10acf74e3267d1d5501e20149ce74cdb38b..f81aa4b18d9f4dd76ec0be63f46865d3b04f89c6 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -66,6 +66,7 @@ #define NTB_TRANSPORT_VER "4" #define NTB_TRANSPORT_NAME "ntb_transport" #define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB" +#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2) MODULE_DESCRIPTION(NTB_TRANSPORT_DESC); MODULE_VERSION(NTB_TRANSPORT_VER); @@ -242,9 +243,6 @@ enum { NUM_MWS, MW0_SZ_HIGH, MW0_SZ_LOW, - MW1_SZ_HIGH, - MW1_SZ_LOW, - MAX_SPAD, }; #define dev_client_dev(__dev) \ @@ -257,7 +255,7 @@ enum { #define NTB_QP_DEF_NUM_ENTRIES 100 #define NTB_LINK_DOWN_TIMEOUT 10 #define DMA_RETRIES 20 -#define DMA_OUT_RESOURCE_TO 50 +#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) static void ntb_transport_rxc_db(unsigned long data); static const struct ntb_ctx_ops ntb_transport_ops; @@ -811,7 +809,7 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) { struct ntb_transport_qp *qp; u64 qp_bitmap_alloc; - int i; + unsigned int i, count; qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free; @@ -831,7 +829,8 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) * goes down, blast them now to give them a sane value the next * time they are accessed */ - for (i = 0; i < MAX_SPAD; i++) + count = ntb_spad_count(nt->ndev); + for (i = 0; i < count; i++) ntb_spad_write(nt->ndev, i, 0); } @@ -960,7 +959,6 @@ static void ntb_qp_link_work(struct work_struct *work) ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num)); /* query remote spad for qp ready bits */ - ntb_peer_spad_read(nt->ndev, QP_LINKS); dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val); /* See if the remote side is up */ @@ -1064,17 +1062,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) { struct ntb_transport_ctx *nt; struct ntb_transport_mw *mw; - unsigned int mw_count, qp_count; + unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads; u64 qp_bitmap; int node; int rc, i; mw_count = ntb_mw_count(ndev); - if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) { - dev_err(&ndev->dev, "Not enough scratch pad registers for %s", - NTB_TRANSPORT_NAME); - return -EIO; - } if (ntb_db_is_unsafe(ndev)) dev_dbg(&ndev->dev, @@ -1090,8 +1083,18 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) return -ENOMEM; nt->ndev = ndev; + spad_count = ntb_spad_count(ndev); + + /* Limit the MW's based on the availability of scratchpads */ + + if (spad_count < NTB_TRANSPORT_MIN_SPADS) { + nt->mw_count = 0; + rc = -EINVAL; + goto err; + } - nt->mw_count = mw_count; + max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2; + nt->mw_count = min(mw_count, max_mw_count_for_spads); nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec), GFP_KERNEL, node); diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 6a50f20bf1cde0e080f766ede5df147a9ea11a7b..e75d4fdc08663905eace859cff6cdebdb97e92b5 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -72,7 +72,7 @@ #define MAX_THREADS 32 #define MAX_TEST_SIZE SZ_1M #define MAX_SRCS 32 -#define 
DMA_OUT_RESOURCE_TO 50 +#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) #define DMA_RETRIES 20 #define SZ_4G (1ULL << 32) #define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ @@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, return -ENOMEM; if (mutex_is_locked(&perf->run_mutex)) { - out_off = snprintf(buf, 64, "running\n"); + out_off = scnprintf(buf, 64, "running\n"); goto read_from_buf; } @@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, break; if (pctx->status) { - out_off += snprintf(buf + out_off, 1024 - out_off, + out_off += scnprintf(buf + out_off, 1024 - out_off, "%d: error %d\n", i, pctx->status); continue; } rate = div64_u64(pctx->copied, pctx->diff_us); - out_off += snprintf(buf + out_off, 1024 - out_off, + out_off += scnprintf(buf + out_off, 1024 - out_off, "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", i, pctx->copied, pctx->diff_us, rate); } diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c index 7d311799fca1696ee7c5884fa2d58af2f2061816..435861189d97f87fc07397e6a174571d524bdb6f 100644 --- a/drivers/ntb/test/ntb_pingpong.c +++ b/drivers/ntb/test/ntb_pingpong.c @@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer"); static unsigned long db_init = 0x7; module_param(db_init, ulong, 0644); -MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer"); +MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer"); struct pp_ctx { struct ntb_dev *ntb; diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c index 5371b374f1fe1a870c561b8392d55f4e4bafb941..e8f68f5732f11de4dacd10487df30a7f768506df 100644 --- a/drivers/nubus/proc.c +++ b/drivers/nubus/proc.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include static int diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index a8b6949a8778835a89cef3469f031b77956d0c45..23d4a1728cdfa4993903b6b4504c6a70f5a7fff4 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -715,7 +715,7 @@ EXPORT_SYMBOL_GPL(nd_cmd_in_size); u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, const struct nd_cmd_desc *desc, int idx, const u32 *in_field, - const u32 *out_field) + const u32 *out_field, unsigned long remainder) { if (idx >= desc->out_num) return UINT_MAX; @@ -727,9 +727,24 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, return in_field[1]; else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) return out_field[1]; - else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) - return out_field[1] - 8; - else if (cmd == ND_CMD_CALL) { + else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) { + /* + * Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is + * "Size of Output Buffer in bytes, including this + * field." + */ + if (out_field[1] < 4) + return 0; + /* + * ACPI 6.1 is ambiguous if 'status' is included in the + * output size. If we encounter an output size that + * overshoots the remainder by 4 bytes, assume it was + * including 'status'. 
+ */ + if (out_field[1] - 8 == remainder) + return remainder; + return out_field[1] - 4; + } else if (cmd == ND_CMD_CALL) { struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field; return pkg->nd_size_out; @@ -876,7 +891,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, /* process an output envelope */ for (i = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, - (u32 *) in_env, (u32 *) out_env); + (u32 *) in_env, (u32 *) out_env, 0); u32 copy; if (out_size == UINT_MAX) { diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index d5dc80c48b4cb36a55c54a2383ce9812ae068260..b3323c0697f6239ebbfe757137cde8352fe3c480 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -22,9 +22,8 @@ void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns) { struct nd_namespace_common *ndns = *_ndns; - dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex) - || ndns->claim != dev, - "%s: invalid claim\n", __func__); + lockdep_assert_held(&ndns->dev.mutex); + dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__); ndns->claim = NULL; *_ndns = NULL; put_device(&ndns->dev); @@ -49,9 +48,8 @@ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, { if (attach->claim) return false; - dev_WARN_ONCE(dev, !mutex_is_locked(&attach->dev.mutex) - || *_ndns, - "%s: invalid claim\n", __func__); + lockdep_assert_held(&attach->dev.mutex); + dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__); attach->claim = dev; *_ndns = attach; get_device(&attach->dev); @@ -226,6 +224,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, resource_size_t offset, void *buf, size_t size, int rw) { struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); + unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512); + sector_t sector = offset >> 9; + int rc = 0; + + if (unlikely(!size)) + return 0; if (unlikely(offset + size > nsio->size)) { dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n"); @@ -233,17 +237,31 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, } if (rw == READ) { - unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512); - - if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align))) + if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) return -EIO; return memcpy_from_pmem(buf, nsio->addr + offset, size); - } else { - memcpy_to_pmem(nsio->addr + offset, buf, size); - nvdimm_flush(to_nd_region(ndns->dev.parent)); } - return 0; + if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { + if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) { + long cleared; + + cleared = nvdimm_clear_poison(&ndns->dev, offset, size); + if (cleared < size) + rc = -EIO; + if (cleared > 0 && cleared / 512) { + cleared /= 512; + badblocks_clear(&nsio->bb, sector, cleared); + } + invalidate_pmem(nsio->addr + offset, size); + } else + rc = -EIO; + } + + memcpy_to_pmem(nsio->addr + offset, buf, size); + nvdimm_flush(to_nd_region(ndns->dev.parent)); + + return rc; } int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio) @@ -253,7 +271,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio) nsio->size = resource_size(res); if (!devm_request_mem_region(dev, res->start, resource_size(res), - dev_name(dev))) { + dev_name(&ndns->dev))) { dev_warn(dev, "could not reserve region %pR\n", res); return -EBUSY; } diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 
7ceba08774b690dbda57f667e223b3b0a180dd09..9303cfeb8bee507c171c2060fb7602e7b0519267 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -317,35 +317,6 @@ ssize_t nd_sector_size_store(struct device *dev, const char *buf, } } -void __nd_iostat_start(struct bio *bio, unsigned long *start) -{ - struct gendisk *disk = bio->bi_bdev->bd_disk; - const int rw = bio_data_dir(bio); - int cpu = part_stat_lock(); - - *start = jiffies; - part_round_stats(cpu, &disk->part0); - part_stat_inc(cpu, &disk->part0, ios[rw]); - part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio)); - part_inc_in_flight(&disk->part0, rw); - part_stat_unlock(); -} -EXPORT_SYMBOL(__nd_iostat_start); - -void nd_iostat_end(struct bio *bio, unsigned long start) -{ - struct gendisk *disk = bio->bi_bdev->bd_disk; - unsigned long duration = jiffies - start; - const int rw = bio_data_dir(bio); - int cpu = part_stat_lock(); - - part_stat_add(cpu, &disk->part0, ticks[rw], duration); - part_round_stats(cpu, &disk->part0); - part_dec_in_flight(&disk->part0, rw); - part_stat_unlock(); -} -EXPORT_SYMBOL(nd_iostat_end); - static ssize_t commands_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index 619834e144d1e65e2314cde9356c86cff44d5053..ee0b412827bfb43fc739d5e03e873f12254517fa 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -64,6 +64,8 @@ static int nvdimm_probe(struct device *dev) nd_label_copy(ndd, to_next_namespace_index(ndd), to_current_namespace_index(ndd)); rc = nd_label_reserve_dpa(ndd); + if (ndd->ns_current >= 0) + nvdimm_set_aliasing(dev); nvdimm_bus_unlock(dev); if (rc) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index d614493ad5acb849037d22bcc44fd78c0e80e1a3..0eedc49e0d473ed36b5ef9832760aa8498b9f146 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -184,6 +184,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, return rc; } +void nvdimm_set_aliasing(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + + nvdimm->flags |= NDD_ALIASING; +} + static void nvdimm_release(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c index 11ea90120542dcbec8410147efca7a502eea9a77..6f9a6ffd7cde25f3e4714b2035eb519db1099fd8 100644 --- a/drivers/nvdimm/e820.c +++ b/drivers/nvdimm/e820.c @@ -84,18 +84,8 @@ static struct platform_driver e820_pmem_driver = { }, }; -static __init int e820_pmem_init(void) -{ - return platform_driver_register(&e820_pmem_driver); -} - -static __exit void e820_pmem_exit(void) -{ - platform_driver_unregister(&e820_pmem_driver); -} +module_platform_driver(e820_pmem_driver); MODULE_ALIAS("platform:e820_pmem*"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation"); -module_init(e820_pmem_init); -module_exit(e820_pmem_exit); diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index fac7cabe8f563e8e0e08b97f79aeb4f86174f66f..dd615345699fddf38f9117344278bf9364afe547 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -938,7 +938,7 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region, } for_each_dpa_resource(ndd, res) - if (strncmp(res->name, "pmem", 3) == 0) + if (strncmp(res->name, "pmem", 4) == 0) count++; WARN_ON_ONCE(!count); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index abe5c6bc756c255193d803039973971368b9471d..6307088b375f2d899002f9fc9fae16c387357598 100644 --- 
a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1132,7 +1132,7 @@ static ssize_t size_show(struct device *dev, return sprintf(buf, "%llu\n", (unsigned long long) nvdimm_namespace_capacity(to_ndns(dev))); } -static DEVICE_ATTR(size, S_IRUGO, size_show, size_store); +static DEVICE_ATTR(size, 0444, size_show, size_store); static u8 *namespace_to_uuid(struct device *dev) { @@ -1456,7 +1456,7 @@ static umode_t namespace_visible(struct kobject *kobj, if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { if (a == &dev_attr_size.attr) - return S_IWUSR | S_IRUGO; + return 0644; if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr) return 0; @@ -1653,7 +1653,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) u64 hw_start, hw_end, pmem_start, pmem_end; struct nd_label_ent *label_ent; - WARN_ON(!mutex_is_locked(&nd_mapping->lock)); + lockdep_assert_held(&nd_mapping->lock); list_for_each_entry(label_ent, &nd_mapping->labels, list) { nd_label = label_ent->label; if (!nd_label) @@ -1997,7 +1997,7 @@ struct device *create_namespace_blk(struct nd_region *nd_region, struct nd_mapping *nd_mapping = &nd_region->mapping[0]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct nd_namespace_blk *nsblk; - char *name[NSLABEL_NAME_LEN]; + char name[NSLABEL_NAME_LEN]; struct device *dev = NULL; struct resource *res; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index d3b2fca8deec20b930ca55e025c7f116b30f9199..35dd75057e1697f2e03de12c62cf3f6cabb69aec 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -238,6 +238,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, void *buf, size_t len); long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, unsigned int len); +void nvdimm_set_aliasing(struct device *dev); struct nd_btt *to_nd_btt(struct device *dev); struct nd_gen_sb { @@ -377,10 +378,17 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) if (!blk_queue_io_stat(disk->queue)) return false; - __nd_iostat_start(bio, start); + *start = jiffies; + generic_start_io_acct(bio_data_dir(bio), + bio_sectors(bio), &disk->part0); return true; } -void nd_iostat_end(struct bio *bio, unsigned long start); +static inline void nd_iostat_end(struct bio *bio, unsigned long start) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + + generic_end_io_acct(bio_data_dir(bio), &disk->part0, start); +} static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len) { diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index cea8350fbc7ec2f2e617199fd2bc8b8cb1df2fbb..a2ac9e641aa9341f2fcca9fa8b968fd874e80a90 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -108,7 +108,7 @@ static ssize_t align_show(struct device *dev, { struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); - return sprintf(buf, "%lx\n", nd_pfn->align); + return sprintf(buf, "%ld\n", nd_pfn->align); } static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 24618431a14bae7e891438b3601d017d1d34db4d..7282d7495bf1f0a1bf6685012dafb1d9cc60bfa4 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -53,21 +53,24 @@ static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, struct device *dev = to_dev(pmem); sector_t sector; long cleared; + int rc = 0; sector = (offset - pmem->data_offset) / 512; - cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); + cleared = 
nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); + if (cleared < len) + rc = -EIO; if (cleared > 0 && cleared / 512) { - dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", - __func__, (unsigned long long) sector, - cleared / 512, cleared / 512 > 1 ? "s" : ""); - badblocks_clear(&pmem->bb, sector, cleared / 512); - } else { - return -EIO; + cleared /= 512; + dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__, + (unsigned long long) sector, cleared, + cleared > 1 ? "s" : ""); + badblocks_clear(&pmem->bb, sector, cleared); } invalidate_pmem(pmem->virt_addr + offset, len); - return 0; + + return rc; } static void write_pmem(void *pmem_addr, struct page *page, @@ -270,7 +273,7 @@ static int pmem_attach_disk(struct device *dev, dev_warn(dev, "unable to guarantee persistence of writes\n"); if (!devm_request_mem_region(dev, res->start, resource_size(res), - dev_name(dev))) { + dev_name(&ndns->dev))) { dev_warn(dev, "could not reserve region %pR\n", res); return -EBUSY; } diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 6af5e629140cd3336d590c6ec24783ebd6e3d78d..7cd705f3247c341160a6b2ad7d1827c1604f34ce 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -509,7 +509,7 @@ void nd_mapping_free_labels(struct nd_mapping *nd_mapping) { struct nd_label_ent *label_ent, *e; - WARN_ON(!mutex_is_locked(&nd_mapping->lock)); + lockdep_assert_held(&nd_mapping->lock); list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { list_del(&label_ent->list); kfree(label_ent); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index f7d37a62f874b780c94a0bbbf3d2607845c0c5ab..90745a616df7c984c74317d72cd0e8832a8b0476 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -43,3 +43,20 @@ config NVME_RDMA from https://github.com/linux-nvme/nvme-cli. If unsure, say N. + +config NVME_FC + tristate "NVM Express over Fabrics FC host driver" + depends on BLOCK + depends on HAS_DMA + select NVME_CORE + select NVME_FABRICS + select SG_POOL + help + This provides support for the NVMe over Fabrics protocol using + the FC transport. This allows you to use remote block devices + exported using the NVMe protocol set. + + To configure a NVMe over Fabrics controller use the nvme-cli tool + from https://github.com/linux-nvme/nvme-cli. + + If unsure, say N. 
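For context on how the new NVME_FC transport selected above is consumed: the fc.c driver added later in this patch exports nvme_fc_register_localport(), which an FC LLDD calls to announce each host-side FC port to the transport. The fragment below is a minimal illustrative sketch, not part of the patch; the example_* names, the numeric WWNN/WWPN/port-ID/limit values, the <linux/nvme-fc-driver.h> header name and the FC_PORT_ROLE_NVME_INITIATOR flag are assumptions, while the function signature, the nvme_fc_port_info fields and the template members checked by the registration path are taken from the code added in this patch.

#include <linux/device.h>
#include <linux/nvme-fc-driver.h>	/* assumed header exposing the nvme-fc LLDD API */

/*
 * Hypothetical LLDD port template. The registration path added in this
 * patch rejects the call unless .localport_delete, .remoteport_delete,
 * .ls_req, .fcp_io, .ls_abort and .fcp_abort are set and the limits
 * below are non-zero; the handler implementations are omitted from
 * this sketch.
 */
static struct nvme_fc_port_template example_fc_template = {
	/* .localport_delete / .remoteport_delete / .ls_req / .fcp_io /
	 * .ls_abort / .fcp_abort would point at the LLDD's handlers here. */
	.max_hw_queues		= 4,
	.max_sgl_segments	= 128,
	.max_dif_sgl_segments	= 128,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= 0,	/* per-localport LLDD private area */
	.remote_priv_sz		= 0,	/* per-remoteport LLDD private area */
};

static int example_register_hba_port(struct device *hw_dev)
{
	struct nvme_fc_port_info pinfo = {
		.node_name = 0x20000090fa000001ULL,	/* placeholder WWNN */
		.port_name = 0x10000090fa000001ULL,	/* placeholder WWPN */
		.port_role = FC_PORT_ROLE_NVME_INITIATOR, /* assumed role flag */
		.port_id   = 0x010200,			/* fabric-assigned address */
	};
	struct nvme_fc_local_port *localport;

	/* Returns 0 and fills in localport on success; on failure it
	 * returns a negative errno and sets the port pointer to NULL. */
	return nvme_fc_register_localport(&pinfo, &example_fc_template,
					  hw_dev, &localport);
}

The matching tear-down is nvme_fc_unregister_localport(localport), also added in this patch; subsystem ports discovered on the fabric are reported analogously through nvme_fc_register_remoteport().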
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index 47abcec23514baedf3d48ce01a42e3301297cac3..f1a7d945fbb6624212c2c357a1b7f5a66eaa4b96 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_NVME_CORE) += nvme-core.o obj-$(CONFIG_BLK_DEV_NVME) += nvme.o obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o +obj-$(CONFIG_NVME_FC) += nvme-fc.o nvme-core-y := core.o nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o @@ -12,3 +13,5 @@ nvme-y += pci.o nvme-fabrics-y += fabrics.o nvme-rdma-y += rdma.o + +nvme-fc-y += fc.o diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 79e679d12f3b3667ad815e0bd39ab428da6651ac..b40cfb076f02446fda62aea907301e4a62dcc17a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -201,13 +201,7 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk) void nvme_requeue_req(struct request *req) { - unsigned long flags; - - blk_mq_requeue_request(req); - spin_lock_irqsave(req->q->queue_lock, flags); - if (!blk_queue_stopped(req->q)) - blk_mq_kick_requeue_list(req->q); - spin_unlock_irqrestore(req->q->queue_lock, flags); + blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q)); } EXPORT_SYMBOL_GPL(nvme_requeue_req); @@ -227,8 +221,7 @@ struct request *nvme_alloc_request(struct request_queue *q, req->cmd_type = REQ_TYPE_DRV_PRIV; req->cmd_flags |= REQ_FAILFAST_DRIVER; - req->cmd = (unsigned char *)cmd; - req->cmd_len = sizeof(struct nvme_command); + nvme_req(req)->cmd = cmd; return req; } @@ -246,8 +239,6 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd) { struct nvme_dsm_range *range; - struct page *page; - int offset; unsigned int nr_bytes = blk_rq_bytes(req); range = kmalloc(sizeof(*range), GFP_ATOMIC); @@ -264,19 +255,12 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req, cmnd->dsm.nr = 0; cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); - req->completion_data = range; - page = virt_to_page(range); - offset = offset_in_page(range); - blk_add_request_payload(req, page, offset, sizeof(*range)); - - /* - * we set __data_len back to the size of the area to be discarded - * on disk. This allows us to report completion on the full amount - * of blocks described by the request. - */ - req->__data_len = nr_bytes; + req->special_vec.bv_page = virt_to_page(range); + req->special_vec.bv_offset = offset_in_page(range); + req->special_vec.bv_len = sizeof(*range); + req->rq_flags |= RQF_SPECIAL_PAYLOAD; - return 0; + return BLK_MQ_RQ_QUEUE_OK; } static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req, @@ -295,7 +279,6 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req, memset(cmnd, 0, sizeof(*cmnd)); cmnd->rw.opcode = (rq_data_dir(req) ? 
nvme_cmd_write : nvme_cmd_read); - cmnd->rw.command_id = req->tag; cmnd->rw.nsid = cpu_to_le32(ns->ns_id); cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); @@ -324,10 +307,10 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req, int nvme_setup_cmd(struct nvme_ns *ns, struct request *req, struct nvme_command *cmd) { - int ret = 0; + int ret = BLK_MQ_RQ_QUEUE_OK; if (req->cmd_type == REQ_TYPE_DRV_PRIV) - memcpy(cmd, req->cmd, sizeof(*cmd)); + memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); else if (req_op(req) == REQ_OP_FLUSH) nvme_setup_flush(ns, cmd); else if (req_op(req) == REQ_OP_DISCARD) @@ -335,6 +318,8 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req, else nvme_setup_rw(ns, req, cmd); + cmd->common.command_id = req->tag; + return ret; } EXPORT_SYMBOL_GPL(nvme_setup_cmd); @@ -344,7 +329,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd); * if the result is positive, it's an NVM Express status code */ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, - struct nvme_completion *cqe, void *buffer, unsigned bufflen, + union nvme_result *result, void *buffer, unsigned bufflen, unsigned timeout, int qid, int at_head, int flags) { struct request *req; @@ -355,7 +340,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, return PTR_ERR(req); req->timeout = timeout ? timeout : ADMIN_TIMEOUT; - req->special = cqe; if (buffer && bufflen) { ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); @@ -364,6 +348,8 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, } blk_execute_rq(req->q, NULL, req, at_head); + if (result) + *result = nvme_req(req)->result; ret = req->errors; out: blk_mq_free_request(req); @@ -385,7 +371,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, u32 *result, unsigned timeout) { bool write = nvme_is_write(cmd); - struct nvme_completion cqe; struct nvme_ns *ns = q->queuedata; struct gendisk *disk = ns ? ns->disk : NULL; struct request *req; @@ -398,7 +383,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, return PTR_ERR(req); req->timeout = timeout ? 
timeout : ADMIN_TIMEOUT; - req->special = &cqe; if (ubuffer && bufflen) { ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, @@ -453,7 +437,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, blk_execute_rq(req->q, disk, req, 0); ret = req->errors; if (result) - *result = le32_to_cpu(cqe.result); + *result = le32_to_cpu(nvme_req(req)->result.u32); if (meta && !ret && !write) { if (copy_to_user(meta_buffer, meta, meta_len)) ret = -EFAULT; @@ -602,7 +586,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, void *buffer, size_t buflen, u32 *result) { struct nvme_command c; - struct nvme_completion cqe; + union nvme_result res; int ret; memset(&c, 0, sizeof(c)); @@ -610,10 +594,10 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, c.features.nsid = cpu_to_le32(nsid); c.features.fid = cpu_to_le32(fid); - ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0, + ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0, NVME_QID_ANY, 0, 0); if (ret >= 0 && result) - *result = le32_to_cpu(cqe.result); + *result = le32_to_cpu(res.u32); return ret; } @@ -621,7 +605,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, void *buffer, size_t buflen, u32 *result) { struct nvme_command c; - struct nvme_completion cqe; + union nvme_result res; int ret; memset(&c, 0, sizeof(c)); @@ -629,10 +613,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, c.features.fid = cpu_to_le32(fid); c.features.dword11 = cpu_to_le32(dword11); - ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, + ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0, NVME_QID_ANY, 0, 0); if (ret >= 0 && result) - *result = le32_to_cpu(cqe.result); + *result = le32_to_cpu(res.u32); return ret; } @@ -1683,27 +1667,24 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) if (nvme_revalidate_ns(ns, &id)) goto out_free_queue; - if (nvme_nvm_ns_supported(ns, id)) { - if (nvme_nvm_register(ns, disk_name, node, - &nvme_ns_attr_group)) { - dev_warn(ctrl->dev, "%s: LightNVM init failure\n", - __func__); - goto out_free_id; - } - } else { - disk = alloc_disk_node(0, node); - if (!disk) - goto out_free_id; + if (nvme_nvm_ns_supported(ns, id) && + nvme_nvm_register(ns, disk_name, node)) { + dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__); + goto out_free_id; + } - disk->fops = &nvme_fops; - disk->private_data = ns; - disk->queue = ns->queue; - disk->flags = GENHD_FL_EXT_DEVT; - memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); - ns->disk = disk; + disk = alloc_disk_node(0, node); + if (!disk) + goto out_free_id; - __nvme_revalidate_disk(disk, id); - } + disk->fops = &nvme_fops; + disk->private_data = ns; + disk->queue = ns->queue; + disk->flags = GENHD_FL_EXT_DEVT; + memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); + ns->disk = disk; + + __nvme_revalidate_disk(disk, id); mutex_lock(&ctrl->namespaces_mutex); list_add_tail(&ns->list, &ctrl->namespaces); @@ -1713,14 +1694,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) kfree(id); - if (ns->ndev) - return; - device_add_disk(ctrl->device, ns->disk); if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj, &nvme_ns_attr_group)) pr_warn("%s: failed to create sysfs group for identification\n", ns->disk->disk_name); + if (ns->ndev && nvme_nvm_register_sysfs(ns)) + pr_warn("%s: failed to register lightnvm sysfs group for identification\n", + ns->disk->disk_name); return; 
out_free_id: kfree(id); @@ -1742,6 +1723,8 @@ static void nvme_ns_remove(struct nvme_ns *ns) blk_integrity_unregister(ns->disk); sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, &nvme_ns_attr_group); + if (ns->ndev) + nvme_nvm_unregister_sysfs(ns); del_gendisk(ns->disk); blk_mq_abort_requeue_list(ns->queue); blk_cleanup_queue(ns->queue); @@ -1905,18 +1888,25 @@ static void nvme_async_event_work(struct work_struct *work) spin_unlock_irq(&ctrl->lock); } -void nvme_complete_async_event(struct nvme_ctrl *ctrl, - struct nvme_completion *cqe) +void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, + union nvme_result *res) { - u16 status = le16_to_cpu(cqe->status) >> 1; - u32 result = le32_to_cpu(cqe->result); + u32 result = le32_to_cpu(res->u32); + bool done = true; - if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) { + switch (le16_to_cpu(status) >> 1) { + case NVME_SC_SUCCESS: + done = false; + /*FALLTHRU*/ + case NVME_SC_ABORT_REQ: ++ctrl->event_limit; schedule_work(&ctrl->async_event_work); + break; + default: + break; } - if (status != NVME_SC_SUCCESS) + if (done) return; switch (result & 0xff07) { @@ -2078,14 +2068,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl) struct nvme_ns *ns; mutex_lock(&ctrl->namespaces_mutex); - list_for_each_entry(ns, &ctrl->namespaces, list) { - spin_lock_irq(ns->queue->queue_lock); - queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); - spin_unlock_irq(ns->queue->queue_lock); - - blk_mq_cancel_requeue_work(ns->queue); - blk_mq_stop_hw_queues(ns->queue); - } + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_quiesce_queue(ns->queue); mutex_unlock(&ctrl->namespaces_mutex); } EXPORT_SYMBOL_GPL(nvme_stop_queues); @@ -2096,7 +2080,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) mutex_lock(&ctrl->namespaces_mutex); list_for_each_entry(ns, &ctrl->namespaces, list) { - queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); blk_mq_start_stopped_hw_queues(ns->queue, true); blk_mq_kick_requeue_list(ns->queue); } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 5a3f008d348094c53ce2c3a8d7e5cdc270c646d0..916d1360805964cbe6a095576ba2049af52384f8 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -161,7 +161,7 @@ EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn); int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) { struct nvme_command cmd; - struct nvme_completion cqe; + union nvme_result res; int ret; memset(&cmd, 0, sizeof(cmd)); @@ -169,11 +169,11 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) cmd.prop_get.fctype = nvme_fabrics_type_property_get; cmd.prop_get.offset = cpu_to_le32(off); - ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0, + ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0, NVME_QID_ANY, 0, 0); if (ret >= 0) - *val = le64_to_cpu(cqe.result64); + *val = le64_to_cpu(res.u64); if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", @@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32); int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) { struct nvme_command cmd; - struct nvme_completion cqe; + union nvme_result res; int ret; memset(&cmd, 0, sizeof(cmd)); @@ -216,11 +216,11 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) cmd.prop_get.attrib = 1; cmd.prop_get.offset = cpu_to_le32(off); - ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0, + ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0, NVME_QID_ANY, 0, 0); if (ret >= 
0) - *val = le64_to_cpu(cqe.result64); + *val = le64_to_cpu(res.u64); if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", @@ -368,7 +368,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) { struct nvme_command cmd; - struct nvme_completion cqe; + union nvme_result res; struct nvmf_connect_data *data; int ret; @@ -400,16 +400,16 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); - ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, + ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, data, sizeof(*data), 0, NVME_QID_ANY, 1, BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); if (ret) { - nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result), + nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); goto out_free_data; } - ctrl->cntlid = le16_to_cpu(cqe.result16); + ctrl->cntlid = le16_to_cpu(res.u16); out_free_data: kfree(data); @@ -441,7 +441,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) { struct nvme_command cmd; struct nvmf_connect_data *data; - struct nvme_completion cqe; + union nvme_result res; int ret; memset(&cmd, 0, sizeof(cmd)); @@ -459,11 +459,11 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); - ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe, + ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res, data, sizeof(*data), 0, qid, 1, BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); if (ret) { - nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result), + nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); } kfree(data); @@ -576,7 +576,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, nqnlen = strlen(opts->subsysnqn); if (nqnlen >= NVMF_NQN_SIZE) { pr_err("%s needs to be < %d bytes\n", - opts->subsysnqn, NVMF_NQN_SIZE); + opts->subsysnqn, NVMF_NQN_SIZE); ret = -EINVAL; goto out; } @@ -666,10 +666,12 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, if (nqnlen >= NVMF_NQN_SIZE) { pr_err("%s needs to be < %d bytes\n", p, NVMF_NQN_SIZE); + kfree(p); ret = -EINVAL; goto out; } opts->host = nvmf_host_add(p); + kfree(p); if (!opts->host) { ret = -ENOMEM; goto out; @@ -825,8 +827,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count) out_unlock: mutex_unlock(&nvmf_transports_mutex); out_free_opts: - nvmf_host_put(opts->host); - kfree(opts); + nvmf_free_options(opts); return ERR_PTR(ret); } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c new file mode 100644 index 0000000000000000000000000000000000000000..771e2e76187222dfb71616f5665c7b2b22802c74 --- /dev/null +++ b/drivers/nvme/host/fc.c @@ -0,0 +1,2586 @@ +/* + * Copyright (c) 2016 Avago Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful. + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, + * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO + * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. 
+ * See the GNU General Public License for more details, a copy of which + * can be found in the file COPYING included with this package + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include + +#include "nvme.h" +#include "fabrics.h" +#include +#include + + +/* *************************** Data Structures/Defines ****************** */ + + +/* + * We handle AEN commands ourselves and don't even let the + * block layer know about them. + */ +#define NVME_FC_NR_AEN_COMMANDS 1 +#define NVME_FC_AQ_BLKMQ_DEPTH \ + (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS) +#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1) + +enum nvme_fc_queue_flags { + NVME_FC_Q_CONNECTED = (1 << 0), +}; + +#define NVMEFC_QUEUE_DELAY 3 /* ms units */ + +struct nvme_fc_queue { + struct nvme_fc_ctrl *ctrl; + struct device *dev; + struct blk_mq_hw_ctx *hctx; + void *lldd_handle; + int queue_size; + size_t cmnd_capsule_len; + u32 qnum; + u32 rqcnt; + u32 seqno; + + u64 connection_id; + atomic_t csn; + + unsigned long flags; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + +struct nvmefc_ls_req_op { + struct nvmefc_ls_req ls_req; + + struct nvme_fc_ctrl *ctrl; + struct nvme_fc_queue *queue; + struct request *rq; + + int ls_error; + struct completion ls_done; + struct list_head lsreq_list; /* ctrl->ls_req_list */ + bool req_queued; +}; + +enum nvme_fcpop_state { + FCPOP_STATE_UNINIT = 0, + FCPOP_STATE_IDLE = 1, + FCPOP_STATE_ACTIVE = 2, + FCPOP_STATE_ABORTED = 3, +}; + +struct nvme_fc_fcp_op { + struct nvme_request nreq; /* + * nvme/host/core.c + * requires this to be + * the 1st element in the + * private structure + * associated with the + * request. + */ + struct nvmefc_fcp_req fcp_req; + + struct nvme_fc_ctrl *ctrl; + struct nvme_fc_queue *queue; + struct request *rq; + + atomic_t state; + u32 rqno; + u32 nents; + + struct nvme_fc_cmd_iu cmd_iu; + struct nvme_fc_ersp_iu rsp_iu; +}; + +struct nvme_fc_lport { + struct nvme_fc_local_port localport; + + struct ida endp_cnt; + struct list_head port_list; /* nvme_fc_port_list */ + struct list_head endp_list; + struct device *dev; /* physical device for dma */ + struct nvme_fc_port_template *ops; + struct kref ref; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + +struct nvme_fc_rport { + struct nvme_fc_remote_port remoteport; + + struct list_head endp_list; /* for lport->endp_list */ + struct list_head ctrl_list; + spinlock_t lock; + struct kref ref; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + +enum nvme_fcctrl_state { + FCCTRL_INIT = 0, + FCCTRL_ACTIVE = 1, +}; + +struct nvme_fc_ctrl { + spinlock_t lock; + struct nvme_fc_queue *queues; + u32 queue_count; + + struct device *dev; + struct nvme_fc_lport *lport; + struct nvme_fc_rport *rport; + u32 cnum; + + u64 association_id; + + u64 cap; + + struct list_head ctrl_list; /* rport->ctrl_list */ + struct list_head ls_req_list; + + struct blk_mq_tag_set admin_tag_set; + struct blk_mq_tag_set tag_set; + + struct work_struct delete_work; + struct kref ref; + int state; + + struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS]; + + struct nvme_ctrl ctrl; +}; + +static inline struct nvme_fc_ctrl * +to_fc_ctrl(struct nvme_ctrl *ctrl) +{ + return container_of(ctrl, struct nvme_fc_ctrl, ctrl); +} + +static inline struct nvme_fc_lport * +localport_to_lport(struct nvme_fc_local_port *portptr) +{ + return container_of(portptr, struct nvme_fc_lport, localport); +} + +static inline struct nvme_fc_rport * 
+remoteport_to_rport(struct nvme_fc_remote_port *portptr) +{ + return container_of(portptr, struct nvme_fc_rport, remoteport); +} + +static inline struct nvmefc_ls_req_op * +ls_req_to_lsop(struct nvmefc_ls_req *lsreq) +{ + return container_of(lsreq, struct nvmefc_ls_req_op, ls_req); +} + +static inline struct nvme_fc_fcp_op * +fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq) +{ + return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req); +} + + + +/* *************************** Globals **************************** */ + + +static DEFINE_SPINLOCK(nvme_fc_lock); + +static LIST_HEAD(nvme_fc_lport_list); +static DEFINE_IDA(nvme_fc_local_port_cnt); +static DEFINE_IDA(nvme_fc_ctrl_cnt); + +static struct workqueue_struct *nvme_fc_wq; + + + +/* *********************** FC-NVME Port Management ************************ */ + +static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *); +static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *, + struct nvme_fc_queue *, unsigned int); + + +/** + * nvme_fc_register_localport - transport entry point called by an + * LLDD to register the existence of a NVME + * host FC port. + * @pinfo: pointer to information about the port to be registered + * @template: LLDD entrypoints and operational parameters for the port + * @dev: physical hardware device node port corresponds to. Will be + * used for DMA mappings + * @lport_p: pointer to a local port pointer. Upon success, the routine + * will allocate a nvme_fc_local_port structure and place its + * address in the local port pointer. Upon failure, local port + * pointer will be set to 0. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. + */ +int +nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, + struct nvme_fc_port_template *template, + struct device *dev, + struct nvme_fc_local_port **portptr) +{ + struct nvme_fc_lport *newrec; + unsigned long flags; + int ret, idx; + + if (!template->localport_delete || !template->remoteport_delete || + !template->ls_req || !template->fcp_io || + !template->ls_abort || !template->fcp_abort || + !template->max_hw_queues || !template->max_sgl_segments || + !template->max_dif_sgl_segments || !template->dma_boundary) { + ret = -EINVAL; + goto out_reghost_failed; + } + + newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), + GFP_KERNEL); + if (!newrec) { + ret = -ENOMEM; + goto out_reghost_failed; + } + + idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; + goto out_fail_kfree; + } + + if (!get_device(dev) && dev) { + ret = -ENODEV; + goto out_ida_put; + } + + INIT_LIST_HEAD(&newrec->port_list); + INIT_LIST_HEAD(&newrec->endp_list); + kref_init(&newrec->ref); + newrec->ops = template; + newrec->dev = dev; + ida_init(&newrec->endp_cnt); + newrec->localport.private = &newrec[1]; + newrec->localport.node_name = pinfo->node_name; + newrec->localport.port_name = pinfo->port_name; + newrec->localport.port_role = pinfo->port_role; + newrec->localport.port_id = pinfo->port_id; + newrec->localport.port_state = FC_OBJSTATE_ONLINE; + newrec->localport.port_num = idx; + + spin_lock_irqsave(&nvme_fc_lock, flags); + list_add_tail(&newrec->port_list, &nvme_fc_lport_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + if (dev) + dma_set_seg_boundary(dev, template->dma_boundary); + + *portptr = &newrec->localport; + return 0; + +out_ida_put: + ida_simple_remove(&nvme_fc_local_port_cnt, idx); +out_fail_kfree: + kfree(newrec); +out_reghost_failed: + *portptr = NULL; 
+ + return ret; +} +EXPORT_SYMBOL_GPL(nvme_fc_register_localport); + +static void +nvme_fc_free_lport(struct kref *ref) +{ + struct nvme_fc_lport *lport = + container_of(ref, struct nvme_fc_lport, ref); + unsigned long flags; + + WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); + WARN_ON(!list_empty(&lport->endp_list)); + + /* remove from transport list */ + spin_lock_irqsave(&nvme_fc_lock, flags); + list_del(&lport->port_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + /* let the LLDD know we've finished tearing it down */ + lport->ops->localport_delete(&lport->localport); + + ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); + ida_destroy(&lport->endp_cnt); + + put_device(lport->dev); + + kfree(lport); +} + +static void +nvme_fc_lport_put(struct nvme_fc_lport *lport) +{ + kref_put(&lport->ref, nvme_fc_free_lport); +} + +static int +nvme_fc_lport_get(struct nvme_fc_lport *lport) +{ + return kref_get_unless_zero(&lport->ref); +} + +/** + * nvme_fc_unregister_localport - transport entry point called by an + * LLDD to deregister/remove a previously + * registered a NVME host FC port. + * @localport: pointer to the (registered) local port that is to be + * deregistered. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. + */ +int +nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr) +{ + struct nvme_fc_lport *lport = localport_to_lport(portptr); + unsigned long flags; + + if (!portptr) + return -EINVAL; + + spin_lock_irqsave(&nvme_fc_lock, flags); + + if (portptr->port_state != FC_OBJSTATE_ONLINE) { + spin_unlock_irqrestore(&nvme_fc_lock, flags); + return -EINVAL; + } + portptr->port_state = FC_OBJSTATE_DELETED; + + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + nvme_fc_lport_put(lport); + + return 0; +} +EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport); + +/** + * nvme_fc_register_remoteport - transport entry point called by an + * LLDD to register the existence of a NVME + * subsystem FC port on its fabric. + * @localport: pointer to the (registered) local port that the remote + * subsystem port is connected to. + * @pinfo: pointer to information about the port to be registered + * @rport_p: pointer to a remote port pointer. Upon success, the routine + * will allocate a nvme_fc_remote_port structure and place its + * address in the remote port pointer. Upon failure, remote port + * pointer will be set to 0. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. 
+ */ +int +nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, + struct nvme_fc_port_info *pinfo, + struct nvme_fc_remote_port **portptr) +{ + struct nvme_fc_lport *lport = localport_to_lport(localport); + struct nvme_fc_rport *newrec; + unsigned long flags; + int ret, idx; + + newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), + GFP_KERNEL); + if (!newrec) { + ret = -ENOMEM; + goto out_reghost_failed; + } + + if (!nvme_fc_lport_get(lport)) { + ret = -ESHUTDOWN; + goto out_kfree_rport; + } + + idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; + goto out_lport_put; + } + + INIT_LIST_HEAD(&newrec->endp_list); + INIT_LIST_HEAD(&newrec->ctrl_list); + kref_init(&newrec->ref); + spin_lock_init(&newrec->lock); + newrec->remoteport.localport = &lport->localport; + newrec->remoteport.private = &newrec[1]; + newrec->remoteport.port_role = pinfo->port_role; + newrec->remoteport.node_name = pinfo->node_name; + newrec->remoteport.port_name = pinfo->port_name; + newrec->remoteport.port_id = pinfo->port_id; + newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; + newrec->remoteport.port_num = idx; + + spin_lock_irqsave(&nvme_fc_lock, flags); + list_add_tail(&newrec->endp_list, &lport->endp_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + *portptr = &newrec->remoteport; + return 0; + +out_lport_put: + nvme_fc_lport_put(lport); +out_kfree_rport: + kfree(newrec); +out_reghost_failed: + *portptr = NULL; + return ret; + +} +EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport); + +static void +nvme_fc_free_rport(struct kref *ref) +{ + struct nvme_fc_rport *rport = + container_of(ref, struct nvme_fc_rport, ref); + struct nvme_fc_lport *lport = + localport_to_lport(rport->remoteport.localport); + unsigned long flags; + + WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); + WARN_ON(!list_empty(&rport->ctrl_list)); + + /* remove from lport list */ + spin_lock_irqsave(&nvme_fc_lock, flags); + list_del(&rport->endp_list); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + /* let the LLDD know we've finished tearing it down */ + lport->ops->remoteport_delete(&rport->remoteport); + + ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num); + + kfree(rport); + + nvme_fc_lport_put(lport); +} + +static void +nvme_fc_rport_put(struct nvme_fc_rport *rport) +{ + kref_put(&rport->ref, nvme_fc_free_rport); +} + +static int +nvme_fc_rport_get(struct nvme_fc_rport *rport) +{ + return kref_get_unless_zero(&rport->ref); +} + +/** + * nvme_fc_unregister_remoteport - transport entry point called by an + * LLDD to deregister/remove a previously + * registered a NVME subsystem FC port. + * @remoteport: pointer to the (registered) remote port that is to be + * deregistered. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. 
+ */ +int +nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr) +{ + struct nvme_fc_rport *rport = remoteport_to_rport(portptr); + struct nvme_fc_ctrl *ctrl; + unsigned long flags; + + if (!portptr) + return -EINVAL; + + spin_lock_irqsave(&rport->lock, flags); + + if (portptr->port_state != FC_OBJSTATE_ONLINE) { + spin_unlock_irqrestore(&rport->lock, flags); + return -EINVAL; + } + portptr->port_state = FC_OBJSTATE_DELETED; + + /* tear down all associations to the remote port */ + list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) + __nvme_fc_del_ctrl(ctrl); + + spin_unlock_irqrestore(&rport->lock, flags); + + nvme_fc_rport_put(rport); + return 0; +} +EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport); + + +/* *********************** FC-NVME DMA Handling **************************** */ + +/* + * The fcloop device passes in a NULL device pointer. Real LLD's will + * pass in a valid device pointer. If NULL is passed to the dma mapping + * routines, depending on the platform, it may or may not succeed, and + * may crash. + * + * As such: + * Wrapper all the dma routines and check the dev pointer. + * + * If simple mappings (return just a dma address, we'll noop them, + * returning a dma address of 0. + * + * On more complex mappings (dma_map_sg), a pseudo routine fills + * in the scatter list, setting all dma addresses to 0. + */ + +static inline dma_addr_t +fc_dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction dir) +{ + return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; +} + +static inline int +fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dev ? dma_mapping_error(dev, dma_addr) : 0; +} + +static inline void +fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + if (dev) + dma_unmap_single(dev, addr, size, dir); +} + +static inline void +fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + if (dev) + dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void +fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + if (dev) + dma_sync_single_for_device(dev, addr, size, dir); +} + +/* pseudo dma_map_sg call */ +static int +fc_map_sg(struct scatterlist *sg, int nents) +{ + struct scatterlist *s; + int i; + + WARN_ON(nents == 0 || sg[0].length == 0); + + for_each_sg(sg, s, nents, i) { + s->dma_address = 0L; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + s->dma_length = s->length; +#endif + } + return nents; +} + +static inline int +fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + return dev ? 
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); +} + +static inline void +fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + if (dev) + dma_unmap_sg(dev, sg, nents, dir); +} + + +/* *********************** FC-NVME LS Handling **************************** */ + +static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *); +static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *); + + +static void +__nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl, + struct nvmefc_ls_req_op *lsop) +{ + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + unsigned long flags; + + spin_lock_irqsave(&ctrl->lock, flags); + + if (!lsop->req_queued) { + spin_unlock_irqrestore(&ctrl->lock, flags); + return; + } + + list_del(&lsop->lsreq_list); + + lsop->req_queued = false; + + spin_unlock_irqrestore(&ctrl->lock, flags); + + fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); + + nvme_fc_ctrl_put(ctrl); +} + +static int +__nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, + struct nvmefc_ls_req_op *lsop, + void (*done)(struct nvmefc_ls_req *req, int status)) +{ + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + unsigned long flags; + int ret; + + if (!nvme_fc_ctrl_get(ctrl)) + return -ESHUTDOWN; + + lsreq->done = done; + lsop->ctrl = ctrl; + lsop->req_queued = false; + INIT_LIST_HEAD(&lsop->lsreq_list); + init_completion(&lsop->ls_done); + + lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr, + lsreq->rqstlen + lsreq->rsplen, + DMA_BIDIRECTIONAL); + if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) { + nvme_fc_ctrl_put(ctrl); + dev_err(ctrl->dev, + "els request command failed EFAULT.\n"); + return -EFAULT; + } + lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; + + spin_lock_irqsave(&ctrl->lock, flags); + + list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list); + + lsop->req_queued = true; + + spin_unlock_irqrestore(&ctrl->lock, flags); + + ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport, + &ctrl->rport->remoteport, lsreq); + if (ret) + lsop->ls_error = ret; + + return ret; +} + +static void +nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status) +{ + struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); + + lsop->ls_error = status; + complete(&lsop->ls_done); +} + +static int +nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop) +{ + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; + int ret; + + ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done); + + if (!ret) + /* + * No timeout/not interruptible as we need the struct + * to exist until the lldd calls us back. Thus mandate + * wait until driver calls back. lldd responsible for + * the timeout action + */ + wait_for_completion(&lsop->ls_done); + + __nvme_fc_finish_ls_req(ctrl, lsop); + + if (ret) { + dev_err(ctrl->dev, + "ls request command failed (%d).\n", ret); + return ret; + } + + /* ACC or RJT payload ? 
*/ + if (rjt->w0.ls_cmd == FCNVME_LS_RJT) + return -ENXIO; + + return 0; +} + +static void +nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl, + struct nvmefc_ls_req_op *lsop, + void (*done)(struct nvmefc_ls_req *req, int status)) +{ + int ret; + + ret = __nvme_fc_send_ls_req(ctrl, lsop, done); + + /* don't wait for completion */ + + if (ret) + done(&lsop->ls_req, ret); +} + +/* Validation Error indexes into the string table below */ +enum { + VERR_NO_ERROR = 0, + VERR_LSACC = 1, + VERR_LSDESC_RQST = 2, + VERR_LSDESC_RQST_LEN = 3, + VERR_ASSOC_ID = 4, + VERR_ASSOC_ID_LEN = 5, + VERR_CONN_ID = 6, + VERR_CONN_ID_LEN = 7, + VERR_CR_ASSOC = 8, + VERR_CR_ASSOC_ACC_LEN = 9, + VERR_CR_CONN = 10, + VERR_CR_CONN_ACC_LEN = 11, + VERR_DISCONN = 12, + VERR_DISCONN_ACC_LEN = 13, +}; + +static char *validation_errors[] = { + "OK", + "Not LS_ACC", + "Not LSDESC_RQST", + "Bad LSDESC_RQST Length", + "Not Association ID", + "Bad Association ID Length", + "Not Connection ID", + "Bad Connection ID Length", + "Not CR_ASSOC Rqst", + "Bad CR_ASSOC ACC Length", + "Not CR_CONN Rqst", + "Bad CR_CONN ACC Length", + "Not Disconnect Rqst", + "Bad Disconnect ACC Length", +}; + +static int +nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) +{ + struct nvmefc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + struct fcnvme_ls_cr_assoc_rqst *assoc_rqst; + struct fcnvme_ls_cr_assoc_acc *assoc_acc; + int ret, fcret = 0; + + lsop = kzalloc((sizeof(*lsop) + + ctrl->lport->ops->lsrqst_priv_sz + + sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL); + if (!lsop) { + ret = -ENOMEM; + goto out_no_memory; + } + lsreq = &lsop->ls_req; + + lsreq->private = (void *)&lsop[1]; + assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *) + (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz); + assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1]; + + assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; + assoc_rqst->desc_list_len = + cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); + + assoc_rqst->assoc_cmd.desc_tag = + cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD); + assoc_rqst->assoc_cmd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); + + assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); + assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize); + /* Linux supports only Dynamic controllers */ + assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); + memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id, + min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be))); + strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, + min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); + strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, + min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); + + lsop->queue = queue; + lsreq->rqstaddr = assoc_rqst; + lsreq->rqstlen = sizeof(*assoc_rqst); + lsreq->rspaddr = assoc_acc; + lsreq->rsplen = sizeof(*assoc_acc); + lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC; + + ret = nvme_fc_send_ls_req(ctrl, lsop); + if (ret) + goto out_free_buffer; + + /* process connect LS completion */ + + /* validate the ACC response */ + if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) + fcret = VERR_LSACC; + if (assoc_acc->hdr.desc_list_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_cr_assoc_acc))) + fcret = VERR_CR_ASSOC_ACC_LEN; + if (assoc_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) + fcret = VERR_LSDESC_RQST; + else if (assoc_acc->hdr.rqst.desc_len != + fcnvme_lsdesc_len(sizeof(struct 
fcnvme_lsdesc_rqst))) + fcret = VERR_LSDESC_RQST_LEN; + else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) + fcret = VERR_CR_ASSOC; + else if (assoc_acc->associd.desc_tag != + cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) + fcret = VERR_ASSOC_ID; + else if (assoc_acc->associd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id))) + fcret = VERR_ASSOC_ID_LEN; + else if (assoc_acc->connectid.desc_tag != + cpu_to_be32(FCNVME_LSDESC_CONN_ID)) + fcret = VERR_CONN_ID; + else if (assoc_acc->connectid.desc_len != + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) + fcret = VERR_CONN_ID_LEN; + + if (fcret) { + ret = -EBADF; + dev_err(ctrl->dev, + "q %d connect failed: %s\n", + queue->qnum, validation_errors[fcret]); + } else { + ctrl->association_id = + be64_to_cpu(assoc_acc->associd.association_id); + queue->connection_id = + be64_to_cpu(assoc_acc->connectid.connection_id); + set_bit(NVME_FC_Q_CONNECTED, &queue->flags); + } + +out_free_buffer: + kfree(lsop); +out_no_memory: + if (ret) + dev_err(ctrl->dev, + "queue %d connect admin queue failed (%d).\n", + queue->qnum, ret); + return ret; +} + +static int +nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, + u16 qsize, u16 ersp_ratio) +{ + struct nvmefc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + struct fcnvme_ls_cr_conn_rqst *conn_rqst; + struct fcnvme_ls_cr_conn_acc *conn_acc; + int ret, fcret = 0; + + lsop = kzalloc((sizeof(*lsop) + + ctrl->lport->ops->lsrqst_priv_sz + + sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL); + if (!lsop) { + ret = -ENOMEM; + goto out_no_memory; + } + lsreq = &lsop->ls_req; + + lsreq->private = (void *)&lsop[1]; + conn_rqst = (struct fcnvme_ls_cr_conn_rqst *) + (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz); + conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1]; + + conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; + conn_rqst->desc_list_len = cpu_to_be32( + sizeof(struct fcnvme_lsdesc_assoc_id) + + sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); + + conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); + conn_rqst->associd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id)); + conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); + conn_rqst->connect_cmd.desc_tag = + cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD); + conn_rqst->connect_cmd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); + conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); + conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); + conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize); + + lsop->queue = queue; + lsreq->rqstaddr = conn_rqst; + lsreq->rqstlen = sizeof(*conn_rqst); + lsreq->rspaddr = conn_acc; + lsreq->rsplen = sizeof(*conn_acc); + lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC; + + ret = nvme_fc_send_ls_req(ctrl, lsop); + if (ret) + goto out_free_buffer; + + /* process connect LS completion */ + + /* validate the ACC response */ + if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) + fcret = VERR_LSACC; + if (conn_acc->hdr.desc_list_len != + fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc))) + fcret = VERR_CR_CONN_ACC_LEN; + if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) + fcret = VERR_LSDESC_RQST; + else if (conn_acc->hdr.rqst.desc_len != + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) + fcret = VERR_LSDESC_RQST_LEN; + else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) + fcret = VERR_CR_CONN; + else if 
(conn_acc->connectid.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+		fcret = VERR_CONN_ID;
+	else if (conn_acc->connectid.desc_len !=
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+		fcret = VERR_CONN_ID_LEN;
+
+	if (fcret) {
+		ret = -EBADF;
+		dev_err(ctrl->dev,
+			"q %d connect failed: %s\n",
+			queue->qnum, validation_errors[fcret]);
+	} else {
+		queue->connection_id =
+			be64_to_cpu(conn_acc->connectid.connection_id);
+		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+	}
+
+out_free_buffer:
+	kfree(lsop);
+out_no_memory:
+	if (ret)
+		dev_err(ctrl->dev,
+			"queue %d connect command failed (%d).\n",
+			queue->qnum, ret);
+	return ret;
+}
+
+static void
+nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+	struct nvme_fc_ctrl *ctrl = lsop->ctrl;
+
+	__nvme_fc_finish_ls_req(ctrl, lsop);
+
+	if (status)
+		dev_err(ctrl->dev,
+			"disconnect assoc ls request command failed (%d).\n",
+			status);
+
+	/* fc-nvme initiator doesn't care about success or failure of cmd */
+
+	kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association. Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association. E.g. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The behavior of the fc-nvme initiator is such that its
+ * understanding of the association and connections will implicitly
+ * be torn down. The action is implicit as it may be due to a loss of
+ * connectivity with the fc-nvme target, so you may never get a
+ * response even if you tried. As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme target
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
+{
+	struct fcnvme_ls_disconnect_rqst *discon_rqst;
+	struct fcnvme_ls_disconnect_acc *discon_acc;
+	struct nvmefc_ls_req_op *lsop;
+	struct nvmefc_ls_req *lsreq;
+
+	lsop = kzalloc((sizeof(*lsop) +
+			 ctrl->lport->ops->lsrqst_priv_sz +
+			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
+			GFP_KERNEL);
+	if (!lsop)
+		/* couldn't send it...
too bad */ + return; + + lsreq = &lsop->ls_req; + + lsreq->private = (void *)&lsop[1]; + discon_rqst = (struct fcnvme_ls_disconnect_rqst *) + (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz); + discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1]; + + discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT; + discon_rqst->desc_list_len = cpu_to_be32( + sizeof(struct fcnvme_lsdesc_assoc_id) + + sizeof(struct fcnvme_lsdesc_disconn_cmd)); + + discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); + discon_rqst->associd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id)); + + discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); + + discon_rqst->discon_cmd.desc_tag = cpu_to_be32( + FCNVME_LSDESC_DISCONN_CMD); + discon_rqst->discon_cmd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_disconn_cmd)); + discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION; + discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id); + + lsreq->rqstaddr = discon_rqst; + lsreq->rqstlen = sizeof(*discon_rqst); + lsreq->rspaddr = discon_acc; + lsreq->rsplen = sizeof(*discon_acc); + lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC; + + nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done); + + /* only meaningful part to terminating the association */ + ctrl->association_id = 0; +} + + +/* *********************** NVME Ctrl Routines **************************** */ + + +static int +nvme_fc_reinit_request(void *data, struct request *rq) +{ + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; + + memset(cmdiu, 0, sizeof(*cmdiu)); + cmdiu->scsi_id = NVME_CMD_SCSI_ID; + cmdiu->fc_id = NVME_CMD_FC_ID; + cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); + memset(&op->rsp_iu, 0, sizeof(op->rsp_iu)); + + return 0; +} + +static void +__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_fcp_op *op) +{ + fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, + sizeof(op->rsp_iu), DMA_FROM_DEVICE); + fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, + sizeof(op->cmd_iu), DMA_TO_DEVICE); + + atomic_set(&op->state, FCPOP_STATE_UNINIT); +} + +static void +nvme_fc_exit_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int rq_idx) +{ + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + + return __nvme_fc_exit_request(data, op); +} + +static void +nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; + int i; + + for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) { + if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT) + continue; + __nvme_fc_exit_request(ctrl, aen_op); + nvme_fc_ctrl_put(ctrl); + } +} + +void +nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) +{ + struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); + struct request *rq = op->rq; + struct nvmefc_fcp_req *freq = &op->fcp_req; + struct nvme_fc_ctrl *ctrl = op->ctrl; + struct nvme_fc_queue *queue = op->queue; + struct nvme_completion *cqe = &op->rsp_iu.cqe; + u16 status; + + /* + * WARNING: + * The current linux implementation of a nvme controller + * allocates a single tag set for all io queues and sizes + * the io queues to fully hold all possible tags. Thus, the + * implementation does not reference or care about the sqhd + * value as it never needs to use the sqhd/sqtail pointers + * for submission pacing. 
+	 *
+	 * This affects the FC-NVME implementation in two ways:
+	 * 1) As the value doesn't matter, we don't need to waste
+	 *    cycles extracting it from ERSPs and stamping it in the
+	 *    cases where the transport fabricates CQEs on successful
+	 *    completions.
+	 * 2) The FC-NVME implementation requires that ERSP completions
+	 *    be delivered back to the nvme layer in order relative to
+	 *    the rsn, such that the sqhd value will always be "in order"
+	 *    for the nvme layer. As the nvme layer in linux doesn't care
+	 *    about sqhd, there's no need to return them in order.
+	 *
+	 * Additionally:
+	 * As the core nvme layer in linux currently does not look at
+	 * every field in the cqe - in cases where the FC transport must
+	 * fabricate a CQE, the following fields will not be set as they
+	 * are not referenced:
+	 *   cqe.sqid, cqe.sqhd, cqe.command_id
+	 */
+
+	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
+				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+
+	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
+		status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+	else
+		status = freq->status;
+
+	/*
+	 * For the linux implementation, if we have an unsuccessful
+	 * status, the blk-mq layer can typically be called with the
+	 * non-zero status and the content of the cqe isn't important.
+	 */
+	if (status)
+		goto done;
+
+	/*
+	 * command completed successfully relative to the wire
+	 * protocol. However, validate anything received and
+	 * extract the status and result from the cqe (create it
+	 * where necessary).
+	 */
+
+	switch (freq->rcv_rsplen) {
+
+	case 0:
+	case NVME_FC_SIZEOF_ZEROS_RSP:
+		/*
+		 * No response payload or 12 bytes of payload (which
+		 * should all be zeros) are considered successful; the
+		 * transport fabricates the CQE with no payload.
+		 */
+		if (freq->transferred_length !=
+			be32_to_cpu(op->cmd_iu.data_len)) {
+			status = -EIO;
+			goto done;
+		}
+		op->nreq.result.u64 = 0;
+		break;
+
+	case sizeof(struct nvme_fc_ersp_iu):
+		/*
+		 * The ERSP IU contains a full completion with CQE.
+		 * Validate ERSP IU and look at cqe.
+ */ + if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != + (freq->rcv_rsplen / 4) || + be32_to_cpu(op->rsp_iu.xfrd_len) != + freq->transferred_length || + op->rqno != le16_to_cpu(cqe->command_id))) { + status = -EIO; + goto done; + } + op->nreq.result = cqe->result; + status = le16_to_cpu(cqe->status) >> 1; + break; + + default: + status = -EIO; + goto done; + } + +done: + if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) { + nvme_complete_async_event(&queue->ctrl->ctrl, status, + &op->nreq.result); + nvme_fc_ctrl_put(ctrl); + return; + } + + blk_mq_complete_request(rq, status); +} + +static int +__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, + struct request *rq, u32 rqno) +{ + struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; + int ret = 0; + + memset(op, 0, sizeof(*op)); + op->fcp_req.cmdaddr = &op->cmd_iu; + op->fcp_req.cmdlen = sizeof(op->cmd_iu); + op->fcp_req.rspaddr = &op->rsp_iu; + op->fcp_req.rsplen = sizeof(op->rsp_iu); + op->fcp_req.done = nvme_fc_fcpio_done; + op->fcp_req.first_sgl = (struct scatterlist *)&op[1]; + op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE]; + op->ctrl = ctrl; + op->queue = queue; + op->rq = rq; + op->rqno = rqno; + + cmdiu->scsi_id = NVME_CMD_SCSI_ID; + cmdiu->fc_id = NVME_CMD_FC_ID; + cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); + + op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, + &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); + if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { + dev_err(ctrl->dev, + "FCP Op failed - cmdiu dma mapping failed.\n"); + ret = EFAULT; + goto out_on_error; + } + + op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, + &op->rsp_iu, sizeof(op->rsp_iu), + DMA_FROM_DEVICE); + if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { + dev_err(ctrl->dev, + "FCP Op failed - rspiu dma mapping failed.\n"); + ret = EFAULT; + } + + atomic_set(&op->state, FCPOP_STATE_IDLE); +out_on_error: + return ret; +} + +static int +nvme_fc_init_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int rq_idx, + unsigned int numa_node) +{ + struct nvme_fc_ctrl *ctrl = data; + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1]; + + return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++); +} + +static int +nvme_fc_init_admin_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int rq_idx, + unsigned int numa_node) +{ + struct nvme_fc_ctrl *ctrl = data; + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + struct nvme_fc_queue *queue = &ctrl->queues[0]; + + return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++); +} + +static int +nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_fcp_op *aen_op; + struct nvme_fc_cmd_iu *cmdiu; + struct nvme_command *sqe; + int i, ret; + + aen_op = ctrl->aen_ops; + for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) { + cmdiu = &aen_op->cmd_iu; + sqe = &cmdiu->sqe; + ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], + aen_op, (struct request *)NULL, + (AEN_CMDID_BASE + i)); + if (ret) + return ret; + + memset(sqe, 0, sizeof(*sqe)); + sqe->common.opcode = nvme_admin_async_event; + sqe->common.command_id = AEN_CMDID_BASE + i; + } + return 0; +} + + +static inline void +__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, + unsigned int qidx) +{ + struct nvme_fc_queue *queue = &ctrl->queues[qidx]; + + hctx->driver_data = queue; + queue->hctx = 
hctx; +} + +static int +nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct nvme_fc_ctrl *ctrl = data; + + __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); + + return 0; +} + +static int +nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct nvme_fc_ctrl *ctrl = data; + + __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); + + return 0; +} + +static void +nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size) +{ + struct nvme_fc_queue *queue; + + queue = &ctrl->queues[idx]; + memset(queue, 0, sizeof(*queue)); + queue->ctrl = ctrl; + queue->qnum = idx; + atomic_set(&queue->csn, 1); + queue->dev = ctrl->dev; + + if (idx > 0) + queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; + else + queue->cmnd_capsule_len = sizeof(struct nvme_command); + + queue->queue_size = queue_size; + + /* + * Considered whether we should allocate buffers for all SQEs + * and CQEs and dma map them - mapping their respective entries + * into the request structures (kernel vm addr and dma address) + * thus the driver could use the buffers/mappings directly. + * It only makes sense if the LLDD would use them for its + * messaging api. It's very unlikely most adapter api's would use + * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload + * structures were used instead. + */ +} + +/* + * This routine terminates a queue at the transport level. + * The transport has already ensured that all outstanding ios on + * the queue have been terminated. + * The transport will send a Disconnect LS request to terminate + * the queue's connection. Termination of the admin queue will also + * terminate the association at the target. + */ +static void +nvme_fc_free_queue(struct nvme_fc_queue *queue) +{ + if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) + return; + + /* + * Current implementation never disconnects a single queue. + * It always terminates a whole association. So there is never + * a disconnect(queue) LS sent to the target. 
+ */ + + queue->connection_id = 0; + clear_bit(NVME_FC_Q_CONNECTED, &queue->flags); +} + +static void +__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_queue *queue, unsigned int qidx) +{ + if (ctrl->lport->ops->delete_queue) + ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, + queue->lldd_handle); + queue->lldd_handle = NULL; +} + +static void +nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl) +{ + __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); + blk_cleanup_queue(ctrl->ctrl.admin_q); + blk_mq_free_tag_set(&ctrl->admin_tag_set); + nvme_fc_free_queue(&ctrl->queues[0]); +} + +static void +nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->queue_count; i++) + nvme_fc_free_queue(&ctrl->queues[i]); +} + +static int +__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, + struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) +{ + int ret = 0; + + queue->lldd_handle = NULL; + if (ctrl->lport->ops->create_queue) + ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, + qidx, qsize, &queue->lldd_handle); + + return ret; +} + +static void +nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) +{ + struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1]; + int i; + + for (i = ctrl->queue_count - 1; i >= 1; i--, queue--) + __nvme_fc_delete_hw_queue(ctrl, queue, i); +} + +static int +nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) +{ + struct nvme_fc_queue *queue = &ctrl->queues[1]; + int i, j, ret; + + for (i = 1; i < ctrl->queue_count; i++, queue++) { + ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); + if (ret) { + for (j = i-1; j >= 0; j--) + __nvme_fc_delete_hw_queue(ctrl, + &ctrl->queues[j], j); + return ret; + } + } + + return 0; +} + +static int +nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) +{ + int i, ret = 0; + + for (i = 1; i < ctrl->queue_count; i++) { + ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, + (qsize / 5)); + if (ret) + break; + ret = nvmf_connect_io_queue(&ctrl->ctrl, i); + if (ret) + break; + } + + return ret; +} + +static void +nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->queue_count; i++) + nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize); +} + +static void +nvme_fc_ctrl_free(struct kref *ref) +{ + struct nvme_fc_ctrl *ctrl = + container_of(ref, struct nvme_fc_ctrl, ref); + unsigned long flags; + + if (ctrl->state != FCCTRL_INIT) { + /* remove from rport list */ + spin_lock_irqsave(&ctrl->rport->lock, flags); + list_del(&ctrl->ctrl_list); + spin_unlock_irqrestore(&ctrl->rport->lock, flags); + } + + put_device(ctrl->dev); + nvme_fc_rport_put(ctrl->rport); + + kfree(ctrl->queues); + ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); + nvmf_free_options(ctrl->ctrl.opts); + kfree(ctrl); +} + +static void +nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) +{ + kref_put(&ctrl->ref, nvme_fc_ctrl_free); +} + +static int +nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) +{ + return kref_get_unless_zero(&ctrl->ref); +} + +/* + * All accesses from nvme core layer done - can now free the + * controller. 
Called after last nvme_put_ctrl() call + */ +static void +nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + + WARN_ON(nctrl != &ctrl->ctrl); + + /* + * Tear down the association, which will generate link + * traffic to terminate connections + */ + + if (ctrl->state != FCCTRL_INIT) { + /* send a Disconnect(association) LS to fc-nvme target */ + nvme_fc_xmt_disconnect_assoc(ctrl); + + if (ctrl->ctrl.tagset) { + blk_cleanup_queue(ctrl->ctrl.connect_q); + blk_mq_free_tag_set(&ctrl->tag_set); + nvme_fc_delete_hw_io_queues(ctrl); + nvme_fc_free_io_queues(ctrl); + } + + nvme_fc_exit_aen_ops(ctrl); + + nvme_fc_destroy_admin_queue(ctrl); + } + + nvme_fc_ctrl_put(ctrl); +} + + +static int +__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) +{ + int state; + + state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); + if (state != FCPOP_STATE_ACTIVE) { + atomic_set(&op->state, state); + return -ECANCELED; /* fail */ + } + + ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, + &ctrl->rport->remoteport, + op->queue->lldd_handle, + &op->fcp_req); + + return 0; +} + +enum blk_eh_timer_return +nvme_fc_timeout(struct request *rq, bool reserved) +{ + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + struct nvme_fc_ctrl *ctrl = op->ctrl; + int ret; + + if (reserved) + return BLK_EH_RESET_TIMER; + + ret = __nvme_fc_abort_op(ctrl, op); + if (ret) + /* io wasn't active to abort consider it done */ + return BLK_EH_HANDLED; + + /* + * TODO: force a controller reset + * when that happens, queues will be torn down and outstanding + * ios will be terminated, and the above abort, on a single io + * will no longer be needed. + */ + + return BLK_EH_HANDLED; +} + +static int +nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, + struct nvme_fc_fcp_op *op) +{ + struct nvmefc_fcp_req *freq = &op->fcp_req; + u32 map_len = nvme_map_len(rq); + enum dma_data_direction dir; + int ret; + + freq->sg_cnt = 0; + + if (!map_len) + return 0; + + freq->sg_table.sgl = freq->first_sgl; + ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments, + freq->sg_table.sgl); + if (ret) + return -ENOMEM; + + op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); + WARN_ON(op->nents > rq->nr_phys_segments); + dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, + op->nents, dir); + if (unlikely(freq->sg_cnt <= 0)) { + sg_free_table_chained(&freq->sg_table, true); + freq->sg_cnt = 0; + return -EFAULT; + } + + /* + * TODO: blk_integrity_rq(rq) for DIF + */ + return 0; +} + +static void +nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, + struct nvme_fc_fcp_op *op) +{ + struct nvmefc_fcp_req *freq = &op->fcp_req; + + if (!freq->sg_cnt) + return; + + fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, + ((rq_data_dir(rq) == WRITE) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE)); + + nvme_cleanup_cmd(rq); + + sg_free_table_chained(&freq->sg_table, true); + + freq->sg_cnt = 0; +} + +/* + * In FC, the queue is a logical thing. At transport connect, the target + * creates its "queue" and returns a handle that is to be given to the + * target whenever it posts something to the corresponding SQ. When an + * SQE is sent on a SQ, FC effectively considers the SQE, or rather the + * command contained within the SQE, an io, and assigns a FC exchange + * to it. The SQE and the associated SQ handle are sent in the initial + * CMD IU sents on the exchange. 
All transfers relative to the io occur + * as part of the exchange. The CQE is the last thing for the io, + * which is transferred (explicitly or implicitly) with the RSP IU + * sent on the exchange. After the CQE is received, the FC exchange is + * terminaed and the Exchange may be used on a different io. + * + * The transport to LLDD api has the transport making a request for a + * new fcp io request to the LLDD. The LLDD then allocates a FC exchange + * resource and transfers the command. The LLDD will then process all + * steps to complete the io. Upon completion, the transport done routine + * is called. + * + * So - while the operation is outstanding to the LLDD, there is a link + * level FC exchange resource that is also outstanding. This must be + * considered in all cleanup operations. + */ +static int +nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, + struct nvme_fc_fcp_op *op, u32 data_len, + enum nvmefc_fcp_datadir io_dir) +{ + struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; + struct nvme_command *sqe = &cmdiu->sqe; + u32 csn; + int ret; + + if (!nvme_fc_ctrl_get(ctrl)) + return BLK_MQ_RQ_QUEUE_ERROR; + + /* format the FC-NVME CMD IU and fcp_req */ + cmdiu->connection_id = cpu_to_be64(queue->connection_id); + csn = atomic_inc_return(&queue->csn); + cmdiu->csn = cpu_to_be32(csn); + cmdiu->data_len = cpu_to_be32(data_len); + switch (io_dir) { + case NVMEFC_FCP_WRITE: + cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; + break; + case NVMEFC_FCP_READ: + cmdiu->flags = FCNVME_CMD_FLAGS_READ; + break; + case NVMEFC_FCP_NODATA: + cmdiu->flags = 0; + break; + } + op->fcp_req.payload_length = data_len; + op->fcp_req.io_dir = io_dir; + op->fcp_req.transferred_length = 0; + op->fcp_req.rcv_rsplen = 0; + op->fcp_req.status = 0; + op->fcp_req.sqid = cpu_to_le16(queue->qnum); + + /* + * validate per fabric rules, set fields mandated by fabric spec + * as well as those by FC-NVME spec. + */ + WARN_ON_ONCE(sqe->common.metadata); + WARN_ON_ONCE(sqe->common.dptr.prp1); + WARN_ON_ONCE(sqe->common.dptr.prp2); + sqe->common.flags |= NVME_CMD_SGL_METABUF; + + /* + * format SQE DPTR field per FC-NVME rules + * type=data block descr; subtype=offset; + * offset is currently 0. + */ + sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET; + sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); + sqe->rw.dptr.sgl.addr = 0; + + /* odd that we set the command_id - should come from nvme-fabrics */ + WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno)); + + if (op->rq) { /* skipped on aens */ + ret = nvme_fc_map_data(ctrl, op->rq, op); + if (ret < 0) { + dev_err(queue->ctrl->ctrl.device, + "Failed to map data (%d)\n", ret); + nvme_cleanup_cmd(op->rq); + nvme_fc_ctrl_put(ctrl); + return (ret == -ENOMEM || ret == -EAGAIN) ? + BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR; + } + } + + fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, + sizeof(op->cmd_iu), DMA_TO_DEVICE); + + atomic_set(&op->state, FCPOP_STATE_ACTIVE); + + if (op->rq) + blk_mq_start_request(op->rq); + + ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, + &ctrl->rport->remoteport, + queue->lldd_handle, &op->fcp_req); + + if (ret) { + dev_err(ctrl->dev, + "Send nvme command failed - lldd returned %d.\n", ret); + + if (op->rq) { /* normal request */ + nvme_fc_unmap_data(ctrl, op->rq, op); + nvme_cleanup_cmd(op->rq); + } + /* else - aen. 
no cleanup needed */ + + nvme_fc_ctrl_put(ctrl); + + if (ret != -EBUSY) + return BLK_MQ_RQ_QUEUE_ERROR; + + if (op->rq) { + blk_mq_stop_hw_queues(op->rq->q); + blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY); + } + return BLK_MQ_RQ_QUEUE_BUSY; + } + + return BLK_MQ_RQ_QUEUE_OK; +} + +static int +nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct nvme_ns *ns = hctx->queue->queuedata; + struct nvme_fc_queue *queue = hctx->driver_data; + struct nvme_fc_ctrl *ctrl = queue->ctrl; + struct request *rq = bd->rq; + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; + struct nvme_command *sqe = &cmdiu->sqe; + enum nvmefc_fcp_datadir io_dir; + u32 data_len; + int ret; + + ret = nvme_setup_cmd(ns, rq, sqe); + if (ret) + return ret; + + data_len = nvme_map_len(rq); + if (data_len) + io_dir = ((rq_data_dir(rq) == WRITE) ? + NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); + else + io_dir = NVMEFC_FCP_NODATA; + + return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); +} + +static struct blk_mq_tags * +nvme_fc_tagset(struct nvme_fc_queue *queue) +{ + if (queue->qnum == 0) + return queue->ctrl->admin_tag_set.tags[queue->qnum]; + + return queue->ctrl->tag_set.tags[queue->qnum - 1]; +} + +static int +nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) + +{ + struct nvme_fc_queue *queue = hctx->driver_data; + struct nvme_fc_ctrl *ctrl = queue->ctrl; + struct request *req; + struct nvme_fc_fcp_op *op; + + req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag); + if (!req) { + dev_err(queue->ctrl->ctrl.device, + "tag 0x%x on QNum %#x not found\n", + tag, queue->qnum); + return 0; + } + + op = blk_mq_rq_to_pdu(req); + + if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) && + (ctrl->lport->ops->poll_queue)) + ctrl->lport->ops->poll_queue(&ctrl->lport->localport, + queue->lldd_handle); + + return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE)); +} + +static void +nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); + struct nvme_fc_fcp_op *aen_op; + int ret; + + if (aer_idx > NVME_FC_NR_AEN_COMMANDS) + return; + + aen_op = &ctrl->aen_ops[aer_idx]; + + ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, + NVMEFC_FCP_NODATA); + if (ret) + dev_err(ctrl->ctrl.device, + "failed async event work [%d]\n", aer_idx); +} + +static void +nvme_fc_complete_rq(struct request *rq) +{ + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); + struct nvme_fc_ctrl *ctrl = op->ctrl; + int error = 0, state; + + state = atomic_xchg(&op->state, FCPOP_STATE_IDLE); + + nvme_cleanup_cmd(rq); + + nvme_fc_unmap_data(ctrl, rq, op); + + if (unlikely(rq->errors)) { + if (nvme_req_needs_retry(rq, rq->errors)) { + nvme_requeue_req(rq); + return; + } + + if (rq->cmd_type == REQ_TYPE_DRV_PRIV) + error = rq->errors; + else + error = nvme_error_status(rq->errors); + } + + nvme_fc_ctrl_put(ctrl); + + blk_mq_end_request(rq, error); +} + +static struct blk_mq_ops nvme_fc_mq_ops = { + .queue_rq = nvme_fc_queue_rq, + .complete = nvme_fc_complete_rq, + .init_request = nvme_fc_init_request, + .exit_request = nvme_fc_exit_request, + .reinit_request = nvme_fc_reinit_request, + .init_hctx = nvme_fc_init_hctx, + .poll = nvme_fc_poll, + .timeout = nvme_fc_timeout, +}; + +static struct blk_mq_ops nvme_fc_admin_mq_ops = { + .queue_rq = nvme_fc_queue_rq, + .complete = nvme_fc_complete_rq, + .init_request = nvme_fc_init_admin_request, + .exit_request = nvme_fc_exit_request, + .reinit_request = 
nvme_fc_reinit_request, + .init_hctx = nvme_fc_init_admin_hctx, + .timeout = nvme_fc_timeout, +}; + +static int +nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl) +{ + u32 segs; + int error; + + nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH); + + error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], + NVME_FC_AQ_BLKMQ_DEPTH, + (NVME_FC_AQ_BLKMQ_DEPTH / 4)); + if (error) + return error; + + memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); + ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; + ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH; + ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ + ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; + ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) + + (SG_CHUNK_SIZE * + sizeof(struct scatterlist)) + + ctrl->lport->ops->fcprqst_priv_sz; + ctrl->admin_tag_set.driver_data = ctrl; + ctrl->admin_tag_set.nr_hw_queues = 1; + ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; + + error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); + if (error) + goto out_free_queue; + + ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); + if (IS_ERR(ctrl->ctrl.admin_q)) { + error = PTR_ERR(ctrl->ctrl.admin_q); + goto out_free_tagset; + } + + error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, + NVME_FC_AQ_BLKMQ_DEPTH); + if (error) + goto out_cleanup_queue; + + error = nvmf_connect_admin_queue(&ctrl->ctrl); + if (error) + goto out_delete_hw_queue; + + error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); + if (error) { + dev_err(ctrl->ctrl.device, + "prop_get NVME_REG_CAP failed\n"); + goto out_delete_hw_queue; + } + + ctrl->ctrl.sqsize = + min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); + + error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); + if (error) + goto out_delete_hw_queue; + + segs = min_t(u32, NVME_FC_MAX_SEGMENTS, + ctrl->lport->ops->max_sgl_segments); + ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9); + + error = nvme_init_identify(&ctrl->ctrl); + if (error) + goto out_delete_hw_queue; + + nvme_start_keep_alive(&ctrl->ctrl); + + return 0; + +out_delete_hw_queue: + __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); +out_cleanup_queue: + blk_cleanup_queue(ctrl->ctrl.admin_q); +out_free_tagset: + blk_mq_free_tag_set(&ctrl->admin_tag_set); +out_free_queue: + nvme_fc_free_queue(&ctrl->queues[0]); + return error; +} + +/* + * This routine is used by the transport when it needs to find active + * io on a queue that is to be terminated. The transport uses + * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke + * this routine to kill them on a 1 by 1 basis. + * + * As FC allocates FC exchange for each io, the transport must contact + * the LLDD to terminate the exchange, thus releasing the FC exchange. + * After terminating the exchange the LLDD will call the transport's + * normal io done path for the request, but it will have an aborted + * status. The done path will return the io request back to the block + * layer with an error status. + */ +static void +nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) +{ + struct nvme_ctrl *nctrl = data; + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); +int status; + + if (!blk_mq_request_started(req)) + return; + + /* this performs an ABTS-LS on the FC exchange for the io */ + status = __nvme_fc_abort_op(ctrl, op); + /* + * if __nvme_fc_abort_op failed: io wasn't active to abort + * consider it done. 
Assume completion path already completing
+	 * in parallel
+	 */
+	if (status)
+		/* io wasn't active to abort consider it done */
+		/* assume completion path already completing in parallel */
+		return;
+}
+
+
+/*
+ * This routine stops operation of the controller. Admin and IO queues
+ * are stopped, outstanding ios on them terminated, and the nvme ctrl
+ * is shutdown.
+ */
+static void
+nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+	/*
+	 * If io queues are present, stop them and terminate all outstanding
+	 * ios on them. As FC allocates FC exchange for each io, the
+	 * transport must contact the LLDD to terminate the exchange,
+	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+	 * to tell us which ios are busy and invoke a transport routine
+	 * to kill them with the LLDD. After terminating the exchange
+	 * the LLDD will call the transport's normal io done path, but it
+	 * will have an aborted status. The done path will return the
+	 * io requests back to the block layer as part of normal completions
+	 * (but with error status).
+	 */
+	if (ctrl->queue_count > 1) {
+		nvme_stop_queues(&ctrl->ctrl);
+		blk_mq_tagset_busy_iter(&ctrl->tag_set,
+				nvme_fc_terminate_exchange, &ctrl->ctrl);
+	}
+
+	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+		nvme_shutdown_ctrl(&ctrl->ctrl);
+
+	/*
+	 * now clean up the admin queue. Same thing as above.
+	 * use blk_mq_tagset_busy_iter() and the transport routine to
+	 * terminate the exchanges.
+	 */
+	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+				nvme_fc_terminate_exchange, &ctrl->ctrl);
+}
+
+/*
+ * Called to teardown an association.
+ * May be called with association fully in place or partially in place.
+ */
+static void
+__nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+	nvme_stop_keep_alive(&ctrl->ctrl);
+
+	/* stop and terminate ios on admin and io queues */
+	nvme_fc_shutdown_ctrl(ctrl);
+
+	/*
+	 * tear down the controller
+	 * This will result in the last reference on the nvme ctrl to
+	 * expire, calling the transport nvme_fc_free_nvme_ctrl() callback.
+	 * From there, the transport will tear down its logical queues and
+	 * association.
+ */ + nvme_uninit_ctrl(&ctrl->ctrl); + + nvme_put_ctrl(&ctrl->ctrl); +} + +static void +nvme_fc_del_ctrl_work(struct work_struct *work) +{ + struct nvme_fc_ctrl *ctrl = + container_of(work, struct nvme_fc_ctrl, delete_work); + + __nvme_fc_remove_ctrl(ctrl); +} + +static int +__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) +{ + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) + return -EBUSY; + + if (!queue_work(nvme_fc_wq, &ctrl->delete_work)) + return -EBUSY; + + return 0; +} + +/* + * Request from nvme core layer to delete the controller + */ +static int +nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + struct nvme_fc_rport *rport = ctrl->rport; + unsigned long flags; + int ret; + + spin_lock_irqsave(&rport->lock, flags); + ret = __nvme_fc_del_ctrl(ctrl); + spin_unlock_irqrestore(&rport->lock, flags); + if (ret) + return ret; + + flush_work(&ctrl->delete_work); + + return 0; +} + +static int +nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl) +{ + return -EIO; +} + +static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { + .name = "fc", + .module = THIS_MODULE, + .is_fabrics = true, + .reg_read32 = nvmf_reg_read32, + .reg_read64 = nvmf_reg_read64, + .reg_write32 = nvmf_reg_write32, + .reset_ctrl = nvme_fc_reset_nvme_ctrl, + .free_ctrl = nvme_fc_free_nvme_ctrl, + .submit_async_event = nvme_fc_submit_async_event, + .delete_ctrl = nvme_fc_del_nvme_ctrl, + .get_subsysnqn = nvmf_get_subsysnqn, + .get_address = nvmf_get_address, +}; + +static int +nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) +{ + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + int ret; + + ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); + if (ret) { + dev_info(ctrl->ctrl.device, + "set_queue_count failed: %d\n", ret); + return ret; + } + + ctrl->queue_count = opts->nr_io_queues + 1; + if (!opts->nr_io_queues) + return 0; + + dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", + opts->nr_io_queues); + + nvme_fc_init_io_queues(ctrl); + + memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); + ctrl->tag_set.ops = &nvme_fc_mq_ops; + ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; + ctrl->tag_set.reserved_tags = 1; /* fabric connect */ + ctrl->tag_set.numa_node = NUMA_NO_NODE; + ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) + + (SG_CHUNK_SIZE * + sizeof(struct scatterlist)) + + ctrl->lport->ops->fcprqst_priv_sz; + ctrl->tag_set.driver_data = ctrl; + ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1; + ctrl->tag_set.timeout = NVME_IO_TIMEOUT; + + ret = blk_mq_alloc_tag_set(&ctrl->tag_set); + if (ret) + return ret; + + ctrl->ctrl.tagset = &ctrl->tag_set; + + ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); + if (IS_ERR(ctrl->ctrl.connect_q)) { + ret = PTR_ERR(ctrl->ctrl.connect_q); + goto out_free_tag_set; + } + + ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); + if (ret) + goto out_cleanup_blk_queue; + + ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); + if (ret) + goto out_delete_hw_queues; + + return 0; + +out_delete_hw_queues: + nvme_fc_delete_hw_io_queues(ctrl); +out_cleanup_blk_queue: + nvme_stop_keep_alive(&ctrl->ctrl); + blk_cleanup_queue(ctrl->ctrl.connect_q); +out_free_tag_set: + blk_mq_free_tag_set(&ctrl->tag_set); + nvme_fc_free_io_queues(ctrl); + + /* force put free routine to ignore io queues */ + ctrl->ctrl.tagset = NULL; + + return ret; +} + + +static struct nvme_ctrl * +__nvme_fc_create_ctrl(struct device *dev, struct 
nvmf_ctrl_options *opts, + struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) +{ + struct nvme_fc_ctrl *ctrl; + unsigned long flags; + int ret, idx; + bool changed; + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + ret = -ENOMEM; + goto out_fail; + } + + idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; + goto out_free_ctrl; + } + + ctrl->ctrl.opts = opts; + INIT_LIST_HEAD(&ctrl->ctrl_list); + INIT_LIST_HEAD(&ctrl->ls_req_list); + ctrl->lport = lport; + ctrl->rport = rport; + ctrl->dev = lport->dev; + ctrl->state = FCCTRL_INIT; + ctrl->cnum = idx; + + ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); + if (ret) + goto out_free_ida; + + get_device(ctrl->dev); + kref_init(&ctrl->ref); + + INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work); + spin_lock_init(&ctrl->lock); + + /* io queue count */ + ctrl->queue_count = min_t(unsigned int, + opts->nr_io_queues, + lport->ops->max_hw_queues); + opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */ + ctrl->queue_count++; /* +1 for admin queue */ + + ctrl->ctrl.sqsize = opts->queue_size - 1; + ctrl->ctrl.kato = opts->kato; + + ret = -ENOMEM; + ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue), + GFP_KERNEL); + if (!ctrl->queues) + goto out_uninit_ctrl; + + ret = nvme_fc_configure_admin_queue(ctrl); + if (ret) + goto out_uninit_ctrl; + + /* sanity checks */ + + /* FC-NVME supports 64-byte SQE only */ + if (ctrl->ctrl.ioccsz != 4) { + dev_err(ctrl->ctrl.device, "ioccsz %d is not supported!\n", + ctrl->ctrl.ioccsz); + goto out_remove_admin_queue; + } + /* FC-NVME supports 16-byte CQE only */ + if (ctrl->ctrl.iorcsz != 1) { + dev_err(ctrl->ctrl.device, "iorcsz %d is not supported!\n", + ctrl->ctrl.iorcsz); + goto out_remove_admin_queue; + } + /* FC-NVME does not have other data in the capsule */ + if (ctrl->ctrl.icdoff) { + dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", + ctrl->ctrl.icdoff); + goto out_remove_admin_queue; + } + + /* FC-NVME supports normal SGL Data Block Descriptors */ + + if (opts->queue_size > ctrl->ctrl.maxcmd) { + /* warn if maxcmd is lower than queue_size */ + dev_warn(ctrl->ctrl.device, + "queue_size %zu > ctrl maxcmd %u, reducing " + "to queue_size\n", + opts->queue_size, ctrl->ctrl.maxcmd); + opts->queue_size = ctrl->ctrl.maxcmd; + } + + ret = nvme_fc_init_aen_ops(ctrl); + if (ret) + goto out_exit_aen_ops; + + if (ctrl->queue_count > 1) { + ret = nvme_fc_create_io_queues(ctrl); + if (ret) + goto out_exit_aen_ops; + } + + spin_lock_irqsave(&ctrl->lock, flags); + ctrl->state = FCCTRL_ACTIVE; + spin_unlock_irqrestore(&ctrl->lock, flags); + + changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); + WARN_ON_ONCE(!changed); + + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n", + ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl); + + kref_get(&ctrl->ctrl.kref); + + spin_lock_irqsave(&rport->lock, flags); + list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); + spin_unlock_irqrestore(&rport->lock, flags); + + if (opts->nr_io_queues) { + nvme_queue_scan(&ctrl->ctrl); + nvme_queue_async_events(&ctrl->ctrl); + } + + return &ctrl->ctrl; + +out_exit_aen_ops: + nvme_fc_exit_aen_ops(ctrl); +out_remove_admin_queue: + /* send a Disconnect(association) LS to fc-nvme target */ + nvme_fc_xmt_disconnect_assoc(ctrl); + nvme_stop_keep_alive(&ctrl->ctrl); + nvme_fc_destroy_admin_queue(ctrl); +out_uninit_ctrl: + nvme_uninit_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); + if (ret > 0) + ret = -EIO; 
+ /* exit via here will follow ctlr ref point callbacks to free */ + return ERR_PTR(ret); + +out_free_ida: + ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); +out_free_ctrl: + kfree(ctrl); +out_fail: + nvme_fc_rport_put(rport); + /* exit via here doesn't follow ctlr ref points */ + return ERR_PTR(ret); +} + +enum { + FCT_TRADDR_ERR = 0, + FCT_TRADDR_WWNN = 1 << 0, + FCT_TRADDR_WWPN = 1 << 1, +}; + +struct nvmet_fc_traddr { + u64 nn; + u64 pn; +}; + +static const match_table_t traddr_opt_tokens = { + { FCT_TRADDR_WWNN, "nn-%s" }, + { FCT_TRADDR_WWPN, "pn-%s" }, + { FCT_TRADDR_ERR, NULL } +}; + +static int +nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf) +{ + substring_t args[MAX_OPT_ARGS]; + char *options, *o, *p; + int token, ret = 0; + u64 token64; + + options = o = kstrdup(buf, GFP_KERNEL); + if (!options) + return -ENOMEM; + + while ((p = strsep(&o, ":\n")) != NULL) { + if (!*p) + continue; + + token = match_token(p, traddr_opt_tokens, args); + switch (token) { + case FCT_TRADDR_WWNN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out; + } + traddr->nn = token64; + break; + case FCT_TRADDR_WWPN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out; + } + traddr->pn = token64; + break; + default: + pr_warn("unknown traddr token or missing value '%s'\n", + p); + ret = -EINVAL; + goto out; + } + } + +out: + kfree(options); + return ret; +} + +static struct nvme_ctrl * +nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) +{ + struct nvme_fc_lport *lport; + struct nvme_fc_rport *rport; + struct nvmet_fc_traddr laddr = { 0L, 0L }; + struct nvmet_fc_traddr raddr = { 0L, 0L }; + unsigned long flags; + int ret; + + ret = nvme_fc_parse_address(&raddr, opts->traddr); + if (ret || !raddr.nn || !raddr.pn) + return ERR_PTR(-EINVAL); + + ret = nvme_fc_parse_address(&laddr, opts->host_traddr); + if (ret || !laddr.nn || !laddr.pn) + return ERR_PTR(-EINVAL); + + /* find the host and remote ports to connect together */ + spin_lock_irqsave(&nvme_fc_lock, flags); + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { + if (lport->localport.node_name != laddr.nn || + lport->localport.port_name != laddr.pn) + continue; + + list_for_each_entry(rport, &lport->endp_list, endp_list) { + if (rport->remoteport.node_name != raddr.nn || + rport->remoteport.port_name != raddr.pn) + continue; + + /* if fail to get reference fall through. 
Will error */ + if (!nvme_fc_rport_get(rport)) + break; + + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + return __nvme_fc_create_ctrl(dev, opts, lport, rport); + } + } + spin_unlock_irqrestore(&nvme_fc_lock, flags); + + return ERR_PTR(-ENOENT); +} + + +static struct nvmf_transport_ops nvme_fc_transport = { + .name = "fc", + .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, + .allowed_opts = NVMF_OPT_RECONNECT_DELAY, + .create_ctrl = nvme_fc_create_ctrl, +}; + +static int __init nvme_fc_init_module(void) +{ + nvme_fc_wq = create_workqueue("nvme_fc_wq"); + if (!nvme_fc_wq) + return -ENOMEM; + + nvmf_register_transport(&nvme_fc_transport); + return 0; +} + +static void __exit nvme_fc_exit_module(void) +{ + /* sanity check - all lports should be removed */ + if (!list_empty(&nvme_fc_lport_list)) + pr_warn("%s: localport list not empty\n", __func__); + + nvmf_unregister_transport(&nvme_fc_transport); + + destroy_workqueue(nvme_fc_wq); + + ida_destroy(&nvme_fc_local_port_cnt); + ida_destroy(&nvme_fc_ctrl_cnt); +} + +module_init(nvme_fc_init_module); +module_exit(nvme_fc_exit_module); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index f5e3011e31fcdfea4c4e067ca8b808bc66a2fffd..588d4a34c083492e047b42c3f8ac015bf3ce6277 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -146,14 +146,6 @@ struct nvme_nvm_command { }; }; -struct nvme_nvm_completion { - __le64 result; /* Used by LightNVM to return ppa completions */ - __le16 sq_head; /* how much of this queue may be reclaimed */ - __le16 sq_id; /* submission queue that generated this entry */ - __u16 command_id; /* of the command which completed */ - __le16 status; /* did the command fail, and if so, why? */ -}; - #define NVME_NVM_LP_MLC_PAIRS 886 struct nvme_nvm_lp_mlc { __le16 num_pairs; @@ -360,6 +352,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb, while (nlb) { u32 cmd_nlb = min(nlb_pr_rq, nlb); + u64 elba = slba + cmd_nlb; c.l2p.slba = cpu_to_le64(cmd_slba); c.l2p.nlb = cpu_to_le32(cmd_nlb); @@ -373,6 +366,14 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb, goto out; } + if (unlikely(elba > nvmdev->total_secs)) { + pr_err("nvm: L2P data from device is out of bounds!\n"); + return -EINVAL; + } + + /* Transform physical address to target address space */ + nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb); + if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) { ret = -EINTR; goto out; @@ -391,11 +392,12 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, u8 *blks) { struct request_queue *q = nvmdev->q; + struct nvm_geo *geo = &nvmdev->geo; struct nvme_ns *ns = q->queuedata; struct nvme_ctrl *ctrl = ns->ctrl; struct nvme_nvm_command c = {}; struct nvme_nvm_bb_tbl *bb_tbl; - int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode; + int nr_blks = geo->blks_per_lun * geo->plane_mode; int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks; int ret = 0; @@ -436,7 +438,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, goto out; } - memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode); + memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode); out: kfree(bb_tbl); return ret; @@ -481,14 +483,11 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd, static void nvme_nvm_end_io(struct request *rq, int error) { struct nvm_rq *rqd = rq->end_io_data; - struct nvme_nvm_completion *cqe = rq->special; - - if 
(cqe) - rqd->ppa_status = le64_to_cpu(cqe->result); + rqd->ppa_status = nvme_req(rq)->result.u64; nvm_end_io(rqd, error); - kfree(rq->cmd); + kfree(nvme_req(rq)->cmd); blk_mq_free_request(rq); } @@ -500,20 +499,18 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) struct bio *bio = rqd->bio; struct nvme_nvm_command *cmd; - rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0); - if (IS_ERR(rq)) + cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL); + if (!cmd) return -ENOMEM; - cmd = kzalloc(sizeof(struct nvme_nvm_command) + - sizeof(struct nvme_nvm_completion), GFP_KERNEL); - if (!cmd) { - blk_mq_free_request(rq); + rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY); + if (IS_ERR(rq)) { + kfree(cmd); return -ENOMEM; } + rq->cmd_flags &= ~REQ_FAILFAST_DRIVER; - rq->cmd_type = REQ_TYPE_DRV_PRIV; rq->ioprio = bio_prio(bio); - if (bio_has_data(bio)) rq->nr_phys_segments = bio_phys_segments(q, bio); @@ -522,10 +519,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) nvme_nvm_rqtocmd(rq, rqd, ns, cmd); - rq->cmd = (unsigned char *)cmd; - rq->cmd_len = sizeof(struct nvme_nvm_command); - rq->special = cmd + 1; - rq->end_io_data = rqd; blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io); @@ -543,6 +536,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd) c.erase.nsid = cpu_to_le32(ns->ns_id); c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa); c.erase.length = cpu_to_le16(rqd->nr_ppas - 1); + c.erase.control = cpu_to_le16(rqd->flags); return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0); } @@ -592,12 +586,10 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { .max_phys_sect = 64, }; -int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node, - const struct attribute_group *attrs) +int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node) { struct request_queue *q = ns->queue; struct nvm_dev *dev; - int ret; dev = nvm_alloc_dev(node); if (!dev) @@ -606,18 +598,10 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node, dev->q = q; memcpy(dev->name, disk_name, DISK_NAME_LEN); dev->ops = &nvme_nvm_dev_ops; - dev->parent_dev = ns->ctrl->device; dev->private_data = ns; ns->ndev = dev; - ret = nvm_register(dev); - - ns->lba_shift = ilog2(dev->sec_size) - 9; - - if (sysfs_create_group(&dev->dev.kobj, attrs)) - pr_warn("%s: failed to create sysfs group for identification\n", - disk_name); - return ret; + return nvm_register(dev); } void nvme_nvm_unregister(struct nvme_ns *ns) @@ -625,6 +609,167 @@ void nvme_nvm_unregister(struct nvme_ns *ns) nvm_unregister(ns->ndev); } +static ssize_t nvm_dev_attr_show(struct device *dev, + struct device_attribute *dattr, char *page) +{ + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); + struct nvm_dev *ndev = ns->ndev; + struct nvm_id *id; + struct nvm_id_group *grp; + struct attribute *attr; + + if (!ndev) + return 0; + + id = &ndev->identity; + grp = &id->groups[0]; + attr = &dattr->attr; + + if (strcmp(attr->name, "version") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id); + } else if (strcmp(attr->name, "vendor_opcode") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt); + } else if (strcmp(attr->name, "capabilities") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->cap); + } else if (strcmp(attr->name, "device_mode") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->dom); + } else if (strcmp(attr->name, "media_manager") == 0) { + if (!ndev->mt) + return scnprintf(page, PAGE_SIZE, 
"%s\n", "none"); + return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name); + } else if (strcmp(attr->name, "ppa_format") == 0) { + return scnprintf(page, PAGE_SIZE, + "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", + id->ppaf.ch_offset, id->ppaf.ch_len, + id->ppaf.lun_offset, id->ppaf.lun_len, + id->ppaf.pln_offset, id->ppaf.pln_len, + id->ppaf.blk_offset, id->ppaf.blk_len, + id->ppaf.pg_offset, id->ppaf.pg_len, + id->ppaf.sect_offset, id->ppaf.sect_len); + } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */ + return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype); + } else if (strcmp(attr->name, "flash_media_type") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype); + } else if (strcmp(attr->name, "num_channels") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch); + } else if (strcmp(attr->name, "num_luns") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun); + } else if (strcmp(attr->name, "num_planes") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln); + } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */ + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk); + } else if (strcmp(attr->name, "num_pages") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg); + } else if (strcmp(attr->name, "page_size") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz); + } else if (strcmp(attr->name, "hw_sector_size") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs); + } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */ + return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos); + } else if (strcmp(attr->name, "read_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt); + } else if (strcmp(attr->name, "read_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm); + } else if (strcmp(attr->name, "prog_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt); + } else if (strcmp(attr->name, "prog_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm); + } else if (strcmp(attr->name, "erase_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet); + } else if (strcmp(attr->name, "erase_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem); + } else if (strcmp(attr->name, "multiplane_modes") == 0) { + return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos); + } else if (strcmp(attr->name, "media_capabilities") == 0) { + return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap); + } else if (strcmp(attr->name, "max_phys_secs") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", + ndev->ops->max_phys_sect); + } else { + return scnprintf(page, + PAGE_SIZE, + "Unhandled attr(%s) in `nvm_dev_attr_show`\n", + attr->name); + } +} + +#define NVM_DEV_ATTR_RO(_name) \ + DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL) + +static NVM_DEV_ATTR_RO(version); +static NVM_DEV_ATTR_RO(vendor_opcode); +static NVM_DEV_ATTR_RO(capabilities); +static NVM_DEV_ATTR_RO(device_mode); +static NVM_DEV_ATTR_RO(ppa_format); +static NVM_DEV_ATTR_RO(media_manager); + +static NVM_DEV_ATTR_RO(media_type); +static NVM_DEV_ATTR_RO(flash_media_type); +static NVM_DEV_ATTR_RO(num_channels); +static NVM_DEV_ATTR_RO(num_luns); +static NVM_DEV_ATTR_RO(num_planes); +static NVM_DEV_ATTR_RO(num_blocks); +static NVM_DEV_ATTR_RO(num_pages); +static NVM_DEV_ATTR_RO(page_size); +static NVM_DEV_ATTR_RO(hw_sector_size); +static NVM_DEV_ATTR_RO(oob_sector_size); +static NVM_DEV_ATTR_RO(read_typ); +static NVM_DEV_ATTR_RO(read_max); 
+static NVM_DEV_ATTR_RO(prog_typ); +static NVM_DEV_ATTR_RO(prog_max); +static NVM_DEV_ATTR_RO(erase_typ); +static NVM_DEV_ATTR_RO(erase_max); +static NVM_DEV_ATTR_RO(multiplane_modes); +static NVM_DEV_ATTR_RO(media_capabilities); +static NVM_DEV_ATTR_RO(max_phys_secs); + +static struct attribute *nvm_dev_attrs[] = { + &dev_attr_version.attr, + &dev_attr_vendor_opcode.attr, + &dev_attr_capabilities.attr, + &dev_attr_device_mode.attr, + &dev_attr_media_manager.attr, + + &dev_attr_ppa_format.attr, + &dev_attr_media_type.attr, + &dev_attr_flash_media_type.attr, + &dev_attr_num_channels.attr, + &dev_attr_num_luns.attr, + &dev_attr_num_planes.attr, + &dev_attr_num_blocks.attr, + &dev_attr_num_pages.attr, + &dev_attr_page_size.attr, + &dev_attr_hw_sector_size.attr, + &dev_attr_oob_sector_size.attr, + &dev_attr_read_typ.attr, + &dev_attr_read_max.attr, + &dev_attr_prog_typ.attr, + &dev_attr_prog_max.attr, + &dev_attr_erase_typ.attr, + &dev_attr_erase_max.attr, + &dev_attr_multiplane_modes.attr, + &dev_attr_media_capabilities.attr, + &dev_attr_max_phys_secs.attr, + NULL, +}; + +static const struct attribute_group nvm_dev_attr_group = { + .name = "lightnvm", + .attrs = nvm_dev_attrs, +}; + +int nvme_nvm_register_sysfs(struct nvme_ns *ns) +{ + return sysfs_create_group(&disk_to_dev(ns->disk)->kobj, + &nvm_dev_attr_group); +} + +void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) +{ + sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, + &nvm_dev_attr_group); +} + /* move to shared place when used in multiple places. */ #define PCI_VENDOR_ID_CNEX 0x1d1d #define PCI_DEVICE_ID_CNEX_WL 0x2807 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index d47f5a5d18c7c8dd93338eae503845f94c548c99..bd5321441d127143b87563e5463d2944aa0c1b0e 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -79,6 +79,20 @@ enum nvme_quirks { NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3), }; +/* + * Common request structure for NVMe passthrough. All drivers must have + * this structure as the first member of their request-private data. + */ +struct nvme_request { + struct nvme_command *cmd; + union nvme_result result; +}; + +static inline struct nvme_request *nvme_req(struct request *req) +{ + return blk_mq_rq_to_pdu(req); +} + /* The below value is the specific amount of delay needed before checking * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. 
The value (in ms) was @@ -222,8 +236,10 @@ static inline unsigned nvme_map_len(struct request *rq) static inline void nvme_cleanup_cmd(struct request *req) { - if (req_op(req) == REQ_OP_DISCARD) - kfree(req->completion_data); + if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { + kfree(page_address(req->special_vec.bv_page) + + req->special_vec.bv_offset); + } } static inline int nvme_error_status(u16 status) @@ -261,8 +277,8 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl); void nvme_remove_namespaces(struct nvme_ctrl *ctrl); #define NVME_NR_AERS 1 -void nvme_complete_async_event(struct nvme_ctrl *ctrl, - struct nvme_completion *cqe); +void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, + union nvme_result *res); void nvme_queue_async_events(struct nvme_ctrl *ctrl); void nvme_stop_queues(struct nvme_ctrl *ctrl); @@ -278,7 +294,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req, int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buf, unsigned bufflen); int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, - struct nvme_completion *cqe, void *buffer, unsigned bufflen, + union nvme_result *result, void *buffer, unsigned bufflen, unsigned timeout, int qid, int at_head, int flags); int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, void __user *ubuffer, unsigned bufflen, u32 *result, @@ -307,36 +323,33 @@ int nvme_sg_get_version_num(int __user *ip); #ifdef CONFIG_NVM int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); -int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node, - const struct attribute_group *attrs); +int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node); void nvme_nvm_unregister(struct nvme_ns *ns); - -static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) -{ - if (dev->type->devnode) - return dev_to_disk(dev)->private_data; - - return (container_of(dev, struct nvm_dev, dev))->private_data; -} +int nvme_nvm_register_sysfs(struct nvme_ns *ns); +void nvme_nvm_unregister_sysfs(struct nvme_ns *ns); #else static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, - int node, - const struct attribute_group *attrs) + int node) { return 0; } static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}; - +static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns) +{ + return 0; +} +static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}; static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) { return 0; } +#endif /* CONFIG_NVM */ + static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) { return dev_to_disk(dev)->private_data; } -#endif /* CONFIG_NVM */ int __init nvme_core_init(void); void nvme_core_exit(void); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 0248d0e21feedb706b818b01cbfeea164322570c..3d21a154dce79deceeff77cd16ef5c6bf2a71978 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -50,7 +50,7 @@ #define NVME_AQ_DEPTH 256 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) - + /* * We handle AEN commands ourselves and don't even let the * block layer know about them. @@ -141,6 +141,7 @@ struct nvme_queue { * allocated to store the PRP list. */ struct nvme_iod { + struct nvme_request req; struct nvme_queue *nvmeq; int aborted; int npages; /* In the PRP list. 
0 means small pool in use */ @@ -302,14 +303,14 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq, static __le64 **iod_list(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - return (__le64 **)(iod->sg + req->nr_phys_segments); + return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req)); } static int nvme_init_iod(struct request *rq, unsigned size, struct nvme_dev *dev) { struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); - int nseg = rq->nr_phys_segments; + int nseg = blk_rq_nr_phys_segments(rq); if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC); @@ -324,11 +325,11 @@ static int nvme_init_iod(struct request *rq, unsigned size, iod->nents = 0; iod->length = size; - if (!(rq->cmd_flags & REQ_DONTPREP)) { + if (!(rq->rq_flags & RQF_DONTPREP)) { rq->retries = 0; - rq->cmd_flags |= REQ_DONTPREP; + rq->rq_flags |= RQF_DONTPREP; } - return 0; + return BLK_MQ_RQ_QUEUE_OK; } static void nvme_free_iod(struct nvme_dev *dev, struct request *req) @@ -339,8 +340,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req) __le64 **list = iod_list(req); dma_addr_t prp_dma = iod->first_dma; - nvme_cleanup_cmd(req); - if (iod->npages == 0) dma_pool_free(dev->prp_small_pool, list[0], prp_dma); for (i = 0; i < iod->npages; i++) { @@ -510,7 +509,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req, DMA_TO_DEVICE : DMA_FROM_DEVICE; int ret = BLK_MQ_RQ_QUEUE_ERROR; - sg_init_table(iod->sg, req->nr_phys_segments); + sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); iod->nents = blk_rq_map_sg(q, req, iod->sg); if (!iod->nents) goto out; @@ -566,6 +565,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) } } + nvme_cleanup_cmd(req); nvme_free_iod(dev, req); } @@ -596,22 +596,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, } } - map_len = nvme_map_len(req); - ret = nvme_init_iod(req, map_len, dev); - if (ret) + ret = nvme_setup_cmd(ns, req, &cmnd); + if (ret != BLK_MQ_RQ_QUEUE_OK) return ret; - ret = nvme_setup_cmd(ns, req, &cmnd); - if (ret) - goto out; + map_len = nvme_map_len(req); + ret = nvme_init_iod(req, map_len, dev); + if (ret != BLK_MQ_RQ_QUEUE_OK) + goto out_free_cmd; - if (req->nr_phys_segments) + if (blk_rq_nr_phys_segments(req)) ret = nvme_map_data(dev, req, map_len, &cmnd); - if (ret) - goto out; + if (ret != BLK_MQ_RQ_QUEUE_OK) + goto out_cleanup_iod; - cmnd.common.command_id = req->tag; blk_mq_start_request(req); spin_lock_irq(&nvmeq->q_lock); @@ -621,14 +620,16 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, else ret = BLK_MQ_RQ_QUEUE_ERROR; spin_unlock_irq(&nvmeq->q_lock); - goto out; + goto out_cleanup_iod; } __nvme_submit_cmd(nvmeq, &cmnd); nvme_process_cq(nvmeq); spin_unlock_irq(&nvmeq->q_lock); return BLK_MQ_RQ_QUEUE_OK; -out: +out_cleanup_iod: nvme_free_iod(dev, req); +out_free_cmd: + nvme_cleanup_cmd(req); return ret; } @@ -703,13 +704,13 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) */ if (unlikely(nvmeq->qid == 0 && cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) { - nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe); + nvme_complete_async_event(&nvmeq->dev->ctrl, + cqe.status, &cqe.result); continue; } req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id); - if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special) - memcpy(req->special, &cqe, sizeof(cqe)); + nvme_req(req)->result = cqe.result; blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1); } @@ -1242,20 +1243,16 @@ static int 
nvme_configure_admin_queue(struct nvme_dev *dev) result = nvme_enable_ctrl(&dev->ctrl, cap); if (result) - goto free_nvmeq; + return result; nvmeq->cq_vector = 0; result = queue_request_irq(nvmeq); if (result) { nvmeq->cq_vector = -1; - goto free_nvmeq; + return result; } return result; - - free_nvmeq: - nvme_free_queues(dev, 0); - return result; } static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) @@ -1285,6 +1282,24 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) return true; } +static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) +{ + /* Read a config register to help see what died. */ + u16 pci_status; + int result; + + result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, + &pci_status); + if (result == PCIBIOS_SUCCESSFUL) + dev_warn(dev->dev, + "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", + csts, pci_status); + else + dev_warn(dev->dev, + "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", + csts, result); +} + static void nvme_watchdog_timer(unsigned long data) { struct nvme_dev *dev = (struct nvme_dev *)data; @@ -1293,9 +1308,7 @@ static void nvme_watchdog_timer(unsigned long data) /* Skip controllers under certain specific conditions. */ if (nvme_should_reset(dev, csts)) { if (!nvme_reset(dev)) - dev_warn(dev->dev, - "Failed status: 0x%x, reset controller.\n", - csts); + nvme_warn_reset(dev, csts); return; } @@ -1317,10 +1330,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev) max = min(dev->max_qid, dev->queue_count - 1); for (i = dev->online_queues; i <= max; i++) { ret = nvme_create_queue(dev->queues[i], i); - if (ret) { - nvme_free_queues(dev, i); + if (ret) break; - } } /* @@ -1338,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev, { struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); - return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", + return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", ndev->cmbloc, ndev->cmbsz); } static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); @@ -1460,13 +1471,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) result = queue_request_irq(adminq); if (result) { adminq->cq_vector = -1; - goto free_queues; + return result; } return nvme_create_io_queues(dev); - - free_queues: - nvme_free_queues(dev, 1); - return result; } static void nvme_del_queue_end(struct request *req, int error) @@ -2095,9 +2102,6 @@ static const struct pci_error_handlers nvme_err_handler = { .reset_notify = nvme_reset_notify, }; -/* Move to pci_ids.h later */ -#define PCI_CLASS_STORAGE_EXPRESS 0x010802 - static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x0953), .driver_data = NVME_QUIRK_STRIPE_SIZE | diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 5a8388177959916dc4dadbc5e6d8c366b0eb6b81..f587af345889eb1b32a1f4f87dbe0c9a3a30ae42 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -28,7 +28,6 @@ #include #include -#include #include #include "nvme.h" @@ -43,6 +42,28 @@ #define NVME_RDMA_MAX_INLINE_SEGMENTS 1 +static const char *const nvme_rdma_cm_status_strs[] = { + [NVME_RDMA_CM_INVALID_LEN] = "invalid length", + [NVME_RDMA_CM_INVALID_RECFMT] = "invalid record format", + [NVME_RDMA_CM_INVALID_QID] = "invalid queue ID", + [NVME_RDMA_CM_INVALID_HSQSIZE] = "invalid host SQ size", + [NVME_RDMA_CM_INVALID_HRQSIZE] = "invalid host RQ size", + [NVME_RDMA_CM_NO_RSC] = "resource not found", + [NVME_RDMA_CM_INVALID_IRD] = "invalid IRD", + [NVME_RDMA_CM_INVALID_ORD] = 
"Invalid ORD", +}; + +static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) +{ + size_t index = status; + + if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) && + nvme_rdma_cm_status_strs[index]) + return nvme_rdma_cm_status_strs[index]; + else + return "unrecognized reason"; +}; + /* * We handle AEN commands ourselves and don't even let the * block layer know about them. @@ -66,6 +87,7 @@ struct nvme_rdma_qe { struct nvme_rdma_queue; struct nvme_rdma_request { + struct nvme_request req; struct ib_mr *mr; struct nvme_rdma_qe sqe; struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS]; @@ -83,6 +105,7 @@ enum nvme_rdma_queue_flags { NVME_RDMA_Q_CONNECTED = (1 << 0), NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), NVME_RDMA_Q_DELETING = (1 << 2), + NVME_RDMA_Q_LIVE = (1 << 3), }; struct nvme_rdma_queue { @@ -240,7 +263,9 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev, static void nvme_rdma_qp_event(struct ib_event *event, void *context) { - pr_debug("QP event %d\n", event->event); + pr_debug("QP event %s (%d)\n", + ib_event_msg(event->event), event->event); + } static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) @@ -624,10 +649,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl) for (i = 1; i < ctrl->queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); - if (ret) - break; + if (ret) { + dev_info(ctrl->ctrl.device, + "failed to connect i/o queue: %d\n", ret); + goto out_free_queues; + } + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); } + return 0; + +out_free_queues: + nvme_rdma_free_io_queues(ctrl); return ret; } @@ -712,6 +745,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) if (ret) goto stop_admin_q; + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); + ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); if (ret) goto stop_admin_q; @@ -761,8 +796,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) nvme_stop_keep_alive(&ctrl->ctrl); - for (i = 0; i < ctrl->queue_count; i++) + for (i = 0; i < ctrl->queue_count; i++) { clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); + clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); + } if (ctrl->queue_count > 1) nvme_stop_queues(&ctrl->ctrl); @@ -950,8 +987,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_device *dev = queue->device; struct ib_device *ibdev = dev->dev; - int nents, count; - int ret; + int count, ret; req->num_sge = 1; req->inline_data = false; @@ -963,16 +999,14 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, return nvme_rdma_set_sg_null(c); req->sg_table.sgl = req->first_sgl; - ret = sg_alloc_table_chained(&req->sg_table, rq->nr_phys_segments, - req->sg_table.sgl); + ret = sg_alloc_table_chained(&req->sg_table, + blk_rq_nr_phys_segments(rq), req->sg_table.sgl); if (ret) return -ENOMEM; - nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl); - BUG_ON(nents > rq->nr_phys_segments); - req->nents = nents; + req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl); - count = ib_dma_map_sg(ibdev, req->sg_table.sgl, nents, + count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents, rq_data_dir(rq) == WRITE ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); if (unlikely(count <= 0)) { sg_free_table_chained(&req->sg_table, true); @@ -1117,13 +1151,10 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx) static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, struct nvme_completion *cqe, struct ib_wc *wc, int tag) { - u16 status = le16_to_cpu(cqe->status); struct request *rq; struct nvme_rdma_request *req; int ret = 0; - status >>= 1; - rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id); if (!rq) { dev_err(queue->ctrl->ctrl.device, @@ -1134,9 +1165,6 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, } req = blk_mq_rq_to_pdu(rq); - if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special) - memcpy(rq->special, cqe, sizeof(*cqe)); - if (rq->tag == tag) ret = 1; @@ -1144,8 +1172,8 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, wc->ex.invalidate_rkey == req->mr->rkey) req->mr->need_inval = false; - blk_mq_complete_request(rq, status); - + req->req.result = cqe->result; + blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1); return ret; } @@ -1173,7 +1201,8 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag) */ if (unlikely(nvme_rdma_queue_idx(queue) == 0 && cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH)) - nvme_complete_async_event(&queue->ctrl->ctrl, cqe); + nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, + &cqe->result); else ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag); ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); @@ -1207,16 +1236,24 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, struct rdma_cm_event *ev) { - if (ev->param.conn.private_data_len) { - struct nvme_rdma_cm_rej *rej = - (struct nvme_rdma_cm_rej *)ev->param.conn.private_data; + struct rdma_cm_id *cm_id = queue->cm_id; + int status = ev->status; + const char *rej_msg; + const struct nvme_rdma_cm_rej *rej_data; + u8 rej_data_len; + + rej_msg = rdma_reject_msg(cm_id, status); + rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len); + + if (rej_data && rej_data_len >= sizeof(u16)) { + u16 sts = le16_to_cpu(rej_data->sts); dev_err(queue->ctrl->ctrl.device, - "Connect rejected, status %d.", le16_to_cpu(rej->sts)); - /* XXX: Think of something clever to do here... */ + "Connect rejected: status %d (%s) nvme status %d (%s).\n", + status, rej_msg, sts, nvme_rdma_cm_msg(sts)); } else { dev_err(queue->ctrl->ctrl.device, - "Connect rejected, no private data.\n"); + "Connect rejected: status %d (%s).\n", status, rej_msg); } return -ECONNRESET; @@ -1378,6 +1415,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved) return BLK_EH_HANDLED; } +/* + * We cannot accept any other command until the Connect command has completed. 
+ */ +static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, + struct request *rq) +{ + if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { + struct nvme_command *cmd = (struct nvme_command *)rq->cmd; + + if (rq->cmd_type != REQ_TYPE_DRV_PRIV || + cmd->common.opcode != nvme_fabrics_command || + cmd->fabrics.fctype != nvme_fabrics_type_connect) + return false; + } + + return true; +} + static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -1394,15 +1449,17 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, WARN_ON_ONCE(rq->tag < 0); + if (!nvme_rdma_queue_is_ready(queue, rq)) + return BLK_MQ_RQ_QUEUE_BUSY; + dev = queue->device->dev; ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(struct nvme_command), DMA_TO_DEVICE); ret = nvme_setup_cmd(ns, rq, c); - if (ret) + if (ret != BLK_MQ_RQ_QUEUE_OK) return ret; - c->common.command_id = rq->tag; blk_mq_start_request(rq); map_len = nvme_map_len(rq); @@ -1544,6 +1601,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) if (error) goto out_cleanup_queue; + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); + error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); if (error) { dev_err(ctrl->ctrl.device, @@ -1908,6 +1967,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, opts->queue_size = ctrl->ctrl.maxcmd; } + if (opts->queue_size > ctrl->ctrl.sqsize + 1) { + /* warn if sqsize is lower than queue_size */ + dev_warn(ctrl->ctrl.device, + "queue_size %zu > ctrl sqsize %u, clamping down\n", + opts->queue_size, ctrl->ctrl.sqsize + 1); + opts->queue_size = ctrl->ctrl.sqsize + 1; + } + if (opts->nr_io_queues) { ret = nvme_rdma_create_io_queues(ctrl); if (ret) diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c index 3eaa4d27801ee17c7e1ccaa1b8a489844cb6f5bc..b71e95044b43e3e62a5d063047064a46d88a6d9f 100644 --- a/drivers/nvme/host/scsi.c +++ b/drivers/nvme/host/scsi.c @@ -1280,10 +1280,6 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10, static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, u16 idx, u16 bd_len, u8 llbaa) { - u16 bd_num; - - bd_num = bd_len / ((llbaa == 0) ? - SHORT_DESC_BLOCK : LONG_DESC_BLOCK); /* Store block descriptor info if a FORMAT UNIT comes later */ /* TODO Saving 1st BD info; what to do if multiple BD received? 
*/ if (llbaa == 0) { @@ -1528,7 +1524,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, int nvme_sc; struct nvme_id_ns *id_ns; u8 i; - u8 flbas, nlbaf; + u8 nlbaf; u8 selected_lbaf = 0xFF; u32 cdw10 = 0; struct nvme_command c; @@ -1539,7 +1535,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, if (res) return res; - flbas = (id_ns->flbas) & 0x0F; nlbaf = id_ns->nlbaf; for (i = 0; i < nlbaf; i++) { @@ -2168,12 +2163,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns, static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 *cmd) { - u8 immed, pcmod, no_flush, start; + u8 immed, no_flush; immed = cmd[1] & 0x01; - pcmod = cmd[3] & 0x0f; no_flush = cmd[4] & 0x04; - start = cmd[4] & 0x01; if (immed != 0) { return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 3a5b9d0576cb0f71a5bf22a34c905ae8dfc634fb..03e4ab65fe777ad3c6a347959fb9cdb2dc5a060a 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -34,3 +34,27 @@ config NVME_TARGET_RDMA devices over RDMA. If unsure, say N. + +config NVME_TARGET_FC + tristate "NVMe over Fabrics FC target driver" + depends on NVME_TARGET + depends on HAS_DMA + help + This enables the NVMe FC target support, which allows exporting NVMe + devices over FC. + + If unsure, say N. + +config NVME_TARGET_FCLOOP + tristate "NVMe over Fabrics FC Transport Loopback Test driver" + depends on NVME_TARGET + select NVME_CORE + select NVME_FABRICS + select SG_POOL + depends on NVME_FC + depends on NVME_TARGET_FC + help + This enables the NVMe FC loopback test support, which can be useful + to test NVMe-FC transport interfaces. + + If unsure, say N. 
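For reference, the transport address strings consumed by nvme_fc_parse_address() in the new FC host transport above take the form "nn-<wwnn>:pn-<wwpn>", with both world-wide names normally written as 0x-prefixed 64-bit hex values; a connect to the "fc" transport has to supply such a string for both traddr and host_traddr (see required_opts). A minimal user-space sketch of that format, illustrative only and not part of the patch (it uses sscanf() instead of the kernel's match_token()/match_u64() helpers):

  #include <inttypes.h>
  #include <stdio.h>

  /* Parse "nn-<wwnn>:pn-<wwpn>" into two 64-bit WWNs; returns 0 on success. */
  static int parse_fc_traddr(const char *s, uint64_t *nn, uint64_t *pn)
  {
      return sscanf(s, "nn-%" SCNx64 ":pn-%" SCNx64, nn, pn) == 2 ? 0 : -1;
  }

  int main(void)
  {
      /* Placeholder WWNs, e.g. as they would appear in opts->traddr. */
      const char *traddr = "nn-0x200000109b5199d7:pn-0x100000109b5199d7";
      uint64_t nn, pn;

      if (parse_fc_traddr(traddr, &nn, &pn))
          return 1;
      printf("node_name=0x%016" PRIx64 " port_name=0x%016" PRIx64 "\n", nn, pn);
      return 0;
  }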
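The struct nvme_request added to nvme.h above relies on a simple convention: each transport places it at offset zero of its blk-mq request PDU (struct nvme_iod in pci.c and struct nvme_rdma_request in rdma.c both gain it as their first member), so nvme_req() can reinterpret whatever blk_mq_rq_to_pdu() returns. A small stand-alone C analogue of that first-member pattern, with invented toy_* names, purely to illustrate why the cast is safe:

  #include <stdint.h>
  #include <stdio.h>

  /* Invented stand-ins for the kernel types, for illustration only. */
  struct toy_nvme_request {
      void     *cmd;
      uint64_t  result;
  };

  /* A transport-private PDU that keeps the common part as its first member,
   * mirroring what struct nvme_iod and struct nvme_rdma_request do above. */
  struct toy_transport_pdu {
      struct toy_nvme_request req;   /* must stay first */
      int   nents;
      void *sg;
  };

  /* Equivalent of nvme_req(): a PDU pointer is also a pointer to the
   * embedded request header, because the header sits at offset zero. */
  static struct toy_nvme_request *toy_nvme_req(void *pdu)
  {
      return pdu;
  }

  int main(void)
  {
      struct toy_transport_pdu pdu = { .req = { .result = 0x1234 } };

      printf("result = 0x%llx\n",
             (unsigned long long)toy_nvme_req(&pdu)->result);
      return 0;
  }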
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile index b7a06232c9da79004120ddbb2d4f5f2a26de6bc8..fecc14f535b23e6e66ed63acf2e9b5d73a11e12a 100644 --- a/drivers/nvme/target/Makefile +++ b/drivers/nvme/target/Makefile @@ -2,8 +2,12 @@ obj-$(CONFIG_NVME_TARGET) += nvmet.o obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o +obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o +obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \ discovery.o nvme-loop-y += loop.o nvmet-rdma-y += rdma.o +nvmet-fc-y += fc.o +nvme-fcloop-y += fcloop.o diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 6fe4c48a21e46520ad1e0e569a9773813ed7e233..ec1ad2aa0a4ca941e8fe51db94cc7ffa452c1693 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -237,7 +237,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); id->nn = cpu_to_le32(ctrl->subsys->max_nsid); - id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM); + id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM | + NVME_CTRL_ONCS_WRITE_ZEROES); /* XXX: don't report vwc if the underlying device is write through */ id->vwc = NVME_CTRL_VWC_PRESENT; diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index af5e2dc4a3d503bb8bca93951bcfa3be58685ca5..6f5074153dcd9ea921f35751bbd622c18f227e49 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -37,6 +37,8 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item, return sprintf(page, "ipv6\n"); case NVMF_ADDR_FAMILY_IB: return sprintf(page, "ib\n"); + case NVMF_ADDR_FAMILY_FC: + return sprintf(page, "fc\n"); default: return sprintf(page, "\n"); } @@ -59,6 +61,8 @@ static ssize_t nvmet_addr_adrfam_store(struct config_item *item, port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6; } else if (sysfs_streq(page, "ib")) { port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB; + } else if (sysfs_streq(page, "fc")) { + port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC; } else { pr_err("Invalid value '%s' for adrfam\n", page); return -EINVAL; @@ -209,6 +213,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item, return sprintf(page, "rdma\n"); case NVMF_TRTYPE_LOOP: return sprintf(page, "loop\n"); + case NVMF_TRTYPE_FC: + return sprintf(page, "fc\n"); default: return sprintf(page, "\n"); } @@ -229,6 +235,12 @@ static void nvmet_port_init_tsas_loop(struct nvmet_port *port) memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); } +static void nvmet_port_init_tsas_fc(struct nvmet_port *port) +{ + port->disc_addr.trtype = NVMF_TRTYPE_FC; + memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); +} + static ssize_t nvmet_addr_trtype_store(struct config_item *item, const char *page, size_t count) { @@ -244,6 +256,8 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item, nvmet_port_init_tsas_rdma(port); } else if (sysfs_streq(page, "loop")) { nvmet_port_init_tsas_loop(port); + } else if (sysfs_streq(page, "fc")) { + nvmet_port_init_tsas_fc(port); } else { pr_err("Invalid value '%s' for trtype\n", page); return -EINVAL; @@ -271,7 +285,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item, mutex_lock(&subsys->lock); ret = -EBUSY; - if (nvmet_ns_enabled(ns)) + if (ns->enabled) goto out_unlock; kfree(ns->device_path); @@ -307,7 +321,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item, int ret = 0; mutex_lock(&subsys->lock); - if 
(nvmet_ns_enabled(ns)) { + if (ns->enabled) { ret = -EBUSY; goto out_unlock; } @@ -339,7 +353,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid); static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page) { - return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item))); + return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled); } static ssize_t nvmet_ns_enable_store(struct config_item *item, @@ -466,7 +480,7 @@ static int nvmet_port_subsys_allow_link(struct config_item *parent, return ret; } -static int nvmet_port_subsys_drop_link(struct config_item *parent, +static void nvmet_port_subsys_drop_link(struct config_item *parent, struct config_item *target) { struct nvmet_port *port = to_nvmet_port(parent->ci_parent); @@ -479,7 +493,7 @@ static int nvmet_port_subsys_drop_link(struct config_item *parent, goto found; } up_write(&nvmet_config_sem); - return -EINVAL; + return; found: list_del(&p->entry); @@ -488,7 +502,6 @@ static int nvmet_port_subsys_drop_link(struct config_item *parent, nvmet_disable_port(port); up_write(&nvmet_config_sem); kfree(p); - return 0; } static struct configfs_item_operations nvmet_port_subsys_item_ops = { @@ -542,7 +555,7 @@ static int nvmet_allowed_hosts_allow_link(struct config_item *parent, return ret; } -static int nvmet_allowed_hosts_drop_link(struct config_item *parent, +static void nvmet_allowed_hosts_drop_link(struct config_item *parent, struct config_item *target) { struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); @@ -555,14 +568,13 @@ static int nvmet_allowed_hosts_drop_link(struct config_item *parent, goto found; } up_write(&nvmet_config_sem); - return -EINVAL; + return; found: list_del(&p->entry); nvmet_genctr++; up_write(&nvmet_config_sem); kfree(p); - return 0; } static struct configfs_item_operations nvmet_allowed_hosts_item_ops = { diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b4cacb6f02583740933ac63c42af32a7377f6687..b1d66ed655c9ec36261fb4d31bca06555cccca39 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns) int ret = 0; mutex_lock(&subsys->lock); - if (!list_empty(&ns->dev_link)) + if (ns->enabled) goto out_unlock; ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE, @@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns) list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0); + ns->enabled = true; ret = 0; out_unlock: mutex_unlock(&subsys->lock); @@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *ns) struct nvmet_ctrl *ctrl; mutex_lock(&subsys->lock); - if (list_empty(&ns->dev_link)) { - mutex_unlock(&subsys->lock); - return; - } - list_del_init(&ns->dev_link); + if (!ns->enabled) + goto out_unlock; + + ns->enabled = false; + list_del_rcu(&ns->dev_link); mutex_unlock(&subsys->lock); /* @@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns) if (ns->bdev) blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ); +out_unlock: mutex_unlock(&subsys->lock); } @@ -617,7 +619,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid, if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", subsysnqn); - req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn); + req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; } @@ -638,7 +640,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid, pr_warn("could not find 
controller %d for subsys %s / host %s\n", cntlid, subsysnqn, hostnqn); - req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid); + req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; out: @@ -700,7 +702,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", subsysnqn); - req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn); + req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); goto out; } @@ -709,7 +711,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, if (!nvmet_host_allowed(req, subsys, hostnqn)) { pr_info("connect by host %s for subsystem %s not allowed\n", hostnqn, subsysnqn); - req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn); + req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); goto out_put_subsystem; } @@ -838,9 +840,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work) void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) { - ctrl->csts |= NVME_CSTS_CFS; - INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); - schedule_work(&ctrl->fatal_err_work); + mutex_lock(&ctrl->lock); + if (!(ctrl->csts & NVME_CSTS_CFS)) { + ctrl->csts |= NVME_CSTS_CFS; + INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); + schedule_work(&ctrl->fatal_err_work); + } + mutex_unlock(&ctrl->lock); } EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 9a97ae67e656456ebc053c31dbdc8bfc5b76ee0e..f4088198cd0d0a15b8b3da63f698612f568f2bb3 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -69,7 +69,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) } } - req->rsp->result64 = cpu_to_le64(val); + req->rsp->result.u64 = cpu_to_le64(val); nvmet_req_complete(req, status); } @@ -125,7 +125,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) d = kmap(sg_page(req->sg)) + req->sg->offset; /* zero out initial completion result, assign values as needed */ - req->rsp->result = 0; + req->rsp->result.u32 = 0; if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", @@ -138,7 +138,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) pr_warn("connect attempt for invalid controller ID %#x\n", d->cntlid); status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; - req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid); + req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); goto out; } @@ -155,7 +155,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) pr_info("creating controller %d for NQN %s.\n", ctrl->cntlid, ctrl->hostnqn); - req->rsp->result16 = cpu_to_le16(ctrl->cntlid); + req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid); out: kunmap(sg_page(req->sg)); @@ -173,7 +173,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) d = kmap(sg_page(req->sg)) + req->sg->offset; /* zero out initial completion result, assign values as needed */ - req->rsp->result = 0; + req->rsp->result.u32 = 0; if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", @@ -191,14 +191,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) if (unlikely(qid > ctrl->subsys->max_qid)) { pr_warn("invalid queue id (%d)\n", qid); status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; - req->rsp->result = IPO_IATTR_CONNECT_SQE(qid); + req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid); goto out_ctrl_put; } status = nvmet_install_queue(ctrl, 
req); if (status) { /* pass back cntlid that had the issue of installing queue */ - req->rsp->result16 = cpu_to_le16(ctrl->cntlid); + req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid); goto out_ctrl_put; } diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c new file mode 100644 index 0000000000000000000000000000000000000000..173e842f19c975a4a849c4120fcfcdcee73c26c1 --- /dev/null +++ b/drivers/nvme/target/fc.c @@ -0,0 +1,2288 @@ +/* + * Copyright (c) 2016 Avago Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful. + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, + * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO + * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. + * See the GNU General Public License for more details, a copy of which + * can be found in the file COPYING included with this package + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include + +#include "nvmet.h" +#include +#include + + +/* *************************** Data Structures/Defines ****************** */ + + +#define NVMET_LS_CTX_COUNT 4 + +/* for this implementation, assume small single frame rqst/rsp */ +#define NVME_FC_MAX_LS_BUFFER_SIZE 2048 + +struct nvmet_fc_tgtport; +struct nvmet_fc_tgt_assoc; + +struct nvmet_fc_ls_iod { + struct nvmefc_tgt_ls_req *lsreq; + struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ + + struct list_head ls_list; /* tgtport->ls_list */ + + struct nvmet_fc_tgtport *tgtport; + struct nvmet_fc_tgt_assoc *assoc; + + u8 *rqstbuf; + u8 *rspbuf; + u16 rqstdatalen; + dma_addr_t rspdma; + + struct scatterlist sg[2]; + + struct work_struct work; +} __aligned(sizeof(unsigned long long)); + +#define NVMET_FC_MAX_KB_PER_XFR 256 + +enum nvmet_fcp_datadir { + NVMET_FCP_NODATA, + NVMET_FCP_WRITE, + NVMET_FCP_READ, + NVMET_FCP_ABORTED, +}; + +struct nvmet_fc_fcp_iod { + struct nvmefc_tgt_fcp_req *fcpreq; + + struct nvme_fc_cmd_iu cmdiubuf; + struct nvme_fc_ersp_iu rspiubuf; + dma_addr_t rspdma; + struct scatterlist *data_sg; + struct scatterlist *next_sg; + int data_sg_cnt; + u32 next_sg_offset; + u32 total_length; + u32 offset; + enum nvmet_fcp_datadir io_dir; + bool active; + bool abort; + spinlock_t flock; + + struct nvmet_req req; + struct work_struct work; + + struct nvmet_fc_tgtport *tgtport; + struct nvmet_fc_tgt_queue *queue; + + struct list_head fcp_list; /* tgtport->fcp_list */ +}; + +struct nvmet_fc_tgtport { + + struct nvmet_fc_target_port fc_target_port; + + struct list_head tgt_list; /* nvmet_fc_target_list */ + struct device *dev; /* dev for dma mapping */ + struct nvmet_fc_target_template *ops; + + struct nvmet_fc_ls_iod *iod; + spinlock_t lock; + struct list_head ls_list; + struct list_head ls_busylist; + struct list_head assoc_list; + struct ida assoc_cnt; + struct nvmet_port *port; + struct kref ref; +}; + +struct nvmet_fc_tgt_queue { + bool ninetypercent; + u16 qid; + u16 sqsize; + u16 ersp_ratio; + u16 sqhd; + int cpu; + atomic_t connected; + atomic_t sqtail; + atomic_t zrspcnt; + atomic_t rsn; + spinlock_t qlock; + struct nvmet_port *port; + struct nvmet_cq nvme_cq; + struct nvmet_sq nvme_sq; + struct nvmet_fc_tgt_assoc *assoc; + 
struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ + struct list_head fod_list; + struct workqueue_struct *work_q; + struct kref ref; +} __aligned(sizeof(unsigned long long)); + +struct nvmet_fc_tgt_assoc { + u64 association_id; + u32 a_id; + struct nvmet_fc_tgtport *tgtport; + struct list_head a_list; + struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES]; + struct kref ref; +}; + + +static inline int +nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) +{ + return (iodptr - iodptr->tgtport->iod); +} + +static inline int +nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) +{ + return (fodptr - fodptr->queue->fod); +} + + +/* + * Association and Connection IDs: + * + * Association ID will have random number in upper 6 bytes and zero + * in lower 2 bytes + * + * Connection IDs will be Association ID with QID or'd in lower 2 bytes + * + * note: Association ID = Connection ID for queue 0 + */ +#define BYTES_FOR_QID sizeof(u16) +#define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) +#define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) + +static inline u64 +nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) +{ + return (assoc->association_id | qid); +} + +static inline u64 +nvmet_fc_getassociationid(u64 connectionid) +{ + return connectionid & ~NVMET_FC_QUEUEID_MASK; +} + +static inline u16 +nvmet_fc_getqueueid(u64 connectionid) +{ + return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); +} + +static inline struct nvmet_fc_tgtport * +targetport_to_tgtport(struct nvmet_fc_target_port *targetport) +{ + return container_of(targetport, struct nvmet_fc_tgtport, + fc_target_port); +} + +static inline struct nvmet_fc_fcp_iod * +nvmet_req_to_fod(struct nvmet_req *nvme_req) +{ + return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); +} + + +/* *************************** Globals **************************** */ + + +static DEFINE_SPINLOCK(nvmet_fc_tgtlock); + +static LIST_HEAD(nvmet_fc_target_list); +static DEFINE_IDA(nvmet_fc_tgtport_cnt); + + +static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); +static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work); +static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); +static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); +static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); +static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); +static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); +static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); + + +/* *********************** FC-NVME DMA Handling **************************** */ + +/* + * The fcloop device passes in a NULL device pointer. Real LLD's will + * pass in a valid device pointer. If NULL is passed to the dma mapping + * routines, depending on the platform, it may or may not succeed, and + * may crash. + * + * As such: + * Wrapper all the dma routines and check the dev pointer. + * + * If simple mappings (return just a dma address, we'll noop them, + * returning a dma address of 0. + * + * On more complex mappings (dma_map_sg), a pseudo routine fills + * in the scatter list, setting all dma addresses to 0. + */ + +static inline dma_addr_t +fc_dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction dir) +{ + return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; +} + +static inline int +fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dev ? 
dma_mapping_error(dev, dma_addr) : 0; +} + +static inline void +fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + if (dev) + dma_unmap_single(dev, addr, size, dir); +} + +static inline void +fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + if (dev) + dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void +fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + if (dev) + dma_sync_single_for_device(dev, addr, size, dir); +} + +/* pseudo dma_map_sg call */ +static int +fc_map_sg(struct scatterlist *sg, int nents) +{ + struct scatterlist *s; + int i; + + WARN_ON(nents == 0 || sg[0].length == 0); + + for_each_sg(sg, s, nents, i) { + s->dma_address = 0L; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + s->dma_length = s->length; +#endif + } + return nents; +} + +static inline int +fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); +} + +static inline void +fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + if (dev) + dma_unmap_sg(dev, sg, nents, dir); +} + + +/* *********************** FC-NVME Port Management ************************ */ + + +static int +nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_ls_iod *iod; + int i; + + iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), + GFP_KERNEL); + if (!iod) + return -ENOMEM; + + tgtport->iod = iod; + + for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { + INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); + iod->tgtport = tgtport; + list_add_tail(&iod->ls_list, &tgtport->ls_list); + + iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE, + GFP_KERNEL); + if (!iod->rqstbuf) + goto out_fail; + + iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE; + + iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, + NVME_FC_MAX_LS_BUFFER_SIZE, + DMA_TO_DEVICE); + if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) + goto out_fail; + } + + return 0; + +out_fail: + kfree(iod->rqstbuf); + list_del(&iod->ls_list); + for (iod--, i--; i >= 0; iod--, i--) { + fc_dma_unmap_single(tgtport->dev, iod->rspdma, + NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + kfree(iod->rqstbuf); + list_del(&iod->ls_list); + } + + kfree(iod); + + return -EFAULT; +} + +static void +nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_ls_iod *iod = tgtport->iod; + int i; + + for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { + fc_dma_unmap_single(tgtport->dev, + iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE, + DMA_TO_DEVICE); + kfree(iod->rqstbuf); + list_del(&iod->ls_list); + } + kfree(tgtport->iod); +} + +static struct nvmet_fc_ls_iod * +nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) +{ + static struct nvmet_fc_ls_iod *iod; + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + iod = list_first_entry_or_null(&tgtport->ls_list, + struct nvmet_fc_ls_iod, ls_list); + if (iod) + list_move_tail(&iod->ls_list, &tgtport->ls_busylist); + spin_unlock_irqrestore(&tgtport->lock, flags); + return iod; +} + + +static void +nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_iod *iod) +{ + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + list_move(&iod->ls_list, &tgtport->ls_list); + 
spin_unlock_irqrestore(&tgtport->lock, flags); +} + +static void +nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_tgt_queue *queue) +{ + struct nvmet_fc_fcp_iod *fod = queue->fod; + int i; + + for (i = 0; i < queue->sqsize; fod++, i++) { + INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work); + fod->tgtport = tgtport; + fod->queue = queue; + fod->active = false; + list_add_tail(&fod->fcp_list, &queue->fod_list); + spin_lock_init(&fod->flock); + + fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, + sizeof(fod->rspiubuf), DMA_TO_DEVICE); + if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { + list_del(&fod->fcp_list); + for (fod--, i--; i >= 0; fod--, i--) { + fc_dma_unmap_single(tgtport->dev, fod->rspdma, + sizeof(fod->rspiubuf), + DMA_TO_DEVICE); + fod->rspdma = 0L; + list_del(&fod->fcp_list); + } + + return; + } + } +} + +static void +nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_tgt_queue *queue) +{ + struct nvmet_fc_fcp_iod *fod = queue->fod; + int i; + + for (i = 0; i < queue->sqsize; fod++, i++) { + if (fod->rspdma) + fc_dma_unmap_single(tgtport->dev, fod->rspdma, + sizeof(fod->rspiubuf), DMA_TO_DEVICE); + } +} + +static struct nvmet_fc_fcp_iod * +nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) +{ + static struct nvmet_fc_fcp_iod *fod; + unsigned long flags; + + spin_lock_irqsave(&queue->qlock, flags); + fod = list_first_entry_or_null(&queue->fod_list, + struct nvmet_fc_fcp_iod, fcp_list); + if (fod) { + list_del(&fod->fcp_list); + fod->active = true; + fod->abort = false; + /* + * no queue reference is taken, as it was taken by the + * queue lookup just prior to the allocation. The iod + * will "inherit" that reference. + */ + } + spin_unlock_irqrestore(&queue->qlock, flags); + return fod; +} + + +static void +nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, + struct nvmet_fc_fcp_iod *fod) +{ + unsigned long flags; + + spin_lock_irqsave(&queue->qlock, flags); + list_add_tail(&fod->fcp_list, &fod->queue->fod_list); + fod->active = false; + spin_unlock_irqrestore(&queue->qlock, flags); + + /* + * release the reference taken at queue lookup and fod allocation + */ + nvmet_fc_tgt_q_put(queue); +} + +static int +nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid) +{ + int cpu, idx, cnt; + + if (!(tgtport->ops->target_features & + NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) || + tgtport->ops->max_hw_queues == 1) + return WORK_CPU_UNBOUND; + + /* Simple cpu selection based on qid modulo active cpu count */ + idx = !qid ? 
0 : (qid - 1) % num_active_cpus(); + + /* find the n'th active cpu */ + for (cpu = 0, cnt = 0; ; ) { + if (cpu_active(cpu)) { + if (cnt == idx) + break; + cnt++; + } + cpu = (cpu + 1) % num_possible_cpus(); + } + + return cpu; +} + +static struct nvmet_fc_tgt_queue * +nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, + u16 qid, u16 sqsize) +{ + struct nvmet_fc_tgt_queue *queue; + unsigned long flags; + int ret; + + if (qid >= NVMET_NR_QUEUES) + return NULL; + + queue = kzalloc((sizeof(*queue) + + (sizeof(struct nvmet_fc_fcp_iod) * sqsize)), + GFP_KERNEL); + if (!queue) + return NULL; + + if (!nvmet_fc_tgt_a_get(assoc)) + goto out_free_queue; + + queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, + assoc->tgtport->fc_target_port.port_num, + assoc->a_id, qid); + if (!queue->work_q) + goto out_a_put; + + queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1]; + queue->qid = qid; + queue->sqsize = sqsize; + queue->assoc = assoc; + queue->port = assoc->tgtport->port; + queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); + INIT_LIST_HEAD(&queue->fod_list); + atomic_set(&queue->connected, 0); + atomic_set(&queue->sqtail, 0); + atomic_set(&queue->rsn, 1); + atomic_set(&queue->zrspcnt, 0); + spin_lock_init(&queue->qlock); + kref_init(&queue->ref); + + nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); + + ret = nvmet_sq_init(&queue->nvme_sq); + if (ret) + goto out_fail_iodlist; + + WARN_ON(assoc->queues[qid]); + spin_lock_irqsave(&assoc->tgtport->lock, flags); + assoc->queues[qid] = queue; + spin_unlock_irqrestore(&assoc->tgtport->lock, flags); + + return queue; + +out_fail_iodlist: + nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); + destroy_workqueue(queue->work_q); +out_a_put: + nvmet_fc_tgt_a_put(assoc); +out_free_queue: + kfree(queue); + return NULL; +} + + +static void +nvmet_fc_tgt_queue_free(struct kref *ref) +{ + struct nvmet_fc_tgt_queue *queue = + container_of(ref, struct nvmet_fc_tgt_queue, ref); + unsigned long flags; + + spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); + queue->assoc->queues[queue->qid] = NULL; + spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); + + nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); + + nvmet_fc_tgt_a_put(queue->assoc); + + destroy_workqueue(queue->work_q); + + kfree(queue); +} + +static void +nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) +{ + kref_put(&queue->ref, nvmet_fc_tgt_queue_free); +} + +static int +nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) +{ + return kref_get_unless_zero(&queue->ref); +} + + +static void +nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq) +{ + int ret; + + fcpreq->op = NVMET_FCOP_ABORT; + fcpreq->offset = 0; + fcpreq->timeout = 0; + fcpreq->transfer_length = 0; + fcpreq->transferred_length = 0; + fcpreq->fcp_error = 0; + fcpreq->sg_cnt = 0; + + ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq); + if (ret) + /* should never reach here !! 
*/ + WARN_ON(1); +} + + +static void +nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) +{ + struct nvmet_fc_fcp_iod *fod = queue->fod; + unsigned long flags; + int i; + bool disconnect; + + disconnect = atomic_xchg(&queue->connected, 0); + + spin_lock_irqsave(&queue->qlock, flags); + /* about outstanding io's */ + for (i = 0; i < queue->sqsize; fod++, i++) { + if (fod->active) { + spin_lock(&fod->flock); + fod->abort = true; + spin_unlock(&fod->flock); + } + } + spin_unlock_irqrestore(&queue->qlock, flags); + + flush_workqueue(queue->work_q); + + if (disconnect) + nvmet_sq_destroy(&queue->nvme_sq); + + nvmet_fc_tgt_q_put(queue); +} + +static struct nvmet_fc_tgt_queue * +nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, + u64 connection_id) +{ + struct nvmet_fc_tgt_assoc *assoc; + struct nvmet_fc_tgt_queue *queue; + u64 association_id = nvmet_fc_getassociationid(connection_id); + u16 qid = nvmet_fc_getqueueid(connection_id); + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { + if (association_id == assoc->association_id) { + queue = assoc->queues[qid]; + if (queue && + (!atomic_read(&queue->connected) || + !nvmet_fc_tgt_q_get(queue))) + queue = NULL; + spin_unlock_irqrestore(&tgtport->lock, flags); + return queue; + } + } + spin_unlock_irqrestore(&tgtport->lock, flags); + return NULL; +} + +static struct nvmet_fc_tgt_assoc * +nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; + unsigned long flags; + u64 ran; + int idx; + bool needrandom = true; + + assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); + if (!assoc) + return NULL; + + idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL); + if (idx < 0) + goto out_free_assoc; + + if (!nvmet_fc_tgtport_get(tgtport)) + goto out_ida_put; + + assoc->tgtport = tgtport; + assoc->a_id = idx; + INIT_LIST_HEAD(&assoc->a_list); + kref_init(&assoc->ref); + + while (needrandom) { + get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); + ran = ran << BYTES_FOR_QID_SHIFT; + + spin_lock_irqsave(&tgtport->lock, flags); + needrandom = false; + list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) + if (ran == tmpassoc->association_id) { + needrandom = true; + break; + } + if (!needrandom) { + assoc->association_id = ran; + list_add_tail(&assoc->a_list, &tgtport->assoc_list); + } + spin_unlock_irqrestore(&tgtport->lock, flags); + } + + return assoc; + +out_ida_put: + ida_simple_remove(&tgtport->assoc_cnt, idx); +out_free_assoc: + kfree(assoc); + return NULL; +} + +static void +nvmet_fc_target_assoc_free(struct kref *ref) +{ + struct nvmet_fc_tgt_assoc *assoc = + container_of(ref, struct nvmet_fc_tgt_assoc, ref); + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + list_del(&assoc->a_list); + spin_unlock_irqrestore(&tgtport->lock, flags); + ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); + kfree(assoc); + nvmet_fc_tgtport_put(tgtport); +} + +static void +nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) +{ + kref_put(&assoc->ref, nvmet_fc_target_assoc_free); +} + +static int +nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) +{ + return kref_get_unless_zero(&assoc->ref); +} + +static void +nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) +{ + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; + struct nvmet_fc_tgt_queue *queue; + unsigned long flags; + int i; + + spin_lock_irqsave(&tgtport->lock, 
flags); + for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) { + queue = assoc->queues[i]; + if (queue) { + if (!nvmet_fc_tgt_q_get(queue)) + continue; + spin_unlock_irqrestore(&tgtport->lock, flags); + nvmet_fc_delete_target_queue(queue); + nvmet_fc_tgt_q_put(queue); + spin_lock_irqsave(&tgtport->lock, flags); + } + } + spin_unlock_irqrestore(&tgtport->lock, flags); + + nvmet_fc_tgt_a_put(assoc); +} + +static struct nvmet_fc_tgt_assoc * +nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, + u64 association_id) +{ + struct nvmet_fc_tgt_assoc *assoc; + struct nvmet_fc_tgt_assoc *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { + if (association_id == assoc->association_id) { + ret = assoc; + nvmet_fc_tgt_a_get(assoc); + break; + } + } + spin_unlock_irqrestore(&tgtport->lock, flags); + + return ret; +} + + +/** + * nvme_fc_register_targetport - transport entry point called by an + * LLDD to register the existence of a local + * NVME subystem FC port. + * @pinfo: pointer to information about the port to be registered + * @template: LLDD entrypoints and operational parameters for the port + * @dev: physical hardware device node port corresponds to. Will be + * used for DMA mappings + * @portptr: pointer to a local port pointer. Upon success, the routine + * will allocate a nvme_fc_local_port structure and place its + * address in the local port pointer. Upon failure, local port + * pointer will be set to NULL. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. + */ +int +nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, + struct nvmet_fc_target_template *template, + struct device *dev, + struct nvmet_fc_target_port **portptr) +{ + struct nvmet_fc_tgtport *newrec; + unsigned long flags; + int ret, idx; + + if (!template->xmt_ls_rsp || !template->fcp_op || + !template->targetport_delete || + !template->max_hw_queues || !template->max_sgl_segments || + !template->max_dif_sgl_segments || !template->dma_boundary) { + ret = -EINVAL; + goto out_regtgt_failed; + } + + newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), + GFP_KERNEL); + if (!newrec) { + ret = -ENOMEM; + goto out_regtgt_failed; + } + + idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; + goto out_fail_kfree; + } + + if (!get_device(dev) && dev) { + ret = -ENODEV; + goto out_ida_put; + } + + newrec->fc_target_port.node_name = pinfo->node_name; + newrec->fc_target_port.port_name = pinfo->port_name; + newrec->fc_target_port.private = &newrec[1]; + newrec->fc_target_port.port_id = pinfo->port_id; + newrec->fc_target_port.port_num = idx; + INIT_LIST_HEAD(&newrec->tgt_list); + newrec->dev = dev; + newrec->ops = template; + spin_lock_init(&newrec->lock); + INIT_LIST_HEAD(&newrec->ls_list); + INIT_LIST_HEAD(&newrec->ls_busylist); + INIT_LIST_HEAD(&newrec->assoc_list); + kref_init(&newrec->ref); + ida_init(&newrec->assoc_cnt); + + ret = nvmet_fc_alloc_ls_iodlist(newrec); + if (ret) { + ret = -ENOMEM; + goto out_free_newrec; + } + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + *portptr = &newrec->fc_target_port; + return 0; + +out_free_newrec: + put_device(dev); +out_ida_put: + ida_simple_remove(&nvmet_fc_tgtport_cnt, idx); +out_fail_kfree: + kfree(newrec); +out_regtgt_failed: + *portptr = NULL; + return ret; 
+} +EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); + + +static void +nvmet_fc_free_tgtport(struct kref *ref) +{ + struct nvmet_fc_tgtport *tgtport = + container_of(ref, struct nvmet_fc_tgtport, ref); + struct device *dev = tgtport->dev; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + list_del(&tgtport->tgt_list); + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + nvmet_fc_free_ls_iodlist(tgtport); + + /* let the LLDD know we've finished tearing it down */ + tgtport->ops->targetport_delete(&tgtport->fc_target_port); + + ida_simple_remove(&nvmet_fc_tgtport_cnt, + tgtport->fc_target_port.port_num); + + ida_destroy(&tgtport->assoc_cnt); + + kfree(tgtport); + + put_device(dev); +} + +static void +nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) +{ + kref_put(&tgtport->ref, nvmet_fc_free_tgtport); +} + +static int +nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) +{ + return kref_get_unless_zero(&tgtport->ref); +} + +static void +__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_tgt_assoc *assoc, *next; + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + list_for_each_entry_safe(assoc, next, + &tgtport->assoc_list, a_list) { + if (!nvmet_fc_tgt_a_get(assoc)) + continue; + spin_unlock_irqrestore(&tgtport->lock, flags); + nvmet_fc_delete_target_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); + spin_lock_irqsave(&tgtport->lock, flags); + } + spin_unlock_irqrestore(&tgtport->lock, flags); +} + +/* + * nvmet layer has called to terminate an association + */ +static void +nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) +{ + struct nvmet_fc_tgtport *tgtport, *next; + struct nvmet_fc_tgt_assoc *assoc; + struct nvmet_fc_tgt_queue *queue; + unsigned long flags; + bool found_ctrl = false; + + /* this is a bit ugly, but don't want to make locks layered */ + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, + tgt_list) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + spin_lock_irqsave(&tgtport->lock, flags); + list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { + queue = assoc->queues[0]; + if (queue && queue->nvme_sq.ctrl == ctrl) { + if (nvmet_fc_tgt_a_get(assoc)) + found_ctrl = true; + break; + } + } + spin_unlock_irqrestore(&tgtport->lock, flags); + + nvmet_fc_tgtport_put(tgtport); + + if (found_ctrl) { + nvmet_fc_delete_target_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); + return; + } + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + } + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); +} + +/** + * nvme_fc_unregister_targetport - transport entry point called by an + * LLDD to deregister/remove a previously + * registered a local NVME subsystem FC port. + * @tgtport: pointer to the (registered) target port that is to be + * deregistered. + * + * Returns: + * a completion status. Must be 0 upon success; a negative errno + * (ex: -ENXIO) upon failure. 
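+ *
+ * Note: any associations still active on the targetport are terminated
+ * here; the LLDD's targetport_delete callback is invoked only once the
+ * last reference to the targetport has been dropped.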
+ */ +int +nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) +{ + struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); + + /* terminate any outstanding associations */ + __nvmet_fc_free_assocs(tgtport); + + nvmet_fc_tgtport_put(tgtport); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); + + +/* *********************** FC-NVME LS Handling **************************** */ + + +static void +nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd) +{ + struct fcnvme_ls_acc_hdr *acc = buf; + + acc->w0.ls_cmd = ls_cmd; + acc->desc_list_len = desc_len; + acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST); + acc->rqst.desc_len = + fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)); + acc->rqst.w0.ls_cmd = rqst_ls_cmd; +} + +static int +nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd, + u8 reason, u8 explanation, u8 vendor) +{ + struct fcnvme_ls_rjt *rjt = buf; + + nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST, + fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)), + ls_cmd); + rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT); + rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt)); + rjt->rjt.reason_code = reason; + rjt->rjt.reason_explanation = explanation; + rjt->rjt.vendor = vendor; + + return sizeof(struct fcnvme_ls_rjt); +} + +/* Validation Error indexes into the string table below */ +enum { + VERR_NO_ERROR = 0, + VERR_CR_ASSOC_LEN = 1, + VERR_CR_ASSOC_RQST_LEN = 2, + VERR_CR_ASSOC_CMD = 3, + VERR_CR_ASSOC_CMD_LEN = 4, + VERR_ERSP_RATIO = 5, + VERR_ASSOC_ALLOC_FAIL = 6, + VERR_QUEUE_ALLOC_FAIL = 7, + VERR_CR_CONN_LEN = 8, + VERR_CR_CONN_RQST_LEN = 9, + VERR_ASSOC_ID = 10, + VERR_ASSOC_ID_LEN = 11, + VERR_NO_ASSOC = 12, + VERR_CONN_ID = 13, + VERR_CONN_ID_LEN = 14, + VERR_NO_CONN = 15, + VERR_CR_CONN_CMD = 16, + VERR_CR_CONN_CMD_LEN = 17, + VERR_DISCONN_LEN = 18, + VERR_DISCONN_RQST_LEN = 19, + VERR_DISCONN_CMD = 20, + VERR_DISCONN_CMD_LEN = 21, + VERR_DISCONN_SCOPE = 22, + VERR_RS_LEN = 23, + VERR_RS_RQST_LEN = 24, + VERR_RS_CMD = 25, + VERR_RS_CMD_LEN = 26, + VERR_RS_RCTL = 27, + VERR_RS_RO = 28, +}; + +static char *validation_errors[] = { + "OK", + "Bad CR_ASSOC Length", + "Bad CR_ASSOC Rqst Length", + "Not CR_ASSOC Cmd", + "Bad CR_ASSOC Cmd Length", + "Bad Ersp Ratio", + "Association Allocation Failed", + "Queue Allocation Failed", + "Bad CR_CONN Length", + "Bad CR_CONN Rqst Length", + "Not Association ID", + "Bad Association ID Length", + "No Association", + "Not Connection ID", + "Bad Connection ID Length", + "No Connection", + "Not CR_CONN Cmd", + "Bad CR_CONN Cmd Length", + "Bad DISCONN Length", + "Bad DISCONN Rqst Length", + "Not DISCONN Cmd", + "Bad DISCONN Cmd Length", + "Bad Disconnect Scope", + "Bad RS Length", + "Bad RS Rqst Length", + "Not RS Cmd", + "Bad RS Cmd Length", + "Bad RS R_CTL", + "Bad RS Relative Offset", +}; + +static void +nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_iod *iod) +{ + struct fcnvme_ls_cr_assoc_rqst *rqst = + (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf; + struct fcnvme_ls_cr_assoc_acc *acc = + (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf; + struct nvmet_fc_tgt_queue *queue; + int ret = 0; + + memset(acc, 0, sizeof(*acc)); + + if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst)) + ret = VERR_CR_ASSOC_LEN; + else if (rqst->desc_list_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_cr_assoc_rqst))) + ret = VERR_CR_ASSOC_RQST_LEN; + else if (rqst->assoc_cmd.desc_tag != + 
cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) + ret = VERR_CR_ASSOC_CMD; + else if (rqst->assoc_cmd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_cr_assoc_cmd))) + ret = VERR_CR_ASSOC_CMD_LEN; + else if (!rqst->assoc_cmd.ersp_ratio || + (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= + be16_to_cpu(rqst->assoc_cmd.sqsize))) + ret = VERR_ERSP_RATIO; + + else { + /* new association w/ admin queue */ + iod->assoc = nvmet_fc_alloc_target_assoc(tgtport); + if (!iod->assoc) + ret = VERR_ASSOC_ALLOC_FAIL; + else { + queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, + be16_to_cpu(rqst->assoc_cmd.sqsize)); + if (!queue) + ret = VERR_QUEUE_ALLOC_FAIL; + } + } + + if (ret) { + dev_err(tgtport->dev, + "Create Association LS failed: %s\n", + validation_errors[ret]); + iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, + NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + ELS_RJT_LOGIC, + ELS_EXPL_NONE, 0); + return; + } + + queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); + atomic_set(&queue->connected, 1); + queue->sqhd = 0; /* best place to init value */ + + /* format a response */ + + iod->lsreq->rsplen = sizeof(*acc); + + nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_cr_assoc_acc)), + FCNVME_LS_CREATE_ASSOCIATION); + acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); + acc->associd.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id)); + acc->associd.association_id = + cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); + acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); + acc->connectid.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_conn_id)); + acc->connectid.connection_id = acc->associd.association_id; +} + +static void +nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_iod *iod) +{ + struct fcnvme_ls_cr_conn_rqst *rqst = + (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf; + struct fcnvme_ls_cr_conn_acc *acc = + (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf; + struct nvmet_fc_tgt_queue *queue; + int ret = 0; + + memset(acc, 0, sizeof(*acc)); + + if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) + ret = VERR_CR_CONN_LEN; + else if (rqst->desc_list_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_cr_conn_rqst))) + ret = VERR_CR_CONN_RQST_LEN; + else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) + ret = VERR_ASSOC_ID; + else if (rqst->associd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id))) + ret = VERR_ASSOC_ID_LEN; + else if (rqst->connect_cmd.desc_tag != + cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) + ret = VERR_CR_CONN_CMD; + else if (rqst->connect_cmd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) + ret = VERR_CR_CONN_CMD_LEN; + else if (!rqst->connect_cmd.ersp_ratio || + (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= + be16_to_cpu(rqst->connect_cmd.sqsize))) + ret = VERR_ERSP_RATIO; + + else { + /* new io queue */ + iod->assoc = nvmet_fc_find_target_assoc(tgtport, + be64_to_cpu(rqst->associd.association_id)); + if (!iod->assoc) + ret = VERR_NO_ASSOC; + else { + queue = nvmet_fc_alloc_target_queue(iod->assoc, + be16_to_cpu(rqst->connect_cmd.qid), + be16_to_cpu(rqst->connect_cmd.sqsize)); + if (!queue) + ret = VERR_QUEUE_ALLOC_FAIL; + + /* release get taken in nvmet_fc_find_target_assoc */ + nvmet_fc_tgt_a_put(iod->assoc); + } + } + + if (ret) { + dev_err(tgtport->dev, + "Create Connection LS failed: %s\n", + validation_errors[ret]); + iod->lsreq->rsplen = 
nvmet_fc_format_rjt(acc, + NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + (ret == VERR_NO_ASSOC) ? + ELS_RJT_PROT : ELS_RJT_LOGIC, + ELS_EXPL_NONE, 0); + return; + } + + queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); + atomic_set(&queue->connected, 1); + queue->sqhd = 0; /* best place to init value */ + + /* format a response */ + + iod->lsreq->rsplen = sizeof(*acc); + + nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), + FCNVME_LS_CREATE_CONNECTION); + acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); + acc->connectid.desc_len = + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_conn_id)); + acc->connectid.connection_id = + cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, + be16_to_cpu(rqst->connect_cmd.qid))); +} + +static void +nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_iod *iod) +{ + struct fcnvme_ls_disconnect_rqst *rqst = + (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; + struct fcnvme_ls_disconnect_acc *acc = + (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; + struct nvmet_fc_tgt_queue *queue; + struct nvmet_fc_tgt_assoc *assoc; + int ret = 0; + bool del_assoc = false; + + memset(acc, 0, sizeof(*acc)); + + if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst)) + ret = VERR_DISCONN_LEN; + else if (rqst->desc_list_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_disconnect_rqst))) + ret = VERR_DISCONN_RQST_LEN; + else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) + ret = VERR_ASSOC_ID; + else if (rqst->associd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_assoc_id))) + ret = VERR_ASSOC_ID_LEN; + else if (rqst->discon_cmd.desc_tag != + cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD)) + ret = VERR_DISCONN_CMD; + else if (rqst->discon_cmd.desc_len != + fcnvme_lsdesc_len( + sizeof(struct fcnvme_lsdesc_disconn_cmd))) + ret = VERR_DISCONN_CMD_LEN; + else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) && + (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION)) + ret = VERR_DISCONN_SCOPE; + else { + /* match an active association */ + assoc = nvmet_fc_find_target_assoc(tgtport, + be64_to_cpu(rqst->associd.association_id)); + iod->assoc = assoc; + if (!assoc) + ret = VERR_NO_ASSOC; + } + + if (ret) { + dev_err(tgtport->dev, + "Disconnect LS failed: %s\n", + validation_errors[ret]); + iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, + NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + (ret == 8) ? 
ELS_RJT_PROT : ELS_RJT_LOGIC, + ELS_EXPL_NONE, 0); + return; + } + + /* format a response */ + + iod->lsreq->rsplen = sizeof(*acc); + + nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + fcnvme_lsdesc_len( + sizeof(struct fcnvme_ls_disconnect_acc)), + FCNVME_LS_DISCONNECT); + + + if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) { + queue = nvmet_fc_find_target_queue(tgtport, + be64_to_cpu(rqst->discon_cmd.id)); + if (queue) { + int qid = queue->qid; + + nvmet_fc_delete_target_queue(queue); + + /* release the get taken by find_target_queue */ + nvmet_fc_tgt_q_put(queue); + + /* tear association down if io queue terminated */ + if (!qid) + del_assoc = true; + } + } + + /* release get taken in nvmet_fc_find_target_assoc */ + nvmet_fc_tgt_a_put(iod->assoc); + + if (del_assoc) + nvmet_fc_delete_target_assoc(iod->assoc); +} + + +/* *********************** NVME Ctrl Routines **************************** */ + + +static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); + +static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; + +static void +nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq) +{ + struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private; + struct nvmet_fc_tgtport *tgtport = iod->tgtport; + + fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, + NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + nvmet_fc_free_ls_iod(tgtport, iod); + nvmet_fc_tgtport_put(tgtport); +} + +static void +nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_iod *iod) +{ + int ret; + + fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, + NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + + ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq); + if (ret) + nvmet_fc_xmt_ls_rsp_done(iod->lsreq); +} + +/* + * Actual processing routine for received FC-NVME LS Requests from the LLD + */ +static void +nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_iod *iod) +{ + struct fcnvme_ls_rqst_w0 *w0 = + (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf; + + iod->lsreq->nvmet_fc_private = iod; + iod->lsreq->rspbuf = iod->rspbuf; + iod->lsreq->rspdma = iod->rspdma; + iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done; + /* Be preventative. handlers will later set to valid length */ + iod->lsreq->rsplen = 0; + + iod->assoc = NULL; + + /* + * handlers: + * parse request input, execute the request, and format the + * LS response + */ + switch (w0->ls_cmd) { + case FCNVME_LS_CREATE_ASSOCIATION: + /* Creates Association and initial Admin Queue/Connection */ + nvmet_fc_ls_create_association(tgtport, iod); + break; + case FCNVME_LS_CREATE_CONNECTION: + /* Creates an IO Queue/Connection */ + nvmet_fc_ls_create_connection(tgtport, iod); + break; + case FCNVME_LS_DISCONNECT: + /* Terminate a Queue/Connection or the Association */ + nvmet_fc_ls_disconnect(tgtport, iod); + break; + default: + iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf, + NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd, + ELS_RJT_INVAL, ELS_EXPL_NONE, 0); + } + + nvmet_fc_xmt_ls_rsp(tgtport, iod); +} + +/* + * Actual processing routine for received FC-NVME LS Requests from the LLD + */ +static void +nvmet_fc_handle_ls_rqst_work(struct work_struct *work) +{ + struct nvmet_fc_ls_iod *iod = + container_of(work, struct nvmet_fc_ls_iod, work); + struct nvmet_fc_tgtport *tgtport = iod->tgtport; + + nvmet_fc_handle_ls_rqst(tgtport, iod); +} + + +/** + * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD + * upon the reception of a NVME LS request. 
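+ *
+ * The LS is handled from a work item; the resulting LS_ACC or LS_RJT
+ * response is transmitted via the LLDD's xmt_ls_rsp entry point.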
+ * + * The nvmet-fc layer will copy payload to an internal structure for + * processing. As such, upon completion of the routine, the LLDD may + * immediately free/reuse the LS request buffer passed in the call. + * + * If this routine returns error, the LLDD should abort the exchange. + * + * @tgtport: pointer to the (registered) target port the LS was + * received on. + * @lsreq: pointer to a lsreq request structure to be used to reference + * the exchange corresponding to the LS. + * @lsreqbuf: pointer to the buffer containing the LS Request + * @lsreqbuf_len: length, in bytes, of the received LS request + */ +int +nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, + struct nvmefc_tgt_ls_req *lsreq, + void *lsreqbuf, u32 lsreqbuf_len) +{ + struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); + struct nvmet_fc_ls_iod *iod; + + if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE) + return -E2BIG; + + if (!nvmet_fc_tgtport_get(tgtport)) + return -ESHUTDOWN; + + iod = nvmet_fc_alloc_ls_iod(tgtport); + if (!iod) { + nvmet_fc_tgtport_put(tgtport); + return -ENOENT; + } + + iod->lsreq = lsreq; + iod->fcpreq = NULL; + memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); + iod->rqstdatalen = lsreqbuf_len; + + schedule_work(&iod->work); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); + + +/* + * ********************** + * Start of FCP handling + * ********************** + */ + +static int +nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) +{ + struct scatterlist *sg; + struct page *page; + unsigned int nent; + u32 page_len, length; + int i = 0; + + length = fod->total_length; + nent = DIV_ROUND_UP(length, PAGE_SIZE); + sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL); + if (!sg) + goto out; + + sg_init_table(sg, nent); + + while (length) { + page_len = min_t(u32, length, PAGE_SIZE); + + page = alloc_page(GFP_KERNEL); + if (!page) + goto out_free_pages; + + sg_set_page(&sg[i], page, page_len, 0); + length -= page_len; + i++; + } + + fod->data_sg = sg; + fod->data_sg_cnt = nent; + fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, + ((fod->io_dir == NVMET_FCP_WRITE) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE)); + /* note: write from initiator perspective */ + + return 0; + +out_free_pages: + while (i > 0) { + i--; + __free_page(sg_page(&sg[i])); + } + kfree(sg); + fod->data_sg = NULL; + fod->data_sg_cnt = 0; +out: + return NVME_SC_INTERNAL; +} + +static void +nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) +{ + struct scatterlist *sg; + int count; + + if (!fod->data_sg || !fod->data_sg_cnt) + return; + + fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, + ((fod->io_dir == NVMET_FCP_WRITE) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE)); + for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count) + __free_page(sg_page(sg)); + kfree(fod->data_sg); +} + + +static bool +queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) +{ + u32 sqtail, used; + + /* egad, this is ugly. And sqtail is just a best guess */ + sqtail = atomic_read(&q->sqtail) % q->sqsize; + + used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); + return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); +} + +/* + * Prep RSP payload. 
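+ * (Either a full ersp or the abbreviated all-zeros response is built,
+ * per the ersp rules evaluated below.)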
+ * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op + */ +static void +nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_fcp_iod *fod) +{ + struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; + struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; + struct nvme_completion *cqe = &ersp->cqe; + u32 *cqewd = (u32 *)cqe; + bool send_ersp = false; + u32 rsn, rspcnt, xfr_length; + + if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) + xfr_length = fod->total_length; + else + xfr_length = fod->offset; + + /* + * check to see if we can send a 0's rsp. + * Note: to send a 0's response, the NVME-FC host transport will + * recreate the CQE. The host transport knows: sq id, SQHD (last + * seen in an ersp), and command_id. Thus it will create a + * zero-filled CQE with those known fields filled in. Transport + * must send an ersp for any condition where the cqe won't match + * this. + * + * Here are the FC-NVME mandated cases where we must send an ersp: + * every N responses, where N=ersp_ratio + * force fabric commands to send ersp's (not in FC-NVME but good + * practice) + * normal cmds: any time status is non-zero, or status is zero + * but words 0 or 1 are non-zero. + * the SQ is 90% or more full + * the cmd is a fused command + * transferred data length not equal to cmd iu length + */ + rspcnt = atomic_inc_return(&fod->queue->zrspcnt); + if (!(rspcnt % fod->queue->ersp_ratio) || + sqe->opcode == nvme_fabrics_command || + xfr_length != fod->total_length || + (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || + (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || + queue_90percent_full(fod->queue, cqe->sq_head)) + send_ersp = true; + + /* re-set the fields */ + fod->fcpreq->rspaddr = ersp; + fod->fcpreq->rspdma = fod->rspdma; + + if (!send_ersp) { + memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); + fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; + } else { + ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); + rsn = atomic_inc_return(&fod->queue->rsn); + ersp->rsn = cpu_to_be32(rsn); + ersp->xfrd_len = cpu_to_be32(xfr_length); + fod->fcpreq->rsplen = sizeof(*ersp); + } + + fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, + sizeof(fod->rspiubuf), DMA_TO_DEVICE); +} + +static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); + +static void +nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_fcp_iod *fod) +{ + int ret; + + fod->fcpreq->op = NVMET_FCOP_RSP; + fod->fcpreq->timeout = 0; + + nvmet_fc_prep_fcp_rsp(tgtport, fod); + + ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); + if (ret) + nvmet_fc_abort_op(tgtport, fod->fcpreq); +} + +static void +nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_fcp_iod *fod, u8 op) +{ + struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; + struct scatterlist *sg, *datasg; + u32 tlen, sg_off; + int ret; + + fcpreq->op = op; + fcpreq->offset = fod->offset; + fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; + tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024), + (fod->total_length - fod->offset)); + tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE); + tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments + * PAGE_SIZE); + fcpreq->transfer_length = tlen; + fcpreq->transferred_length = 0; + fcpreq->fcp_error = 0; + fcpreq->rsplen = 0; + + fcpreq->sg_cnt = 0; + + datasg = fod->next_sg; + sg_off = fod->next_sg_offset; + + for (sg = fcpreq->sg ; tlen; sg++) { + *sg = *datasg; + if (sg_off) { + sg->offset += sg_off; + sg->length 
-= sg_off; + sg->dma_address += sg_off; + sg_off = 0; + } + if (tlen < sg->length) { + sg->length = tlen; + fod->next_sg = datasg; + fod->next_sg_offset += tlen; + } else if (tlen == sg->length) { + fod->next_sg_offset = 0; + fod->next_sg = sg_next(datasg); + } else { + fod->next_sg_offset = 0; + datasg = sg_next(datasg); + } + tlen -= sg->length; + fcpreq->sg_cnt++; + } + + /* + * If the last READDATA request: check if LLDD supports + * combined xfr with response. + */ + if ((op == NVMET_FCOP_READDATA) && + ((fod->offset + fcpreq->transfer_length) == fod->total_length) && + (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { + fcpreq->op = NVMET_FCOP_READDATA_RSP; + nvmet_fc_prep_fcp_rsp(tgtport, fod); + } + + ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); + if (ret) { + /* + * should be ok to set w/o lock as its in the thread of + * execution (not an async timer routine) and doesn't + * contend with any clearing action + */ + fod->abort = true; + + if (op == NVMET_FCOP_WRITEDATA) + nvmet_req_complete(&fod->req, + NVME_SC_FC_TRANSPORT_ERROR); + else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { + fcpreq->fcp_error = ret; + fcpreq->transferred_length = 0; + nvmet_fc_xmt_fcp_op_done(fod->fcpreq); + } + } +} + +static void +nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) +{ + struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; + struct nvmet_fc_tgtport *tgtport = fod->tgtport; + unsigned long flags; + bool abort; + + spin_lock_irqsave(&fod->flock, flags); + abort = fod->abort; + spin_unlock_irqrestore(&fod->flock, flags); + + /* if in the middle of an io and we need to tear down */ + if (abort && fcpreq->op != NVMET_FCOP_ABORT) { + /* data no longer needed */ + nvmet_fc_free_tgt_pgs(fod); + + if (fcpreq->fcp_error || abort) + nvmet_req_complete(&fod->req, fcpreq->fcp_error); + + return; + } + + switch (fcpreq->op) { + + case NVMET_FCOP_WRITEDATA: + if (abort || fcpreq->fcp_error || + fcpreq->transferred_length != fcpreq->transfer_length) { + nvmet_req_complete(&fod->req, + NVME_SC_FC_TRANSPORT_ERROR); + return; + } + + fod->offset += fcpreq->transferred_length; + if (fod->offset != fod->total_length) { + /* transfer the next chunk */ + nvmet_fc_transfer_fcp_data(tgtport, fod, + NVMET_FCOP_WRITEDATA); + return; + } + + /* data transfer complete, resume with nvmet layer */ + + fod->req.execute(&fod->req); + + break; + + case NVMET_FCOP_READDATA: + case NVMET_FCOP_READDATA_RSP: + if (abort || fcpreq->fcp_error || + fcpreq->transferred_length != fcpreq->transfer_length) { + /* data no longer needed */ + nvmet_fc_free_tgt_pgs(fod); + + nvmet_fc_abort_op(tgtport, fod->fcpreq); + return; + } + + /* success */ + + if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { + /* data no longer needed */ + nvmet_fc_free_tgt_pgs(fod); + fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, + sizeof(fod->rspiubuf), DMA_TO_DEVICE); + nvmet_fc_free_fcp_iod(fod->queue, fod); + return; + } + + fod->offset += fcpreq->transferred_length; + if (fod->offset != fod->total_length) { + /* transfer the next chunk */ + nvmet_fc_transfer_fcp_data(tgtport, fod, + NVMET_FCOP_READDATA); + return; + } + + /* data transfer complete, send response */ + + /* data no longer needed */ + nvmet_fc_free_tgt_pgs(fod); + + nvmet_fc_xmt_fcp_rsp(tgtport, fod); + + break; + + case NVMET_FCOP_RSP: + case NVMET_FCOP_ABORT: + fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, + sizeof(fod->rspiubuf), DMA_TO_DEVICE); + nvmet_fc_free_fcp_iod(fod->queue, fod); + break; + + default: + 
nvmet_fc_free_tgt_pgs(fod); + nvmet_fc_abort_op(tgtport, fod->fcpreq); + break; + } +} + +/* + * actual completion handler after execution by the nvmet layer + */ +static void +__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_fcp_iod *fod, int status) +{ + struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; + struct nvme_completion *cqe = &fod->rspiubuf.cqe; + unsigned long flags; + bool abort; + + spin_lock_irqsave(&fod->flock, flags); + abort = fod->abort; + spin_unlock_irqrestore(&fod->flock, flags); + + /* if we have a CQE, snoop the last sq_head value */ + if (!status) + fod->queue->sqhd = cqe->sq_head; + + if (abort) { + /* data no longer needed */ + nvmet_fc_free_tgt_pgs(fod); + + nvmet_fc_abort_op(tgtport, fod->fcpreq); + return; + } + + /* if an error handling the cmd post initial parsing */ + if (status) { + /* fudge up a failed CQE status for our transport error */ + memset(cqe, 0, sizeof(*cqe)); + cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ + cqe->sq_id = cpu_to_le16(fod->queue->qid); + cqe->command_id = sqe->command_id; + cqe->status = cpu_to_le16(status); + } else { + + /* + * try to push the data even if the SQE status is non-zero. + * There may be a status where data still was intended to + * be moved + */ + if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { + /* push the data over before sending rsp */ + nvmet_fc_transfer_fcp_data(tgtport, fod, + NVMET_FCOP_READDATA); + return; + } + + /* writes & no data - fall thru */ + } + + /* data no longer needed */ + nvmet_fc_free_tgt_pgs(fod); + + nvmet_fc_xmt_fcp_rsp(tgtport, fod); +} + + +static void +nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) +{ + struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); + struct nvmet_fc_tgtport *tgtport = fod->tgtport; + + __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); +} + + +/* + * Actual processing routine for received FC-NVME LS Requests from the LLD + */ +void +nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_fcp_iod *fod) +{ + struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; + int ret; + + /* + * Fused commands are currently not supported in the linux + * implementation. + * + * As such, the implementation of the FC transport does not + * look at the fused commands and order delivery to the upper + * layer until we have both based on csn. 
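+ *
+ * The CMD IU data-direction flags are cross-checked against the SQE
+ * opcode below; a mismatch aborts the exchange via nvmet_fc_abort_op.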
+ */ + + fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; + + fod->total_length = be32_to_cpu(cmdiu->data_len); + if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { + fod->io_dir = NVMET_FCP_WRITE; + if (!nvme_is_write(&cmdiu->sqe)) + goto transport_error; + } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { + fod->io_dir = NVMET_FCP_READ; + if (nvme_is_write(&cmdiu->sqe)) + goto transport_error; + } else { + fod->io_dir = NVMET_FCP_NODATA; + if (fod->total_length) + goto transport_error; + } + + fod->req.cmd = &fod->cmdiubuf.sqe; + fod->req.rsp = &fod->rspiubuf.cqe; + fod->req.port = fod->queue->port; + + /* ensure nvmet handlers will set cmd handler callback */ + fod->req.execute = NULL; + + /* clear any response payload */ + memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); + + ret = nvmet_req_init(&fod->req, + &fod->queue->nvme_cq, + &fod->queue->nvme_sq, + &nvmet_fc_tgt_fcp_ops); + if (!ret) { /* bad SQE content */ + nvmet_fc_abort_op(tgtport, fod->fcpreq); + return; + } + + /* keep a running counter of tail position */ + atomic_inc(&fod->queue->sqtail); + + fod->data_sg = NULL; + fod->data_sg_cnt = 0; + if (fod->total_length) { + ret = nvmet_fc_alloc_tgt_pgs(fod); + if (ret) { + nvmet_req_complete(&fod->req, ret); + return; + } + } + fod->req.sg = fod->data_sg; + fod->req.sg_cnt = fod->data_sg_cnt; + fod->offset = 0; + fod->next_sg = fod->data_sg; + fod->next_sg_offset = 0; + + if (fod->io_dir == NVMET_FCP_WRITE) { + /* pull the data over before invoking nvmet layer */ + nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); + return; + } + + /* + * Reads or no data: + * + * can invoke the nvmet_layer now. If read data, cmd completion will + * push the data + */ + + fod->req.execute(&fod->req); + + return; + +transport_error: + nvmet_fc_abort_op(tgtport, fod->fcpreq); +} + +/* + * Actual processing routine for received FC-NVME LS Requests from the LLD + */ +static void +nvmet_fc_handle_fcp_rqst_work(struct work_struct *work) +{ + struct nvmet_fc_fcp_iod *fod = + container_of(work, struct nvmet_fc_fcp_iod, work); + struct nvmet_fc_tgtport *tgtport = fod->tgtport; + + nvmet_fc_handle_fcp_rqst(tgtport, fod); +} + +/** + * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD + * upon the reception of a NVME FCP CMD IU. + * + * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc + * layer for processing. + * + * The nvmet-fc layer will copy cmd payload to an internal structure for + * processing. As such, upon completion of the routine, the LLDD may + * immediately free/reuse the CMD IU buffer passed in the call. + * + * If this routine returns error, the lldd should abort the exchange. + * + * @target_port: pointer to the (registered) target port the FCP CMD IU + * was receive on. + * @fcpreq: pointer to a fcpreq request structure to be used to reference + * the exchange corresponding to the FCP Exchange. 
+ * @cmdiubuf: pointer to the buffer containing the FCP CMD IU + * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU + */ +int +nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, + struct nvmefc_tgt_fcp_req *fcpreq, + void *cmdiubuf, u32 cmdiubuf_len) +{ + struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); + struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; + struct nvmet_fc_tgt_queue *queue; + struct nvmet_fc_fcp_iod *fod; + + /* validate iu, so the connection id can be used to find the queue */ + if ((cmdiubuf_len != sizeof(*cmdiu)) || + (cmdiu->scsi_id != NVME_CMD_SCSI_ID) || + (cmdiu->fc_id != NVME_CMD_FC_ID) || + (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) + return -EIO; + + + queue = nvmet_fc_find_target_queue(tgtport, + be64_to_cpu(cmdiu->connection_id)); + if (!queue) + return -ENOTCONN; + + /* + * note: reference taken by find_target_queue + * After successful fod allocation, the fod will inherit the + * ownership of that reference and will remove the reference + * when the fod is freed. + */ + + fod = nvmet_fc_alloc_fcp_iod(queue); + if (!fod) { + /* release the queue lookup reference */ + nvmet_fc_tgt_q_put(queue); + return -ENOENT; + } + + fcpreq->nvmet_fc_private = fod; + fod->fcpreq = fcpreq; + /* + * put all admin cmds on hw queue id 0. All io commands go to + * the respective hw queue based on a modulo basis + */ + fcpreq->hwqid = queue->qid ? + ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; + memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); + + queue_work_on(queue->cpu, queue->work_q, &fod->work); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); + +enum { + FCT_TRADDR_ERR = 0, + FCT_TRADDR_WWNN = 1 << 0, + FCT_TRADDR_WWPN = 1 << 1, +}; + +struct nvmet_fc_traddr { + u64 nn; + u64 pn; +}; + +static const match_table_t traddr_opt_tokens = { + { FCT_TRADDR_WWNN, "nn-%s" }, + { FCT_TRADDR_WWPN, "pn-%s" }, + { FCT_TRADDR_ERR, NULL } +}; + +static int +nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf) +{ + substring_t args[MAX_OPT_ARGS]; + char *options, *o, *p; + int token, ret = 0; + u64 token64; + + options = o = kstrdup(buf, GFP_KERNEL); + if (!options) + return -ENOMEM; + + while ((p = strsep(&o, ",\n")) != NULL) { + if (!*p) + continue; + + token = match_token(p, traddr_opt_tokens, args); + switch (token) { + case FCT_TRADDR_WWNN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out; + } + traddr->nn = token64; + break; + case FCT_TRADDR_WWPN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out; + } + traddr->pn = token64; + break; + default: + pr_warn("unknown traddr token or missing value '%s'\n", + p); + ret = -EINVAL; + goto out; + } + } + +out: + kfree(options); + return ret; +} + +static int +nvmet_fc_add_port(struct nvmet_port *port) +{ + struct nvmet_fc_tgtport *tgtport; + struct nvmet_fc_traddr traddr = { 0L, 0L }; + unsigned long flags; + int ret; + + /* validate the address info */ + if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || + (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) + return -EINVAL; + + /* map the traddr address info to a target port */ + + ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr); + if (ret) + return ret; + + ret = -ENXIO; + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { + if ((tgtport->fc_target_port.node_name == traddr.nn) && + (tgtport->fc_target_port.port_name == traddr.pn)) { + /* a FC port can only be 1 nvmet port id */ + if (!tgtport->port) { + tgtport->port = 
port; + port->priv = tgtport; + ret = 0; + } else + ret = -EALREADY; + break; + } + } + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + return ret; +} + +static void +nvmet_fc_remove_port(struct nvmet_port *port) +{ + struct nvmet_fc_tgtport *tgtport = port->priv; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (tgtport->port == port) { + nvmet_fc_tgtport_put(tgtport); + tgtport->port = NULL; + } + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); +} + +static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { + .owner = THIS_MODULE, + .type = NVMF_TRTYPE_FC, + .msdbd = 1, + .add_port = nvmet_fc_add_port, + .remove_port = nvmet_fc_remove_port, + .queue_response = nvmet_fc_fcp_nvme_cmd_done, + .delete_ctrl = nvmet_fc_delete_ctrl, +}; + +static int __init nvmet_fc_init_module(void) +{ + return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); +} + +static void __exit nvmet_fc_exit_module(void) +{ + /* sanity check - all lports should be removed */ + if (!list_empty(&nvmet_fc_target_list)) + pr_warn("%s: targetport list not empty\n", __func__); + + nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); + + ida_destroy(&nvmet_fc_tgtport_cnt); +} + +module_init(nvmet_fc_init_module); +module_exit(nvmet_fc_exit_module); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c new file mode 100644 index 0000000000000000000000000000000000000000..bcb8ebeb01c5d26515c8047e4eed765dbdc4da45 --- /dev/null +++ b/drivers/nvme/target/fcloop.c @@ -0,0 +1,1148 @@ +/* + * Copyright (c) 2016 Avago Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful. + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, + * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO + * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. 
+ * See the GNU General Public License for more details, a copy of which + * can be found in the file COPYING included with this package + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include + +#include "../host/nvme.h" +#include "../target/nvmet.h" +#include +#include + + +enum { + NVMF_OPT_ERR = 0, + NVMF_OPT_WWNN = 1 << 0, + NVMF_OPT_WWPN = 1 << 1, + NVMF_OPT_ROLES = 1 << 2, + NVMF_OPT_FCADDR = 1 << 3, + NVMF_OPT_LPWWNN = 1 << 4, + NVMF_OPT_LPWWPN = 1 << 5, +}; + +struct fcloop_ctrl_options { + int mask; + u64 wwnn; + u64 wwpn; + u32 roles; + u32 fcaddr; + u64 lpwwnn; + u64 lpwwpn; +}; + +static const match_table_t opt_tokens = { + { NVMF_OPT_WWNN, "wwnn=%s" }, + { NVMF_OPT_WWPN, "wwpn=%s" }, + { NVMF_OPT_ROLES, "roles=%d" }, + { NVMF_OPT_FCADDR, "fcaddr=%x" }, + { NVMF_OPT_LPWWNN, "lpwwnn=%s" }, + { NVMF_OPT_LPWWPN, "lpwwpn=%s" }, + { NVMF_OPT_ERR, NULL } +}; + +static int +fcloop_parse_options(struct fcloop_ctrl_options *opts, + const char *buf) +{ + substring_t args[MAX_OPT_ARGS]; + char *options, *o, *p; + int token, ret = 0; + u64 token64; + + options = o = kstrdup(buf, GFP_KERNEL); + if (!options) + return -ENOMEM; + + while ((p = strsep(&o, ",\n")) != NULL) { + if (!*p) + continue; + + token = match_token(p, opt_tokens, args); + opts->mask |= token; + switch (token) { + case NVMF_OPT_WWNN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out_free_options; + } + opts->wwnn = token64; + break; + case NVMF_OPT_WWPN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out_free_options; + } + opts->wwpn = token64; + break; + case NVMF_OPT_ROLES: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out_free_options; + } + opts->roles = token; + break; + case NVMF_OPT_FCADDR: + if (match_hex(args, &token)) { + ret = -EINVAL; + goto out_free_options; + } + opts->fcaddr = token; + break; + case NVMF_OPT_LPWWNN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out_free_options; + } + opts->lpwwnn = token64; + break; + case NVMF_OPT_LPWWPN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out_free_options; + } + opts->lpwwpn = token64; + break; + default: + pr_warn("unknown parameter or missing value '%s'\n", p); + ret = -EINVAL; + goto out_free_options; + } + } + +out_free_options: + kfree(options); + return ret; +} + + +static int +fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname, + const char *buf) +{ + substring_t args[MAX_OPT_ARGS]; + char *options, *o, *p; + int token, ret = 0; + u64 token64; + + *nname = -1; + *pname = -1; + + options = o = kstrdup(buf, GFP_KERNEL); + if (!options) + return -ENOMEM; + + while ((p = strsep(&o, ",\n")) != NULL) { + if (!*p) + continue; + + token = match_token(p, opt_tokens, args); + switch (token) { + case NVMF_OPT_WWNN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out_free_options; + } + *nname = token64; + break; + case NVMF_OPT_WWPN: + if (match_u64(args, &token64)) { + ret = -EINVAL; + goto out_free_options; + } + *pname = token64; + break; + default: + pr_warn("unknown parameter or missing value '%s'\n", p); + ret = -EINVAL; + goto out_free_options; + } + } + +out_free_options: + kfree(options); + + if (!ret) { + if (*nname == -1) + return -EINVAL; + if (*pname == -1) + return -EINVAL; + } + + return ret; +} + + +#define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN) + +#define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \ + NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN) + +#define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN) + +#define ALL_OPTS (NVMF_OPT_WWNN | 
NVMF_OPT_WWPN | NVMF_OPT_ROLES | \ + NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN) + + +static DEFINE_SPINLOCK(fcloop_lock); +static LIST_HEAD(fcloop_lports); +static LIST_HEAD(fcloop_nports); + +struct fcloop_lport { + struct nvme_fc_local_port *localport; + struct list_head lport_list; + struct completion unreg_done; +}; + +struct fcloop_rport { + struct nvme_fc_remote_port *remoteport; + struct nvmet_fc_target_port *targetport; + struct fcloop_nport *nport; + struct fcloop_lport *lport; +}; + +struct fcloop_tport { + struct nvmet_fc_target_port *targetport; + struct nvme_fc_remote_port *remoteport; + struct fcloop_nport *nport; + struct fcloop_lport *lport; +}; + +struct fcloop_nport { + struct fcloop_rport *rport; + struct fcloop_tport *tport; + struct fcloop_lport *lport; + struct list_head nport_list; + struct kref ref; + struct completion rport_unreg_done; + struct completion tport_unreg_done; + u64 node_name; + u64 port_name; + u32 port_role; + u32 port_id; +}; + +struct fcloop_lsreq { + struct fcloop_tport *tport; + struct nvmefc_ls_req *lsreq; + struct work_struct work; + struct nvmefc_tgt_ls_req tgt_ls_req; + int status; +}; + +struct fcloop_fcpreq { + struct fcloop_tport *tport; + struct nvmefc_fcp_req *fcpreq; + u16 status; + struct work_struct work; + struct nvmefc_tgt_fcp_req tgt_fcp_req; +}; + + +static inline struct fcloop_lsreq * +tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq) +{ + return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req); +} + +static inline struct fcloop_fcpreq * +tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq) +{ + return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req); +} + + +static int +fcloop_create_queue(struct nvme_fc_local_port *localport, + unsigned int qidx, u16 qsize, + void **handle) +{ + *handle = localport; + return 0; +} + +static void +fcloop_delete_queue(struct nvme_fc_local_port *localport, + unsigned int idx, void *handle) +{ +} + + +/* + * Transmit of LS RSP done (e.g. buffers all set). call back up + * initiator "done" flows. + */ +static void +fcloop_tgt_lsrqst_done_work(struct work_struct *work) +{ + struct fcloop_lsreq *tls_req = + container_of(work, struct fcloop_lsreq, work); + struct fcloop_tport *tport = tls_req->tport; + struct nvmefc_ls_req *lsreq = tls_req->lsreq; + + if (tport->remoteport) + lsreq->done(lsreq, tls_req->status); +} + +static int +fcloop_ls_req(struct nvme_fc_local_port *localport, + struct nvme_fc_remote_port *remoteport, + struct nvmefc_ls_req *lsreq) +{ + struct fcloop_lsreq *tls_req = lsreq->private; + struct fcloop_rport *rport = remoteport->private; + int ret = 0; + + tls_req->lsreq = lsreq; + INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work); + + if (!rport->targetport) { + tls_req->status = -ECONNREFUSED; + schedule_work(&tls_req->work); + return ret; + } + + tls_req->status = 0; + tls_req->tport = rport->targetport->private; + ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req, + lsreq->rqstaddr, lsreq->rqstlen); + + return ret; +} + +static int +fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport, + struct nvmefc_tgt_ls_req *tgt_lsreq) +{ + struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq); + struct nvmefc_ls_req *lsreq = tls_req->lsreq; + + memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf, + ((lsreq->rsplen < tgt_lsreq->rsplen) ? + lsreq->rsplen : tgt_lsreq->rsplen)); + tgt_lsreq->done(tgt_lsreq); + + schedule_work(&tls_req->work); + + return 0; +} + +/* + * FCP IO operation done. call back up initiator "done" flows. 
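+ * The initiator's done() callback is invoked from a scheduled work
+ * item, not directly from the target op's calling context.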
+ */ +static void +fcloop_tgt_fcprqst_done_work(struct work_struct *work) +{ + struct fcloop_fcpreq *tfcp_req = + container_of(work, struct fcloop_fcpreq, work); + struct fcloop_tport *tport = tfcp_req->tport; + struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; + + if (tport->remoteport) { + fcpreq->status = tfcp_req->status; + fcpreq->done(fcpreq); + } +} + + +static int +fcloop_fcp_req(struct nvme_fc_local_port *localport, + struct nvme_fc_remote_port *remoteport, + void *hw_queue_handle, + struct nvmefc_fcp_req *fcpreq) +{ + struct fcloop_fcpreq *tfcp_req = fcpreq->private; + struct fcloop_rport *rport = remoteport->private; + int ret = 0; + + INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work); + + if (!rport->targetport) { + tfcp_req->status = NVME_SC_FC_TRANSPORT_ERROR; + schedule_work(&tfcp_req->work); + return ret; + } + + tfcp_req->fcpreq = fcpreq; + tfcp_req->tport = rport->targetport->private; + + ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req, + fcpreq->cmdaddr, fcpreq->cmdlen); + + return ret; +} + +static void +fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg, + struct scatterlist *io_sg, u32 offset, u32 length) +{ + void *data_p, *io_p; + u32 data_len, io_len, tlen; + + io_p = sg_virt(io_sg); + io_len = io_sg->length; + + for ( ; offset; ) { + tlen = min_t(u32, offset, io_len); + offset -= tlen; + io_len -= tlen; + if (!io_len) { + io_sg = sg_next(io_sg); + io_p = sg_virt(io_sg); + io_len = io_sg->length; + } else + io_p += tlen; + } + + data_p = sg_virt(data_sg); + data_len = data_sg->length; + + for ( ; length; ) { + tlen = min_t(u32, io_len, data_len); + tlen = min_t(u32, tlen, length); + + if (op == NVMET_FCOP_WRITEDATA) + memcpy(data_p, io_p, tlen); + else + memcpy(io_p, data_p, tlen); + + length -= tlen; + + io_len -= tlen; + if ((!io_len) && (length)) { + io_sg = sg_next(io_sg); + io_p = sg_virt(io_sg); + io_len = io_sg->length; + } else + io_p += tlen; + + data_len -= tlen; + if ((!data_len) && (length)) { + data_sg = sg_next(data_sg); + data_p = sg_virt(data_sg); + data_len = data_sg->length; + } else + data_p += tlen; + } +} + +static int +fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *tgt_fcpreq) +{ + struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); + struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; + u32 rsplen = 0, xfrlen = 0; + int fcp_err = 0; + u8 op = tgt_fcpreq->op; + + switch (op) { + case NVMET_FCOP_WRITEDATA: + xfrlen = tgt_fcpreq->transfer_length; + fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl, + tgt_fcpreq->offset, xfrlen); + fcpreq->transferred_length += xfrlen; + break; + + case NVMET_FCOP_READDATA: + case NVMET_FCOP_READDATA_RSP: + xfrlen = tgt_fcpreq->transfer_length; + fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl, + tgt_fcpreq->offset, xfrlen); + fcpreq->transferred_length += xfrlen; + if (op == NVMET_FCOP_READDATA) + break; + + /* Fall-Thru to RSP handling */ + + case NVMET_FCOP_RSP: + rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ? 
+ fcpreq->rsplen : tgt_fcpreq->rsplen); + memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen); + if (rsplen < tgt_fcpreq->rsplen) + fcp_err = -E2BIG; + fcpreq->rcv_rsplen = rsplen; + fcpreq->status = 0; + tfcp_req->status = 0; + break; + + case NVMET_FCOP_ABORT: + tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED; + break; + + default: + fcp_err = -EINVAL; + break; + } + + tgt_fcpreq->transferred_length = xfrlen; + tgt_fcpreq->fcp_error = fcp_err; + tgt_fcpreq->done(tgt_fcpreq); + + if ((!fcp_err) && (op == NVMET_FCOP_RSP || + op == NVMET_FCOP_READDATA_RSP || + op == NVMET_FCOP_ABORT)) + schedule_work(&tfcp_req->work); + + return 0; +} + +static void +fcloop_ls_abort(struct nvme_fc_local_port *localport, + struct nvme_fc_remote_port *remoteport, + struct nvmefc_ls_req *lsreq) +{ +} + +static void +fcloop_fcp_abort(struct nvme_fc_local_port *localport, + struct nvme_fc_remote_port *remoteport, + void *hw_queue_handle, + struct nvmefc_fcp_req *fcpreq) +{ +} + +static void +fcloop_localport_delete(struct nvme_fc_local_port *localport) +{ + struct fcloop_lport *lport = localport->private; + + /* release any threads waiting for the unreg to complete */ + complete(&lport->unreg_done); +} + +static void +fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) +{ + struct fcloop_rport *rport = remoteport->private; + + /* release any threads waiting for the unreg to complete */ + complete(&rport->nport->rport_unreg_done); +} + +static void +fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) +{ + struct fcloop_tport *tport = targetport->private; + + /* release any threads waiting for the unreg to complete */ + complete(&tport->nport->tport_unreg_done); +} + +#define FCLOOP_HW_QUEUES 4 +#define FCLOOP_SGL_SEGS 256 +#define FCLOOP_DMABOUND_4G 0xFFFFFFFF + +struct nvme_fc_port_template fctemplate = { + .localport_delete = fcloop_localport_delete, + .remoteport_delete = fcloop_remoteport_delete, + .create_queue = fcloop_create_queue, + .delete_queue = fcloop_delete_queue, + .ls_req = fcloop_ls_req, + .fcp_io = fcloop_fcp_req, + .ls_abort = fcloop_ls_abort, + .fcp_abort = fcloop_fcp_abort, + .max_hw_queues = FCLOOP_HW_QUEUES, + .max_sgl_segments = FCLOOP_SGL_SEGS, + .max_dif_sgl_segments = FCLOOP_SGL_SEGS, + .dma_boundary = FCLOOP_DMABOUND_4G, + /* sizes of additional private data for data structures */ + .local_priv_sz = sizeof(struct fcloop_lport), + .remote_priv_sz = sizeof(struct fcloop_rport), + .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), + .fcprqst_priv_sz = sizeof(struct fcloop_fcpreq), +}; + +struct nvmet_fc_target_template tgttemplate = { + .targetport_delete = fcloop_targetport_delete, + .xmt_ls_rsp = fcloop_xmt_ls_rsp, + .fcp_op = fcloop_fcp_op, + .max_hw_queues = FCLOOP_HW_QUEUES, + .max_sgl_segments = FCLOOP_SGL_SEGS, + .max_dif_sgl_segments = FCLOOP_SGL_SEGS, + .dma_boundary = FCLOOP_DMABOUND_4G, + /* optional features */ + .target_features = NVMET_FCTGTFEAT_READDATA_RSP | + NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED, + /* sizes of additional private data for data structures */ + .target_priv_sz = sizeof(struct fcloop_tport), +}; + +static ssize_t +fcloop_create_local_port(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct nvme_fc_port_info pinfo; + struct fcloop_ctrl_options *opts; + struct nvme_fc_local_port *localport; + struct fcloop_lport *lport; + int ret; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return -ENOMEM; + + ret = fcloop_parse_options(opts, buf); + if (ret) + goto out_free_opts; + + /* 
everything there ? */ + if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) { + ret = -EINVAL; + goto out_free_opts; + } + + pinfo.node_name = opts->wwnn; + pinfo.port_name = opts->wwpn; + pinfo.port_role = opts->roles; + pinfo.port_id = opts->fcaddr; + + ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport); + if (!ret) { + unsigned long flags; + + /* success */ + lport = localport->private; + lport->localport = localport; + INIT_LIST_HEAD(&lport->lport_list); + + spin_lock_irqsave(&fcloop_lock, flags); + list_add_tail(&lport->lport_list, &fcloop_lports); + spin_unlock_irqrestore(&fcloop_lock, flags); + + /* mark all of the input buffer consumed */ + ret = count; + } + +out_free_opts: + kfree(opts); + return ret ? ret : count; +} + + +static void +__unlink_local_port(struct fcloop_lport *lport) +{ + list_del(&lport->lport_list); +} + +static int +__wait_localport_unreg(struct fcloop_lport *lport) +{ + int ret; + + init_completion(&lport->unreg_done); + + ret = nvme_fc_unregister_localport(lport->localport); + + wait_for_completion(&lport->unreg_done); + + return ret; +} + + +static ssize_t +fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcloop_lport *tlport, *lport = NULL; + u64 nodename, portname; + unsigned long flags; + int ret; + + ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); + if (ret) + return ret; + + spin_lock_irqsave(&fcloop_lock, flags); + + list_for_each_entry(tlport, &fcloop_lports, lport_list) { + if (tlport->localport->node_name == nodename && + tlport->localport->port_name == portname) { + lport = tlport; + __unlink_local_port(lport); + break; + } + } + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (!lport) + return -ENOENT; + + ret = __wait_localport_unreg(lport); + + return ret ? ret : count; +} + +static void +fcloop_nport_free(struct kref *ref) +{ + struct fcloop_nport *nport = + container_of(ref, struct fcloop_nport, ref); + unsigned long flags; + + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&nport->nport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + kfree(nport); +} + +static void +fcloop_nport_put(struct fcloop_nport *nport) +{ + kref_put(&nport->ref, fcloop_nport_free); +} + +static int +fcloop_nport_get(struct fcloop_nport *nport) +{ + return kref_get_unless_zero(&nport->ref); +} + +static struct fcloop_nport * +fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) +{ + struct fcloop_nport *newnport, *nport = NULL; + struct fcloop_lport *tmplport, *lport = NULL; + struct fcloop_ctrl_options *opts; + unsigned long flags; + u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS; + int ret; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return NULL; + + ret = fcloop_parse_options(opts, buf); + if (ret) + goto out_free_opts; + + /* everything there ? 
*/ + if ((opts->mask & opts_mask) != opts_mask) { + ret = -EINVAL; + goto out_free_opts; + } + + newnport = kzalloc(sizeof(*newnport), GFP_KERNEL); + if (!newnport) + goto out_free_opts; + + INIT_LIST_HEAD(&newnport->nport_list); + newnport->node_name = opts->wwnn; + newnport->port_name = opts->wwpn; + if (opts->mask & NVMF_OPT_ROLES) + newnport->port_role = opts->roles; + if (opts->mask & NVMF_OPT_FCADDR) + newnport->port_id = opts->fcaddr; + kref_init(&newnport->ref); + + spin_lock_irqsave(&fcloop_lock, flags); + + list_for_each_entry(tmplport, &fcloop_lports, lport_list) { + if (tmplport->localport->node_name == opts->wwnn && + tmplport->localport->port_name == opts->wwpn) + goto out_invalid_opts; + + if (tmplport->localport->node_name == opts->lpwwnn && + tmplport->localport->port_name == opts->lpwwpn) + lport = tmplport; + } + + if (remoteport) { + if (!lport) + goto out_invalid_opts; + newnport->lport = lport; + } + + list_for_each_entry(nport, &fcloop_nports, nport_list) { + if (nport->node_name == opts->wwnn && + nport->port_name == opts->wwpn) { + if ((remoteport && nport->rport) || + (!remoteport && nport->tport)) { + nport = NULL; + goto out_invalid_opts; + } + + fcloop_nport_get(nport); + + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (remoteport) + nport->lport = lport; + if (opts->mask & NVMF_OPT_ROLES) + nport->port_role = opts->roles; + if (opts->mask & NVMF_OPT_FCADDR) + nport->port_id = opts->fcaddr; + goto out_free_newnport; + } + } + + list_add_tail(&newnport->nport_list, &fcloop_nports); + + spin_unlock_irqrestore(&fcloop_lock, flags); + + kfree(opts); + return newnport; + +out_invalid_opts: + spin_unlock_irqrestore(&fcloop_lock, flags); +out_free_newnport: + kfree(newnport); +out_free_opts: + kfree(opts); + return nport; +} + +static ssize_t +fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct nvme_fc_remote_port *remoteport; + struct fcloop_nport *nport; + struct fcloop_rport *rport; + struct nvme_fc_port_info pinfo; + int ret; + + nport = fcloop_alloc_nport(buf, count, true); + if (!nport) + return -EIO; + + pinfo.node_name = nport->node_name; + pinfo.port_name = nport->port_name; + pinfo.port_role = nport->port_role; + pinfo.port_id = nport->port_id; + + ret = nvme_fc_register_remoteport(nport->lport->localport, + &pinfo, &remoteport); + if (ret || !remoteport) { + fcloop_nport_put(nport); + return ret; + } + + /* success */ + rport = remoteport->private; + rport->remoteport = remoteport; + rport->targetport = (nport->tport) ? nport->tport->targetport : NULL; + if (nport->tport) { + nport->tport->remoteport = remoteport; + nport->tport->lport = nport->lport; + } + rport->nport = nport; + rport->lport = nport->lport; + nport->rport = rport; + + return ret ? 
ret : count; +} + + +static struct fcloop_rport * +__unlink_remote_port(struct fcloop_nport *nport) +{ + struct fcloop_rport *rport = nport->rport; + + if (rport && nport->tport) + nport->tport->remoteport = NULL; + nport->rport = NULL; + + return rport; +} + +static int +__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) +{ + int ret; + + if (!rport) + return -EALREADY; + + init_completion(&nport->rport_unreg_done); + + ret = nvme_fc_unregister_remoteport(rport->remoteport); + if (ret) + return ret; + + wait_for_completion(&nport->rport_unreg_done); + + fcloop_nport_put(nport); + + return ret; +} + +static ssize_t +fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcloop_nport *nport = NULL, *tmpport; + static struct fcloop_rport *rport; + u64 nodename, portname; + unsigned long flags; + int ret; + + ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); + if (ret) + return ret; + + spin_lock_irqsave(&fcloop_lock, flags); + + list_for_each_entry(tmpport, &fcloop_nports, nport_list) { + if (tmpport->node_name == nodename && + tmpport->port_name == portname && tmpport->rport) { + nport = tmpport; + rport = __unlink_remote_port(nport); + break; + } + } + + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (!nport) + return -ENOENT; + + ret = __wait_remoteport_unreg(nport, rport); + + return ret ? ret : count; +} + +static ssize_t +fcloop_create_target_port(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct nvmet_fc_target_port *targetport; + struct fcloop_nport *nport; + struct fcloop_tport *tport; + struct nvmet_fc_port_info tinfo; + int ret; + + nport = fcloop_alloc_nport(buf, count, false); + if (!nport) + return -EIO; + + tinfo.node_name = nport->node_name; + tinfo.port_name = nport->port_name; + tinfo.port_id = nport->port_id; + + ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL, + &targetport); + if (ret) { + fcloop_nport_put(nport); + return ret; + } + + /* success */ + tport = targetport->private; + tport->targetport = targetport; + tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL; + if (nport->rport) + nport->rport->targetport = targetport; + tport->nport = nport; + tport->lport = nport->lport; + nport->tport = tport; + + return ret ? 
ret : count; +} + + +static struct fcloop_tport * +__unlink_target_port(struct fcloop_nport *nport) +{ + struct fcloop_tport *tport = nport->tport; + + if (tport && nport->rport) + nport->rport->targetport = NULL; + nport->tport = NULL; + + return tport; +} + +static int +__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) +{ + int ret; + + if (!tport) + return -EALREADY; + + init_completion(&nport->tport_unreg_done); + + ret = nvmet_fc_unregister_targetport(tport->targetport); + if (ret) + return ret; + + wait_for_completion(&nport->tport_unreg_done); + + fcloop_nport_put(nport); + + return ret; +} + +static ssize_t +fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcloop_nport *nport = NULL, *tmpport; + struct fcloop_tport *tport; + u64 nodename, portname; + unsigned long flags; + int ret; + + ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); + if (ret) + return ret; + + spin_lock_irqsave(&fcloop_lock, flags); + + list_for_each_entry(tmpport, &fcloop_nports, nport_list) { + if (tmpport->node_name == nodename && + tmpport->port_name == portname && tmpport->tport) { + nport = tmpport; + tport = __unlink_target_port(nport); + break; + } + } + + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (!nport) + return -ENOENT; + + ret = __wait_targetport_unreg(nport, tport); + + return ret ? ret : count; +} + + +static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port); +static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port); +static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port); +static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port); +static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port); +static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port); + +static struct attribute *fcloop_dev_attrs[] = { + &dev_attr_add_local_port.attr, + &dev_attr_del_local_port.attr, + &dev_attr_add_remote_port.attr, + &dev_attr_del_remote_port.attr, + &dev_attr_add_target_port.attr, + &dev_attr_del_target_port.attr, + NULL +}; + +static struct attribute_group fclopp_dev_attrs_group = { + .attrs = fcloop_dev_attrs, +}; + +static const struct attribute_group *fcloop_dev_attr_groups[] = { + &fclopp_dev_attrs_group, + NULL, +}; + +static struct class *fcloop_class; +static struct device *fcloop_device; + + +static int __init fcloop_init(void) +{ + int ret; + + fcloop_class = class_create(THIS_MODULE, "fcloop"); + if (IS_ERR(fcloop_class)) { + pr_err("couldn't register class fcloop\n"); + ret = PTR_ERR(fcloop_class); + return ret; + } + + fcloop_device = device_create_with_groups( + fcloop_class, NULL, MKDEV(0, 0), NULL, + fcloop_dev_attr_groups, "ctl"); + if (IS_ERR(fcloop_device)) { + pr_err("couldn't create ctl device!\n"); + ret = PTR_ERR(fcloop_device); + goto out_destroy_class; + } + + get_device(fcloop_device); + + return 0; + +out_destroy_class: + class_destroy(fcloop_class); + return ret; +} + +static void __exit fcloop_exit(void) +{ + struct fcloop_lport *lport; + struct fcloop_nport *nport; + struct fcloop_tport *tport; + struct fcloop_rport *rport; + unsigned long flags; + int ret; + + spin_lock_irqsave(&fcloop_lock, flags); + + for (;;) { + nport = list_first_entry_or_null(&fcloop_nports, + typeof(*nport), nport_list); + if (!nport) + break; + + tport = __unlink_target_port(nport); + rport = __unlink_remote_port(nport); + + spin_unlock_irqrestore(&fcloop_lock, flags); + + ret = 
__wait_targetport_unreg(nport, tport); + if (ret) + pr_warn("%s: Failed deleting target port\n", __func__); + + ret = __wait_remoteport_unreg(nport, rport); + if (ret) + pr_warn("%s: Failed deleting remote port\n", __func__); + + spin_lock_irqsave(&fcloop_lock, flags); + } + + for (;;) { + lport = list_first_entry_or_null(&fcloop_lports, + typeof(*lport), lport_list); + if (!lport) + break; + + __unlink_local_port(lport); + + spin_unlock_irqrestore(&fcloop_lock, flags); + + ret = __wait_localport_unreg(lport); + if (ret) + pr_warn("%s: Failed deleting local port\n", __func__); + + spin_lock_irqsave(&fcloop_lock, flags); + } + + spin_unlock_irqrestore(&fcloop_lock, flags); + + put_device(fcloop_device); + + device_destroy(fcloop_class, MKDEV(0, 0)); + class_destroy(fcloop_class); +} + +module_init(fcloop_init); +module_exit(fcloop_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 4a96c2049b7b6be310ea0e818fa29bb39db108d0..4195115c7e5493be02acc7b4daddbfd2427c831e 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c @@ -37,9 +37,7 @@ static void nvmet_inline_bio_init(struct nvmet_req *req) { struct bio *bio = &req->inline_bio; - bio_init(bio); - bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC; - bio->bi_io_vec = req->inline_bvec; + bio_init(bio, req->inline_bvec, NVMET_MAX_INLINE_BIOVEC); } static void nvmet_execute_rw(struct nvmet_req *req) @@ -58,7 +56,7 @@ static void nvmet_execute_rw(struct nvmet_req *req) if (req->cmd->rw.opcode == nvme_cmd_write) { op = REQ_OP_WRITE; - op_flags = WRITE_ODIRECT; + op_flags = REQ_SYNC | REQ_IDLE; if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) op_flags |= REQ_FUA; } else { @@ -96,7 +94,7 @@ static void nvmet_execute_rw(struct nvmet_req *req) cookie = submit_bio(bio); - blk_poll(bdev_get_queue(req->ns->bdev), cookie); + blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie); } static void nvmet_execute_flush(struct nvmet_req *req) @@ -109,7 +107,7 @@ static void nvmet_execute_flush(struct nvmet_req *req) bio->bi_bdev = req->ns->bdev; bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; submit_bio(bio); } @@ -172,6 +170,32 @@ static void nvmet_execute_dsm(struct nvmet_req *req) } } +static void nvmet_execute_write_zeroes(struct nvmet_req *req) +{ + struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes; + struct bio *bio = NULL; + u16 status = NVME_SC_SUCCESS; + sector_t sector; + sector_t nr_sector; + + sector = le64_to_cpu(write_zeroes->slba) << + (req->ns->blksize_shift - 9); + nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) << + (req->ns->blksize_shift - 9)) + 1; + + if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, + GFP_KERNEL, &bio, true)) + status = NVME_SC_INTERNAL | NVME_SC_DNR; + + if (bio) { + bio->bi_private = req; + bio->bi_end_io = nvmet_bio_done; + submit_bio(bio); + } else { + nvmet_req_complete(req, status); + } +} + int nvmet_parse_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -209,6 +233,9 @@ int nvmet_parse_io_cmd(struct nvmet_req *req) req->data_len = le32_to_cpu(cmd->dsm.nr + 1) * sizeof(struct nvme_dsm_range); return 0; + case nvme_cmd_write_zeroes: + req->execute = nvmet_execute_write_zeroes; + return 0; default: pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode); return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 
d5df77d686b26a40414f192a1c74388651141fd1..9aaa70071ae555efc6b6cc41d08cdcb7a6108cbf 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -36,6 +36,7 @@ (NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS) struct nvme_loop_iod { + struct nvme_request nvme_req; struct nvme_command cmd; struct nvme_completion rsp; struct nvmet_req req; @@ -112,10 +113,10 @@ static void nvme_loop_complete_rq(struct request *req) blk_mq_end_request(req, error); } -static void nvme_loop_queue_response(struct nvmet_req *nvme_req) +static void nvme_loop_queue_response(struct nvmet_req *req) { struct nvme_loop_iod *iod = - container_of(nvme_req, struct nvme_loop_iod, req); + container_of(req, struct nvme_loop_iod, req); struct nvme_completion *cqe = &iod->rsp; /* @@ -126,13 +127,13 @@ static void nvme_loop_queue_response(struct nvmet_req *nvme_req) */ if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 && cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) { - nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe); + nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status, + &cqe->result); } else { - struct request *req = blk_mq_rq_from_pdu(iod); + struct request *rq = blk_mq_rq_from_pdu(iod); - if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special) - memcpy(req->special, cqe, sizeof(*cqe)); - blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1); + iod->nvme_req.result = cqe->result; + blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1); } } @@ -168,7 +169,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, int ret; ret = nvme_setup_cmd(ns, req, &iod->cmd); - if (ret) + if (ret != BLK_MQ_RQ_QUEUE_OK) return ret; iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; @@ -178,26 +179,25 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_cleanup_cmd(req); blk_mq_start_request(req); nvme_loop_queue_response(&iod->req); - return 0; + return BLK_MQ_RQ_QUEUE_OK; } if (blk_rq_bytes(req)) { iod->sg_table.sgl = iod->first_sgl; ret = sg_alloc_table_chained(&iod->sg_table, - req->nr_phys_segments, iod->sg_table.sgl); + blk_rq_nr_phys_segments(req), + iod->sg_table.sgl); if (ret) return BLK_MQ_RQ_QUEUE_BUSY; iod->req.sg = iod->sg_table.sgl; iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); - BUG_ON(iod->req.sg_cnt > req->nr_phys_segments); } - iod->cmd.common.command_id = req->tag; blk_mq_start_request(req); schedule_work(&iod->work); - return 0; + return BLK_MQ_RQ_QUEUE_OK; } static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx) diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 76b6eedccaf92ce4708cc4b730c03775d5469f0f..23d5eb1c944f64c485fef8551a41a72151492f2c 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -47,6 +47,7 @@ struct nvmet_ns { loff_t size; u8 nguid[16]; + bool enabled; struct nvmet_subsys *subsys; const char *device_path; @@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item) return container_of(to_config_group(item), struct nvmet_ns, group); } -static inline bool nvmet_ns_enabled(struct nvmet_ns *ns) -{ - return !list_empty_careful(&ns->dev_link); -} - struct nvmet_cq { u16 qid; u16 size; @@ -238,7 +234,7 @@ static inline void nvmet_set_status(struct nvmet_req *req, u16 status) static inline void nvmet_set_result(struct nvmet_req *req, u32 result) { - req->rsp->result = cpu_to_le32(result); + req->rsp->result.u32 = cpu_to_le32(result); } /* diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 
f8d23999e0f2c28b98934c6a790b3a6c2bde26d1..8c3760a78ac080af522afb5892d471063243bd7b 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -951,6 +951,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { + ib_drain_qp(queue->cm_id->qp); rdma_destroy_qp(queue->cm_id); ib_free_cq(queue->cq); } @@ -1044,8 +1045,10 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, } ret = nvmet_sq_init(&queue->nvme_sq); - if (ret) + if (ret) { + ret = NVME_RDMA_CM_NO_RSC; goto out_free_queue; + } ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); if (ret) @@ -1066,6 +1069,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, spin_lock_init(&queue->rsp_wr_wait_lock); INIT_LIST_HEAD(&queue->free_rsps); spin_lock_init(&queue->rsps_lock); + INIT_LIST_HEAD(&queue->queue_list); queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); if (queue->idx < 0) { @@ -1114,6 +1118,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, out_free_queue: kfree(queue); out_reject: + pr_debug("rejecting connect request with status code %d\n", ret); nvmet_rdma_cm_reject(cm_id, ret); return NULL; } @@ -1127,7 +1132,8 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) rdma_notify(queue->cm_id, event->event); break; default: - pr_err("received unrecognized IB QP event %d\n", event->event); + pr_err("received IB QP event: %s (%d)\n", + ib_event_msg(event->event), event->event); break; } } @@ -1244,7 +1250,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) if (disconnect) { rdma_disconnect(queue->cm_id); - ib_drain_qp(queue->cm_id->qp); schedule_work(&queue->release_work); } } @@ -1269,7 +1274,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, { WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); - pr_err("failed to connect queue\n"); + mutex_lock(&nvmet_rdma_queue_mutex); + if (!list_empty(&queue->queue_list)) + list_del_init(&queue->queue_list); + mutex_unlock(&nvmet_rdma_queue_mutex); + + pr_err("failed to connect queue %d\n", queue->idx); schedule_work(&queue->release_work); } @@ -1352,12 +1362,21 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: - nvmet_rdma_queue_disconnect(queue); + /* + * We might end up here when we already freed the qp + * which means queue release sequence is in progress, + * so don't get in the way... + */ + if (queue) + nvmet_rdma_queue_disconnect(queue); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: ret = nvmet_rdma_device_removal(cm_id, queue); break; case RDMA_CM_EVENT_REJECTED: + pr_debug("Connection rejected: %s\n", + rdma_reject_msg(cm_id, event->status)); + /* FALLTHROUGH */ case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_CONNECT_ERROR: nvmet_rdma_queue_connect_fail(cm_id, queue); diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index ba140eaee5c89f99f0836f43bdaf425c995616c8..650f1b1797adbc1db1273eaba41d4f83d457bc79 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -35,6 +35,16 @@ config NVMEM_LPC18XX_EEPROM To compile this driver as a module, choose M here: the module will be called nvmem_lpc18xx_eeprom. 
+config NVMEM_LPC18XX_OTP + tristate "NXP LPC18XX OTP Memory Support" + depends on ARCH_LPC18XX || COMPILE_TEST + depends on HAS_IOMEM + help + Say Y here to include support for NXP LPC18xx OTP memory found on + all LPC18xx and LPC43xx devices. + To compile this driver as a module, choose M here: the module + will be called nvmem_lpc18xx_otp. + config NVMEM_MXS_OCOTP tristate "Freescale MXS On-Chip OTP Memory Support" depends on ARCH_MXS || COMPILE_TEST @@ -80,6 +90,18 @@ config ROCKCHIP_EFUSE This driver can also be built as a module. If so, the module will be called nvmem_rockchip_efuse. +config NVMEM_BCM_OCOTP + tristate "Broadcom On-Chip OTP Controller support" + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on HAS_IOMEM + default ARCH_BCM_IPROC + help + Say y here to enable read/write access to the Broadcom OTP + controller. + + This driver can also be built as a module. If so, the module + will be called nvmem-bcm-ocotp. + config NVMEM_SUNXI_SID tristate "Allwinner SoCs SID support" depends on ARCH_SUNXI diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 8f942a0cdaec8dabf5d15430fa1179d3ab535377..86e45995fdadb7fec0fe2d9eea5b1c5a0d2f9e6b 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -6,10 +6,14 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o nvmem_core-y := core.o # Devices +obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o +nvmem-bcm-ocotp-y := bcm-ocotp.o obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o nvmem-imx-ocotp-y := imx-ocotp.o obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o +obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o +nvmem_lpc18xx_otp-y := lpc18xx_otp.o obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o nvmem-mxs-ocotp-y := mxs-ocotp.o obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c new file mode 100644 index 0000000000000000000000000000000000000000..646cadbf1f9390f3e298b0db325a2d2e5321bedc --- /dev/null +++ b/drivers/nvmem/bcm-ocotp.c @@ -0,0 +1,335 @@ +/* + * Copyright (C) 2016 Broadcom + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * # of tries for OTP Status. The time to execute a command varies. The slowest + * commands are writes which also vary based on the # of bits turned on. Writing + * 0xffffffff takes ~3800 us. 
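+ * Since poll_cpu_status() does udelay(1) between polls, OTPC_RETRIES (5000) + * bounds each wait at roughly 5 ms, comfortably above that worst case.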
+ */ +#define OTPC_RETRIES 5000 + +/* Sequence to enable OTP program */ +#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd } + +/* OTPC Commands */ +#define OTPC_CMD_READ 0x0 +#define OTPC_CMD_OTP_PROG_ENABLE 0x2 +#define OTPC_CMD_OTP_PROG_DISABLE 0x3 +#define OTPC_CMD_PROGRAM 0xA + +/* OTPC Status Bits */ +#define OTPC_STAT_CMD_DONE BIT(1) +#define OTPC_STAT_PROG_OK BIT(2) + +/* OTPC register definition */ +#define OTPC_MODE_REG_OFFSET 0x0 +#define OTPC_MODE_REG_OTPC_MODE 0 +#define OTPC_COMMAND_OFFSET 0x4 +#define OTPC_COMMAND_COMMAND_WIDTH 6 +#define OTPC_CMD_START_OFFSET 0x8 +#define OTPC_CMD_START_START 0 +#define OTPC_CPU_STATUS_OFFSET 0xc +#define OTPC_CPUADDR_REG_OFFSET 0x28 +#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16 +#define OTPC_CPU_WRITE_REG_OFFSET 0x2c + +#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1) +#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1) + + +struct otpc_map { + /* in words. */ + u32 otpc_row_size; + /* 128 bit row / 4 words support. */ + u16 data_r_offset[4]; + /* 128 bit row / 4 words support. */ + u16 data_w_offset[4]; +}; + +static struct otpc_map otp_map = { + .otpc_row_size = 1, + .data_r_offset = {0x10}, + .data_w_offset = {0x2c}, +}; + +static struct otpc_map otp_map_v2 = { + .otpc_row_size = 2, + .data_r_offset = {0x10, 0x5c}, + .data_w_offset = {0x2c, 0x64}, +}; + +struct otpc_priv { + struct device *dev; + void __iomem *base; + struct otpc_map *map; + struct nvmem_config *config; +}; + +static inline void set_command(void __iomem *base, u32 command) +{ + writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET); +} + +static inline void set_cpu_address(void __iomem *base, u32 addr) +{ + writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET); +} + +static inline void set_start_bit(void __iomem *base) +{ + writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET); +} + +static inline void reset_start_bit(void __iomem *base) +{ + writel(0, base + OTPC_CMD_START_OFFSET); +} + +static inline void write_cpu_data(void __iomem *base, u32 value) +{ + writel(value, base + OTPC_CPU_WRITE_REG_OFFSET); +} + +static int poll_cpu_status(void __iomem *base, u32 value) +{ + u32 status; + u32 retries; + + for (retries = 0; retries < OTPC_RETRIES; retries++) { + status = readl(base + OTPC_CPU_STATUS_OFFSET); + if (status & value) + break; + udelay(1); + } + if (retries == OTPC_RETRIES) + return -EAGAIN; + + return 0; +} + +static int enable_ocotp_program(void __iomem *base) +{ + static const u32 vals[] = OTPC_PROG_EN_SEQ; + int i; + int ret; + + /* Write the magic sequence to enable programming */ + set_command(base, OTPC_CMD_OTP_PROG_ENABLE); + for (i = 0; i < ARRAY_SIZE(vals); i++) { + write_cpu_data(base, vals[i]); + set_start_bit(base); + ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE); + reset_start_bit(base); + if (ret) + return ret; + } + + return poll_cpu_status(base, OTPC_STAT_PROG_OK); +} + +static int disable_ocotp_program(void __iomem *base) +{ + int ret; + + set_command(base, OTPC_CMD_OTP_PROG_DISABLE); + set_start_bit(base); + ret = poll_cpu_status(base, OTPC_STAT_PROG_OK); + reset_start_bit(base); + + return ret; +} + +static int bcm_otpc_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct otpc_priv *priv = context; + u32 *buf = val; + u32 bytes_read; + u32 address = offset / priv->config->word_size; + int i, ret; + + for (bytes_read = 0; bytes_read < bytes;) { + set_command(priv->base, OTPC_CMD_READ); + set_cpu_address(priv->base, address++); + set_start_bit(priv->base); + 
ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE); + if (ret) { + dev_err(priv->dev, "otp read error: 0x%x", ret); + return -EIO; + } + + for (i = 0; i < priv->map->otpc_row_size; i++) { + *buf++ = readl(priv->base + + priv->map->data_r_offset[i]); + bytes_read += sizeof(*buf); + } + + reset_start_bit(priv->base); + } + + return 0; +} + +static int bcm_otpc_write(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct otpc_priv *priv = context; + u32 *buf = val; + u32 bytes_written; + u32 address = offset / priv->config->word_size; + int i, ret; + + if (offset % priv->config->word_size) + return -EINVAL; + + ret = enable_ocotp_program(priv->base); + if (ret) + return -EIO; + + for (bytes_written = 0; bytes_written < bytes;) { + set_command(priv->base, OTPC_CMD_PROGRAM); + set_cpu_address(priv->base, address++); + for (i = 0; i < priv->map->otpc_row_size; i++) { + writel(*buf, priv->base + priv->map->data_r_offset[i]); + buf++; + bytes_written += sizeof(*buf); + } + set_start_bit(priv->base); + ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE); + reset_start_bit(priv->base); + if (ret) { + dev_err(priv->dev, "otp write error: 0x%x", ret); + return -EIO; + } + } + + disable_ocotp_program(priv->base); + + return 0; +} + +static struct nvmem_config bcm_otpc_nvmem_config = { + .name = "bcm-ocotp", + .read_only = false, + .word_size = 4, + .stride = 4, + .owner = THIS_MODULE, + .reg_read = bcm_otpc_read, + .reg_write = bcm_otpc_write, +}; + +static const struct of_device_id bcm_otpc_dt_ids[] = { + { .compatible = "brcm,ocotp" }, + { .compatible = "brcm,ocotp-v2" }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids); + +static int bcm_otpc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *dn = dev->of_node; + struct resource *res; + struct otpc_priv *priv; + struct nvmem_device *nvmem; + int err; + u32 num_words; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (of_device_is_compatible(dev->of_node, "brcm,ocotp")) + priv->map = &otp_map; + else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) + priv->map = &otp_map_v2; + else { + dev_err(&pdev->dev, + "%s otpc config map not defined\n", __func__); + return -EINVAL; + } + + /* Get OTP base address register. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) { + dev_err(dev, "unable to map I/O memory\n"); + return PTR_ERR(priv->base); + } + + /* Enable CPU access to OTPC. */ + writel(readl(priv->base + OTPC_MODE_REG_OFFSET) | + BIT(OTPC_MODE_REG_OTPC_MODE), + priv->base + OTPC_MODE_REG_OFFSET); + reset_start_bit(priv->base); + + /* Read size of memory in words. 
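+ * The value read is used as a count of 4-byte words: the nvmem device + * registered below is sized at 4 * num_words bytes, and the v2 controller + * only widens the reported word size and stride to 8.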
*/ + err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words); + if (err) { + dev_err(dev, "size parameter not specified\n"); + return -EINVAL; + } else if (num_words == 0) { + dev_err(dev, "size must be > 0\n"); + return -EINVAL; + } + + bcm_otpc_nvmem_config.size = 4 * num_words; + bcm_otpc_nvmem_config.dev = dev; + bcm_otpc_nvmem_config.priv = priv; + + if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) { + bcm_otpc_nvmem_config.word_size = 8; + bcm_otpc_nvmem_config.stride = 8; + } + + priv->config = &bcm_otpc_nvmem_config; + + nvmem = nvmem_register(&bcm_otpc_nvmem_config); + if (IS_ERR(nvmem)) { + dev_err(dev, "error registering nvmem config\n"); + return PTR_ERR(nvmem); + } + + platform_set_drvdata(pdev, nvmem); + + return 0; +} + +static int bcm_otpc_remove(struct platform_device *pdev) +{ + struct nvmem_device *nvmem = platform_get_drvdata(pdev); + + return nvmem_unregister(nvmem); +} + +static struct platform_driver bcm_otpc_driver = { + .probe = bcm_otpc_probe, + .remove = bcm_otpc_remove, + .driver = { + .name = "brcm-otpc", + .of_match_table = bcm_otpc_dt_ids, + }, +}; +module_platform_driver(bcm_otpc_driver); + +MODULE_DESCRIPTION("Broadcom OTPC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c new file mode 100644 index 0000000000000000000000000000000000000000..be8d07403ffc9bf50074cc132235b1fc4aca0339 --- /dev/null +++ b/drivers/nvmem/lpc18xx_otp.c @@ -0,0 +1,124 @@ +/* + * NXP LPC18xx/43xx OTP memory NVMEM driver + * + * Copyright (c) 2016 Joachim Eastwood + * + * Based on the imx ocotp driver, + * Copyright (c) 2015 Pengutronix, Philipp Zabel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * TODO: add support for writing OTP register via API in boot ROM. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts + * at offset 0 from the base. + * + * Bank 0 contains the part ID for Flashless devices and is reserved for + * devices with Flash. + * Bank 1/2 is general purpose or AES key storage for secure devices. + * Bank 3 contains control data, USB ID and general purpose words.
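+ * In total that is 4 banks x 4 words x 4 bytes = 64 bytes, which is what + * LPC18XX_OTP_SIZE below works out to.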
+ */ +#define LPC18XX_OTP_NUM_BANKS 4 +#define LPC18XX_OTP_WORDS_PER_BANK 4 +#define LPC18XX_OTP_WORD_SIZE sizeof(u32) +#define LPC18XX_OTP_SIZE (LPC18XX_OTP_NUM_BANKS * \ + LPC18XX_OTP_WORDS_PER_BANK * \ + LPC18XX_OTP_WORD_SIZE) + +struct lpc18xx_otp { + void __iomem *base; +}; + +static int lpc18xx_otp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct lpc18xx_otp *otp = context; + unsigned int count = bytes >> 2; + u32 index = offset >> 2; + u32 *buf = val; + int i; + + if (count > (LPC18XX_OTP_SIZE - index)) + count = LPC18XX_OTP_SIZE - index; + + for (i = index; i < (index + count); i++) + *buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE); + + return 0; +} + +static struct nvmem_config lpc18xx_otp_nvmem_config = { + .name = "lpc18xx-otp", + .read_only = true, + .word_size = LPC18XX_OTP_WORD_SIZE, + .stride = LPC18XX_OTP_WORD_SIZE, + .owner = THIS_MODULE, + .reg_read = lpc18xx_otp_read, +}; + +static int lpc18xx_otp_probe(struct platform_device *pdev) +{ + struct nvmem_device *nvmem; + struct lpc18xx_otp *otp; + struct resource *res; + + otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL); + if (!otp) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + otp->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(otp->base)) + return PTR_ERR(otp->base); + + lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE; + lpc18xx_otp_nvmem_config.dev = &pdev->dev; + lpc18xx_otp_nvmem_config.priv = otp; + + nvmem = nvmem_register(&lpc18xx_otp_nvmem_config); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + platform_set_drvdata(pdev, nvmem); + + return 0; +} + +static int lpc18xx_otp_remove(struct platform_device *pdev) +{ + struct nvmem_device *nvmem = platform_get_drvdata(pdev); + + return nvmem_unregister(nvmem); +} + +static const struct of_device_id lpc18xx_otp_dt_ids[] = { + { .compatible = "nxp,lpc1850-otp" }, + { }, +}; +MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids); + +static struct platform_driver lpc18xx_otp_driver = { + .probe = lpc18xx_otp_probe, + .remove = lpc18xx_otp_remove, + .driver = { + .name = "lpc18xx_otp", + .of_match_table = lpc18xx_otp_dt_ids, + }, +}; +module_platform_driver(lpc18xx_otp_driver); + +MODULE_AUTHOR("Joachim Eastwoood "); +MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/of/base.c b/drivers/of/base.c index d687e6de24a07e11a2c2cecd94e7711610d52005..d4bea3c797d6a7b6a4d28c55cfdff1d30d03cc16 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1534,9 +1534,12 @@ void of_print_phandle_args(const char *msg, const struct of_phandle_args *args) { int i; printk("%s %s", msg, of_node_full_name(args->np)); - for (i = 0; i < args->args_count; i++) - printk(i ? ",%08x" : ":%08x", args->args[i]); - printk("\n"); + for (i = 0; i < args->args_count; i++) { + const char delim = i ? 
',' : ':'; + + pr_cont("%c%08x", delim, args->args[i]); + } + pr_cont("\n"); } int of_phandle_iterator_init(struct of_phandle_iterator *it, @@ -2077,8 +2080,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) name = of_get_property(of_aliases, "stdout", NULL); if (name) of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); - if (of_stdout) - console_set_by_of(); } if (!of_aliases) diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index c89d5d231a0e30eb4d1a458523e8ef4723b5e6d3..c9b5cac03b361c980a20c888635bbb52093aa573 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1015,6 +1015,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname, const char *type = of_get_flat_dt_prop(node, "device_type", NULL); const __be32 *reg, *endp; int l; + bool hotpluggable; /* We are scanning "memory" nodes only */ if (type == NULL) { @@ -1034,6 +1035,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname, return 0; endp = reg + (l / sizeof(__be32)); + hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL); pr_debug("memory scan node %s, reg size %d,\n", uname, l); @@ -1049,6 +1051,13 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname, (unsigned long long)size); early_init_dt_add_memory_arch(base, size); + + if (!hotpluggable) + continue; + + if (early_init_dt_mark_hotplug_memory_arch(base, size)) + pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n", + base, base + size); } return 0; @@ -1146,6 +1155,11 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) memblock_add(base, size); } +int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size) +{ + return memblock_mark_hotplug(base, size); +} + int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool nomap) { @@ -1168,6 +1182,11 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) WARN_ON(1); } +int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size) +{ + return -ENOSYS; +} + int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool nomap) { diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 393fea85eb4ef89babde111e622166b441885eb5..3fda9a32defbbdabbed554fb9f9dfd5f42056e0f 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -697,3 +697,4 @@ void of_msi_configure(struct device *dev, struct device_node *np) dev_set_msi_domain(dev, of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI)); } +EXPORT_SYMBOL_GPL(of_msi_configure); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index b470f7e3521d49c73877b6156bc9db7b926fdc51..262281bd68fa3c4d39e13f52eeafea1948d1bd22 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np) mdiodev = to_mdio_device(d); if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) return to_phy_device(d); + put_device(d); } return NULL; @@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np) status.link = 1; status.duplex = of_property_read_bool(fixed_link_node, "full-duplex"); - if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) + if (of_property_read_u32(fixed_link_node, "speed", + &status.speed)) { + of_node_put(fixed_link_node); return -EINVAL; + } status.pause = of_property_read_bool(fixed_link_node, "pause"); status.asym_pause = of_property_read_bool(fixed_link_node, "asym-pause"); @@ -486,3 +490,18 @@ int of_phy_register_fixed_link(struct device_node 
*np) return -ENODEV; } EXPORT_SYMBOL(of_phy_register_fixed_link); + +void of_phy_deregister_fixed_link(struct device_node *np) +{ + struct phy_device *phydev; + + phydev = of_phy_find_device(np); + if (!phydev) + return; + + fixed_phy_unregister(phydev); + + put_device(&phydev->mdio.dev); /* of_phy_find_device() */ + phy_device_free(phydev); /* fixed_phy_register() */ +} +EXPORT_SYMBOL(of_phy_deregister_fixed_link); diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c index f63d4b0deff009ad8963b61034b95d8563ca919b..a53982a330ea40444ac4e161b5838bb4aed1a013 100644 --- a/drivers/of/of_numa.c +++ b/drivers/of/of_numa.c @@ -176,7 +176,12 @@ int of_node_to_nid(struct device_node *device) np->name); of_node_put(np); - if (!r) + /* + * If numa=off passed on command line, or with a defective + * device tree, the nid may not be in the set of possible + * nodes. Check for this case and return NUMA_NO_NODE. + */ + if (!r && nid < MAX_NUMNODES && node_possible(nid)) return nid; return NUMA_NO_NODE; diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index b58be12ab27770bbbe40c25eb220a96204c71d3d..0ee42c3e66a1900db49408577c8ae69e698e7223 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c @@ -119,6 +119,27 @@ int of_get_pci_domain_nr(struct device_node *node) } EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); +/** + * This function will try to find the limitation of link speed by finding + * a property called "max-link-speed" of the given device node. + * + * @node: device tree node with the max link speed information + * + * Returns the associated max link speed from DT, or a negative value if the + * required property is not found or is invalid. + */ +int of_pci_get_max_link_speed(struct device_node *node) +{ + u32 max_link_speed; + + if (of_property_read_u32(node, "max-link-speed", &max_link_speed) || + max_link_speed > 4) + return -EINVAL; + + return max_link_speed; +} +EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed); + /** * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only * is present and valid diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 318dbb51e7a2e098258f7826c31e30c4277163a4..0d4cda7050e0569e6f5cca1a31ffccc9aac7f893 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -58,6 +58,41 @@ struct of_overlay { static int of_overlay_apply_one(struct of_overlay *ov, struct device_node *target, const struct device_node *overlay); +static BLOCKING_NOTIFIER_HEAD(of_overlay_chain); + +int of_overlay_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&of_overlay_chain, nb); +} +EXPORT_SYMBOL_GPL(of_overlay_notifier_register); + +int of_overlay_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&of_overlay_chain, nb); +} +EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister); + +static int of_overlay_notify(struct of_overlay *ov, + enum of_overlay_notify_action action) +{ + struct of_overlay_notify_data nd; + int i, ret; + + for (i = 0; i < ov->count; i++) { + struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i]; + + nd.target = ovinfo->target; + nd.overlay = ovinfo->overlay; + + ret = blocking_notifier_call_chain(&of_overlay_chain, + action, &nd); + if (ret) + return notifier_to_errno(ret); + } + + return 0; +} + static int of_overlay_apply_single_property(struct of_overlay *ov, struct device_node *target, struct property *prop) { @@ -368,6 +403,13 @@ int of_overlay_create(struct device_node *tree) goto err_free_idr; } + err = of_overlay_notify(ov, OF_OVERLAY_PRE_APPLY); + if 
(err < 0) { + pr_err("%s: Pre-apply notifier failed (err=%d)\n", + __func__, err); + goto err_free_idr; + } + /* apply the overlay */ err = of_overlay_apply(ov); if (err) @@ -382,6 +424,8 @@ int of_overlay_create(struct device_node *tree) /* add to the tail of the overlay list */ list_add_tail(&ov->node, &ov_list); + of_overlay_notify(ov, OF_OVERLAY_POST_APPLY); + mutex_unlock(&of_mutex); return id; @@ -498,9 +542,10 @@ int of_overlay_destroy(int id) goto out; } - + of_overlay_notify(ov, OF_OVERLAY_PRE_REMOVE); list_del(&ov->node); __of_changeset_revert(&ov->cset); + of_overlay_notify(ov, OF_OVERLAY_POST_REMOVE); of_free_overlay_info(ov); idr_remove(&ov_idr, id); of_changeset_destroy(&ov->cset); diff --git a/drivers/of/platform.c b/drivers/of/platform.c index e4bf07d20f9bbf416f756ca381d3340ce662272f..b8064bc2b6eb97a29d330f2713ce221adcc6313a 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -45,6 +45,9 @@ static int of_dev_node_match(struct device *dev, void *data) * of_find_device_by_node - Find the platform_device associated with a node * @np: Pointer to device tree node * + * Takes a reference to the embedded struct device which needs to be dropped + * after use. + * * Returns platform_device pointer, or NULL if not found */ struct platform_device *of_find_device_by_node(struct device_node *np) @@ -558,9 +561,6 @@ static int of_platform_device_destroy(struct device *dev, void *data) * of the given device (and, recurrently, their children) that have been * created from their respective device tree nodes (and only those, * leaving others - eg. manually created - unharmed). - * - * Returns 0 when all children devices have been removed or - * -EBUSY when some children remained. */ void of_platform_depopulate(struct device *parent) { diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c index 46325d6394cfe4913d07fcfab1f6f5210d89fa9f..8bf12e904fd2e7c51e77729f0cc8cecb5fb12780 100644 --- a/drivers/of/resolver.c +++ b/drivers/of/resolver.c @@ -28,20 +28,19 @@ * Find a node with the give full name by recursively following any of * the child node links. */ -static struct device_node *__of_find_node_by_full_name(struct device_node *node, +static struct device_node *find_node_by_full_name(struct device_node *node, const char *full_name) { struct device_node *child, *found; - if (node == NULL) + if (!node) return NULL; - /* check */ - if (of_node_cmp(node->full_name, full_name) == 0) + if (!of_node_cmp(node->full_name, full_name)) return of_node_get(node); for_each_child_of_node(node, child) { - found = __of_find_node_by_full_name(child, full_name); + found = find_node_by_full_name(child, full_name); if (found != NULL) { of_node_put(child); return found; @@ -51,16 +50,12 @@ static struct device_node *__of_find_node_by_full_name(struct device_node *node, return NULL; } -/* - * Find live tree's maximum phandle value. - */ -static phandle of_get_tree_max_phandle(void) +static phandle live_tree_max_phandle(void) { struct device_node *node; phandle phandle; unsigned long flags; - /* now search recursively */ raw_spin_lock_irqsave(&devtree_lock, flags); phandle = 0; for_each_of_allnodes(node) { @@ -73,131 +68,102 @@ static phandle of_get_tree_max_phandle(void) return phandle; } -/* - * Adjust a subtree's phandle values by a given delta. - * Makes sure not to just adjust the device node's phandle value, - * but modify the phandle properties values as well. 
- */ -static void __of_adjust_tree_phandles(struct device_node *node, +static void adjust_overlay_phandles(struct device_node *overlay, int phandle_delta) { struct device_node *child; struct property *prop; phandle phandle; - /* first adjust the node's phandle direct value */ - if (node->phandle != 0 && node->phandle != OF_PHANDLE_ILLEGAL) - node->phandle += phandle_delta; + /* adjust node's phandle in node */ + if (overlay->phandle != 0 && overlay->phandle != OF_PHANDLE_ILLEGAL) + overlay->phandle += phandle_delta; - /* now adjust phandle & linux,phandle values */ - for_each_property_of_node(node, prop) { + /* copy adjusted phandle into *phandle properties */ + for_each_property_of_node(overlay, prop) { - /* only look for these two */ - if (of_prop_cmp(prop->name, "phandle") != 0 && - of_prop_cmp(prop->name, "linux,phandle") != 0) + if (of_prop_cmp(prop->name, "phandle") && + of_prop_cmp(prop->name, "linux,phandle")) continue; - /* must be big enough */ if (prop->length < 4) continue; - /* read phandle value */ phandle = be32_to_cpup(prop->value); - if (phandle == OF_PHANDLE_ILLEGAL) /* unresolved */ + if (phandle == OF_PHANDLE_ILLEGAL) continue; - /* adjust */ - *(uint32_t *)prop->value = cpu_to_be32(node->phandle); + *(uint32_t *)prop->value = cpu_to_be32(overlay->phandle); } - /* now do the children recursively */ - for_each_child_of_node(node, child) - __of_adjust_tree_phandles(child, phandle_delta); + for_each_child_of_node(overlay, child) + adjust_overlay_phandles(child, phandle_delta); } -static int __of_adjust_phandle_ref(struct device_node *node, - struct property *rprop, int value) +static int update_usages_of_a_phandle_reference(struct device_node *overlay, + struct property *prop_fixup, phandle phandle) { - phandle phandle; struct device_node *refnode; - struct property *sprop; - char *propval, *propcur, *propend, *nodestr, *propstr, *s; - int offset, propcurlen; + struct property *prop; + char *value, *cur, *end, *node_path, *prop_name, *s; + int offset, len; int err = 0; - /* make a copy */ - propval = kmalloc(rprop->length, GFP_KERNEL); - if (!propval) { - pr_err("%s: Could not copy value of '%s'\n", - __func__, rprop->name); + value = kmalloc(prop_fixup->length, GFP_KERNEL); + if (!value) return -ENOMEM; - } - memcpy(propval, rprop->value, rprop->length); + memcpy(value, prop_fixup->value, prop_fixup->length); - propend = propval + rprop->length; - for (propcur = propval; propcur < propend; propcur += propcurlen + 1) { - propcurlen = strlen(propcur); + /* prop_fixup contains a list of tuples of path:property_name:offset */ + end = value + prop_fixup->length; + for (cur = value; cur < end; cur += len + 1) { + len = strlen(cur); - nodestr = propcur; - s = strchr(propcur, ':'); + node_path = cur; + s = strchr(cur, ':'); if (!s) { - pr_err("%s: Illegal symbol entry '%s' (1)\n", - __func__, propcur); err = -EINVAL; goto err_fail; } *s++ = '\0'; - propstr = s; + prop_name = s; s = strchr(s, ':'); if (!s) { - pr_err("%s: Illegal symbol entry '%s' (2)\n", - __func__, (char *)rprop->value); err = -EINVAL; goto err_fail; } - *s++ = '\0'; + err = kstrtoint(s, 10, &offset); - if (err != 0) { - pr_err("%s: Could get offset '%s'\n", - __func__, (char *)rprop->value); + if (err) goto err_fail; - } - /* look into the resolve node for the full path */ - refnode = __of_find_node_by_full_name(node, nodestr); - if (!refnode) { - pr_warn("%s: Could not find refnode '%s'\n", - __func__, (char *)rprop->value); + refnode = find_node_by_full_name(overlay, node_path); + if (!refnode) continue; - } 
- /* now find the property */ - for_each_property_of_node(refnode, sprop) { - if (of_prop_cmp(sprop->name, propstr) == 0) + for_each_property_of_node(refnode, prop) { + if (!of_prop_cmp(prop->name, prop_name)) break; } of_node_put(refnode); - if (!sprop) { - pr_err("%s: Could not find property '%s'\n", - __func__, (char *)rprop->value); + if (!prop) { err = -ENOENT; goto err_fail; } - phandle = value; - *(__be32 *)(sprop->value + offset) = cpu_to_be32(phandle); + *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle); } err_fail: - kfree(propval); + kfree(value); return err; } /* compare nodes taking into account that 'name' strips out the @ part */ -static int __of_node_name_cmp(const struct device_node *dn1, +static int node_name_cmp(const struct device_node *dn1, const struct device_node *dn2) { const char *n1 = strrchr(dn1->full_name, '/') ? : "/"; @@ -208,85 +174,77 @@ static int __of_node_name_cmp(const struct device_node *dn1, /* * Adjust the local phandle references by the given phandle delta. - * Assumes the existances of a __local_fixups__ node at the root. - * Assumes that __of_verify_tree_phandle_references has been called. - * Does not take any devtree locks so make sure you call this on a tree - * which is at the detached state. + * + * Subtree @local_fixups, which is overlay node __local_fixups__, + * mirrors the fragment node structure at the root of the overlay. + * + * For each property in the fragments that contains a phandle reference, + * @local_fixups has a property of the same name that contains a list + * of offsets of the phandle reference(s) within the respective property + * value(s). The values at these offsets will be fixed up. */ -static int __of_adjust_tree_phandle_references(struct device_node *node, - struct device_node *target, int phandle_delta) +static int adjust_local_phandle_references(struct device_node *local_fixups, + struct device_node *overlay, int phandle_delta) { - struct device_node *child, *childtarget; - struct property *rprop, *sprop; + struct device_node *child, *overlay_child; + struct property *prop_fix, *prop; int err, i, count; unsigned int off; phandle phandle; - if (node == NULL) + if (!local_fixups) return 0; - for_each_property_of_node(node, rprop) { + for_each_property_of_node(local_fixups, prop_fix) { /* skip properties added automatically */ - if (of_prop_cmp(rprop->name, "name") == 0 || - of_prop_cmp(rprop->name, "phandle") == 0 || - of_prop_cmp(rprop->name, "linux,phandle") == 0) + if (!of_prop_cmp(prop_fix->name, "name") || + !of_prop_cmp(prop_fix->name, "phandle") || + !of_prop_cmp(prop_fix->name, "linux,phandle")) continue; - if ((rprop->length % 4) != 0 || rprop->length == 0) { - pr_err("%s: Illegal property (size) '%s' @%s\n", - __func__, rprop->name, node->full_name); + if ((prop_fix->length % 4) != 0 || prop_fix->length == 0) return -EINVAL; - } - count = rprop->length / sizeof(__be32); + count = prop_fix->length / sizeof(__be32); - /* now find the target property */ - for_each_property_of_node(target, sprop) { - if (of_prop_cmp(sprop->name, rprop->name) == 0) + for_each_property_of_node(overlay, prop) { + if (!of_prop_cmp(prop->name, prop_fix->name)) break; } - if (sprop == NULL) { - pr_err("%s: Could not find target property '%s' @%s\n", - __func__, rprop->name, node->full_name); + if (!prop) return -EINVAL; - } for (i = 0; i < count; i++) { - off = be32_to_cpu(((__be32 *)rprop->value)[i]); - /* make sure the offset doesn't overstep (even wrap) */ - if (off >= sprop->length || - (off + 4) > sprop->length) { - 
pr_err("%s: Illegal property '%s' @%s\n", - __func__, rprop->name, - node->full_name); + off = be32_to_cpu(((__be32 *)prop_fix->value)[i]); + if ((off + 4) > prop->length) return -EINVAL; - } - - if (phandle_delta) { - /* adjust */ - phandle = be32_to_cpu(*(__be32 *)(sprop->value + off)); - phandle += phandle_delta; - *(__be32 *)(sprop->value + off) = cpu_to_be32(phandle); - } + + phandle = be32_to_cpu(*(__be32 *)(prop->value + off)); + phandle += phandle_delta; + *(__be32 *)(prop->value + off) = cpu_to_be32(phandle); } } - for_each_child_of_node(node, child) { - - for_each_child_of_node(target, childtarget) - if (__of_node_name_cmp(child, childtarget) == 0) + /* + * These nested loops recurse down two subtrees in parallel, where the + * node names in the two subtrees match. + * + * The roots of the subtrees are the overlay's __local_fixups__ node + * and the overlay's root node. + */ + for_each_child_of_node(local_fixups, child) { + + for_each_child_of_node(overlay, overlay_child) + if (!node_name_cmp(child, overlay_child)) break; - if (!childtarget) { - pr_err("%s: Could not find target child '%s' @%s\n", - __func__, child->name, node->full_name); + if (!overlay_child) return -EINVAL; - } - err = __of_adjust_tree_phandle_references(child, childtarget, + err = adjust_local_phandle_references(child, overlay_child, phandle_delta); - if (err != 0) + if (err) return err; } @@ -294,111 +252,103 @@ static int __of_adjust_tree_phandle_references(struct device_node *node, } /** - * of_resolve - Resolve the given node against the live tree. + * of_resolve_phandles - Relocate and resolve overlay against live tree * - * @resolve: Node to resolve + * @overlay: Pointer to devicetree overlay to relocate and resolve * - * Perform dynamic Device Tree resolution against the live tree - * to the given node to resolve. This depends on the live tree - * having a __symbols__ node, and the resolve node the __fixups__ & - * __local_fixups__ nodes (if needed). - * The result of the operation is a resolve node that it's contents - * are fit to be inserted or operate upon the live tree. - * Returns 0 on success or a negative error value on error. + * Modify (relocate) values of local phandles in @overlay to a range that + * does not conflict with the live expanded devicetree. Update references + * to the local phandles in @overlay. Update (resolve) phandle references + * in @overlay that refer to the live expanded devicetree. + * + * Phandle values in the live tree are in the range of + * 1 .. live_tree_max_phandle(). The range of phandle values in the overlay + * also begin with at 1. Adjust the phandle values in the overlay to begin + * at live_tree_max_phandle() + 1. Update references to the phandles to + * the adjusted phandle values. + * + * The name of each property in the "__fixups__" node in the overlay matches + * the name of a symbol (a label) in the live tree. The values of each + * property in the "__fixups__" node is a list of the property values in the + * overlay that need to be updated to contain the phandle reference + * corresponding to that symbol in the live tree. Update the references in + * the overlay with the phandle values in the live tree. + * + * @overlay must be detached. + * + * Resolving and applying @overlay to the live expanded devicetree must be + * protected by a mechanism to ensure that multiple overlays are processed + * in a single threaded manner so that multiple overlays will not relocate + * phandles to overlapping ranges. 
The mechanism to enforce this is not + * yet implemented. + * + * Return: %0 on success or a negative error value on error. */ -int of_resolve_phandles(struct device_node *resolve) +int of_resolve_phandles(struct device_node *overlay) { - struct device_node *child, *childroot, *refnode; - struct device_node *root_sym, *resolve_sym, *resolve_fix; - struct property *rprop; + struct device_node *child, *local_fixups, *refnode; + struct device_node *tree_symbols, *overlay_fixups; + struct property *prop; const char *refpath; phandle phandle, phandle_delta; int err; - if (!resolve) - pr_err("%s: null node\n", __func__); - if (resolve && !of_node_check_flag(resolve, OF_DETACHED)) - pr_err("%s: node %s not detached\n", __func__, - resolve->full_name); - /* the resolve node must exist, and be detached */ - if (!resolve || !of_node_check_flag(resolve, OF_DETACHED)) - return -EINVAL; - - /* first we need to adjust the phandles */ - phandle_delta = of_get_tree_max_phandle() + 1; - __of_adjust_tree_phandles(resolve, phandle_delta); - - /* locate the local fixups */ - childroot = NULL; - for_each_child_of_node(resolve, childroot) - if (of_node_cmp(childroot->name, "__local_fixups__") == 0) - break; - - if (childroot != NULL) { - /* resolve root is guaranteed to be the '/' */ - err = __of_adjust_tree_phandle_references(childroot, - resolve, 0); - if (err != 0) - return err; + tree_symbols = NULL; - BUG_ON(__of_adjust_tree_phandle_references(childroot, - resolve, phandle_delta)); + if (!overlay) { + pr_err("null overlay\n"); + err = -EINVAL; + goto out; + } + if (!of_node_check_flag(overlay, OF_DETACHED)) { + pr_err("overlay not detached\n"); + err = -EINVAL; + goto out; } - root_sym = NULL; - resolve_sym = NULL; - resolve_fix = NULL; - - /* this may fail (if no fixups are required) */ - root_sym = of_find_node_by_path("/__symbols__"); + phandle_delta = live_tree_max_phandle() + 1; + adjust_overlay_phandles(overlay, phandle_delta); - /* locate the symbols & fixups nodes on resolve */ - for_each_child_of_node(resolve, child) { + for_each_child_of_node(overlay, local_fixups) + if (!of_node_cmp(local_fixups->name, "__local_fixups__")) + break; - if (!resolve_sym && - of_node_cmp(child->name, "__symbols__") == 0) - resolve_sym = child; + err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta); + if (err) + goto out; - if (!resolve_fix && - of_node_cmp(child->name, "__fixups__") == 0) - resolve_fix = child; + overlay_fixups = NULL; - /* both found, don't bother anymore */ - if (resolve_sym && resolve_fix) - break; + for_each_child_of_node(overlay, child) { + if (!of_node_cmp(child->name, "__fixups__")) + overlay_fixups = child; } - /* we do allow for the case where no fixups are needed */ - if (!resolve_fix) { - err = 0; /* no error */ + if (!overlay_fixups) { + err = 0; goto out; } - /* we need to fixup, but no root symbols... 
*/ - if (!root_sym) { - pr_err("%s: no symbols in root of device tree.\n", __func__); + tree_symbols = of_find_node_by_path("/__symbols__"); + if (!tree_symbols) { + pr_err("no symbols in root of device tree.\n"); err = -EINVAL; goto out; } - for_each_property_of_node(resolve_fix, rprop) { + for_each_property_of_node(overlay_fixups, prop) { /* skip properties added automatically */ - if (of_prop_cmp(rprop->name, "name") == 0) + if (!of_prop_cmp(prop->name, "name")) continue; - err = of_property_read_string(root_sym, - rprop->name, &refpath); - if (err != 0) { - pr_err("%s: Could not find symbol '%s'\n", - __func__, rprop->name); + err = of_property_read_string(tree_symbols, + prop->name, &refpath); + if (err) goto out; - } refnode = of_find_node_by_path(refpath); if (!refnode) { - pr_err("%s: Could not find node by path '%s'\n", - __func__, refpath); err = -ENOENT; goto out; } @@ -406,17 +356,15 @@ int of_resolve_phandles(struct device_node *resolve) phandle = refnode->phandle; of_node_put(refnode); - pr_debug("%s: %s phandle is 0x%08x\n", - __func__, rprop->name, phandle); - - err = __of_adjust_phandle_ref(resolve, rprop, phandle); + err = update_usages_of_a_phandle_reference(overlay, prop, phandle); if (err) break; } out: - /* NULL is handled by of_node_put as NOP */ - of_node_put(root_sym); + if (err) + pr_err("overlay phandle fixup failed: %d\n", err); + of_node_put(tree_symbols); return err; } diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 82f7000a285d3780229a0ac5d49bac5618176f1d..642478d35e99a5c023a03a2d8f9593c8e4fbbde2 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -206,7 +206,7 @@ void sync_stop(void) * because we cannot reach this code without at least one * dcookie user still being registered (namely, the reader * of the event buffer). 
*/ -static inline unsigned long fast_get_dcookie(struct path *path) +static inline unsigned long fast_get_dcookie(const struct path *path) { unsigned long cookie; diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index c0cc4e7ff023b685dd5a2159fe16d7cd0deb6ad3..67935fbbbcabf706914a874de4bdef137ff38564 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include "oprof.h" #include "event_buffer.h" diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c index 9559829fb234de4088602ca6e942e8dd11bd43ff..e65a576e4032d90d270bba28403fa546727c7c0d 100644 --- a/drivers/oprofile/nmi_timer_int.c +++ b/drivers/oprofile/nmi_timer_int.c @@ -59,25 +59,16 @@ static void nmi_timer_stop_cpu(int cpu) perf_event_disable(event); } -static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action, - void *data) +static int nmi_timer_cpu_online(unsigned int cpu) { - int cpu = (unsigned long)data; - switch (action) { - case CPU_DOWN_FAILED: - case CPU_ONLINE: - nmi_timer_start_cpu(cpu); - break; - case CPU_DOWN_PREPARE: - nmi_timer_stop_cpu(cpu); - break; - } - return NOTIFY_DONE; + nmi_timer_start_cpu(cpu); + return 0; +} +static int nmi_timer_cpu_predown(unsigned int cpu) +{ + nmi_timer_stop_cpu(cpu); + return 0; } - -static struct notifier_block nmi_timer_cpu_nb = { - .notifier_call = nmi_timer_cpu_notifier -}; static int nmi_timer_start(void) { @@ -103,13 +94,14 @@ static void nmi_timer_stop(void) put_online_cpus(); } +static enum cpuhp_state hp_online; + static void nmi_timer_shutdown(void) { struct perf_event *event; int cpu; - cpu_notifier_register_begin(); - __unregister_cpu_notifier(&nmi_timer_cpu_nb); + cpuhp_remove_state(hp_online); for_each_possible_cpu(cpu) { event = per_cpu(nmi_timer_events, cpu); if (!event) @@ -118,13 +110,11 @@ static void nmi_timer_shutdown(void) per_cpu(nmi_timer_events, cpu) = NULL; perf_event_release_kernel(event); } - - cpu_notifier_register_done(); } static int nmi_timer_setup(void) { - int cpu, err; + int err; u64 period; /* clock cycles per tick: */ @@ -132,24 +122,14 @@ static int nmi_timer_setup(void) do_div(period, HZ); nmi_timer_attr.sample_period = period; - cpu_notifier_register_begin(); - err = __register_cpu_notifier(&nmi_timer_cpu_nb); - if (err) - goto out; - - /* can't attach events to offline cpus: */ - for_each_online_cpu(cpu) { - err = nmi_timer_start_cpu(cpu); - if (err) { - cpu_notifier_register_done(); - nmi_timer_shutdown(); - return err; - } + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "oprofile/nmi:online", + nmi_timer_cpu_online, nmi_timer_cpu_predown); + if (err < 0) { + nmi_timer_shutdown(); + return err; } - -out: - cpu_notifier_register_done(); - return err; + hp_online = err; + return 0; } int __init op_nmi_timer_init(struct oprofile_operations *ops) diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index 134398e0231be380c0af84e960e95b838cd2dce2..d77ebbfc67c98f8b63cdde527b8a59d7135486bd 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include "oprof.h" diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 3ed6238f8f6e0c65279ef35a182f57edb019f19d..553ef8a5d588685ff02f98bde7110dfb421accaa 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -48,7 +48,7 @@ #include #include /* for L1_CACHE_BYTES */ -#include +#include #include 
#include #include diff --git a/drivers/parisc/ccio-rm-dma.c b/drivers/parisc/ccio-rm-dma.c index f78f6f1aef47409f6066ea68560690c87dd386e7..1bf988010855ba01a8e504b7cd32ca2d3fd392cb 100644 --- a/drivers/parisc/ccio-rm-dma.c +++ b/drivers/parisc/ccio-rm-dma.c @@ -40,7 +40,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c index 783906fe659a2824987fe4b5cc9397c866ba91f7..4dd9b1308128a4eb34aecd2824ce245f6a069fd1 100644 --- a/drivers/parisc/eisa_eeprom.c +++ b/drivers/parisc/eisa_eeprom.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #define EISA_EEPROM_MINOR 241 diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c index 21905fef2cbf0d34259e37069e50140a66101441..d9bffe8d29b967a8067b27c85183e73ccf32699d 100644 --- a/drivers/parisc/eisa_enumerator.c +++ b/drivers/parisc/eisa_enumerator.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index b48243131993a6faddff840551c93a86b53ffec1..ff1a332d76e4fe73eb418f87b43a2c083d6a01f3 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -49,7 +49,7 @@ #include /* HZ */ #include #include -#include +#include /* The control of the LEDs and LCDs on PARISC-machines have to be done completely in software. The necessary calculations are done in a work queue diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 3651c3871d5b45f4afc735709503aa315139c362..055f83fddc188d42e37869b4d403dfab635c9461 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -68,7 +68,7 @@ #include #include -#include +#include #include #define PDCS_VERSION "0.30" diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 5bed17f68ef4d164bdef53a22c6c0c65c6e83e8c..d998d0ed2bec55af24c49f6fb182a9c2fc92daa2 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -26,7 +26,7 @@ #include #include -#include +#include #undef DEBUG diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index 2e21af43d91ea26bcdfbe7ec3cc8354be086749c..c0e7d21c88c25da720b7156b1143374a4d574325 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #undef DEBUG /* undef me for production */ diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index 6e3a60c788736a1e01c6d56377a4222d5e880e4a..dd6d4ccb41e4df9e97cd54d5f19337f3c69a6266 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -34,7 +34,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index d763bc9e44c1c1a0e98d7f69b490afdc0721004e..4d1d6eaf333ddb64362ed7b9a7bca613c0c763d1 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include static const struct { const char *token; diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index 74ed3e459a3e8fd41d0e1015d3e845a35142d8db..8ee44a104ac47939f7693370143c78135c3735c6 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c @@ -23,7 +23,7 @@ #include #include -#include +#include #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) diff --git a/drivers/pci/access.c b/drivers/pci/access.c index d11cdbb8fba3edab6d0bfc69490c72b0c40394dd..db239547fefd0905b416db24b794925b816a20a9 100644 --- 
a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -142,10 +142,22 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, if (size == 4) { writel(val, addr); return PCIBIOS_SUCCESSFUL; - } else { - mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); } + /* + * In general, hardware that supports only 32-bit writes on PCI is + * not spec-compliant. For example, software may perform a 16-bit + * write. If the hardware only supports 32-bit accesses, we must + * do a 32-bit read, merge in the 16 bits we intend to write, + * followed by a 32-bit write. If the 16 bits we *don't* intend to + * write happen to have any RW1C (write-one-to-clear) bits set, we + * just inadvertently cleared something we shouldn't have. + */ + dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n", + size, pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where); + + mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); writel(tmp, addr); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index c288e5a525754dbebe3165f44e5cbe0569d7ec21..bc56cf19afd392c7b3feefa2cf5f070f059ed9e1 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -320,7 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev) pci_fixup_device(pci_fixup_final, dev); pci_create_sysfs_dev_files(dev); pci_proc_attach_device(dev); - pci_bridge_d3_device_changed(dev); + pci_bridge_d3_update(dev); dev->match_driver = true; retval = device_attach(&dev->dev); diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 43ed08dd8b01c2b112cbc244e123576ea8d971cb..2fee61bb6559408469c795554b87a320695fff24 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -162,3 +162,15 @@ struct pci_ecam_ops pci_generic_ecam_ops = { .write = pci_generic_config_write, } }; + +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) +/* ECAM ops for 32-bit access only (non-compliant) */ +struct pci_ecam_ops pci_32b_ops = { + .bus_shift = 20, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = pci_generic_config_read32, + .write = pci_generic_config_write32, + } +}; +#endif diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index d7e7c0a827c3526dec77709298fc56a40ee5954e..898d2c48239cd4f27e273bb7be3a6338e401702f 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -69,7 +69,7 @@ config PCI_IMX6 config PCI_TEGRA bool "NVIDIA Tegra PCIe controller" - depends on ARCH_TEGRA && !ARM64 + depends on ARCH_TEGRA help Say Y here if you want support for the PCIe host controller found on NVIDIA Tegra SoCs. @@ -133,8 +133,8 @@ config PCIE_XILINX config PCI_XGENE bool "X-Gene PCIe controller" - depends on ARCH_XGENE - depends on OF + depends on ARM64 + depends on OF || (ACPI && PCI_QUIRKS) select PCIEPORTBUS help Say Y here if you want internal PCI support on APM X-Gene SoC. @@ -240,14 +240,16 @@ config PCIE_QCOM config PCI_HOST_THUNDER_PEM bool "Cavium Thunder PCIe controller to off-chip devices" - depends on OF && ARM64 + depends on ARM64 + depends on OF || (ACPI && PCI_QUIRKS) select PCI_HOST_COMMON help Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs. config PCI_HOST_THUNDER_ECAM bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon" - depends on OF && ARM64 + depends on ARM64 + depends on OF || (ACPI && PCI_QUIRKS) select PCI_HOST_COMMON help Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. 
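The read-modify-write merge that the new comment in pci_generic_config_write32() describes can be illustrated with a short standalone C program. This is only a sketch of the masking arithmetic for a sub-word (1- or 2-byte) write; the helper name and the sample register contents are made up, and it is not part of the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Merge a sub-word write of 'size' bytes of 'val' at byte offset 'where'
 * into the 32-bit register value 'reg32', the way a 32-bit-only config
 * accessor has to.  A 4-byte write needs no merge and is written directly.
 */
static uint32_t merge_subword_write(uint32_t reg32, int where, int size,
				    uint32_t val)
{
	uint32_t mask = ~(((1u << (size * 8)) - 1) << ((where & 0x3) * 8));
	uint32_t tmp = reg32 & mask;	/* keep the bytes we do not write */

	return tmp | (val << ((where & 0x3) * 8));
}

int main(void)
{
	/* 16-bit write of 0xbeef at offset 2 of a register reading 0x11223344 */
	printf("0x%08x\n", (unsigned)merge_subword_write(0x11223344, 2, 2, 0xbeef));
	/*
	 * Prints 0xbeef3344: the low 16 bits come from the read-back value,
	 * so any RW1C bits among them that read back as 1 are written back
	 * as 1 and therefore cleared, which is the corruption the new
	 * rate-limited warning points out.
	 */
	return 0;
}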
@@ -276,7 +278,7 @@ config PCIE_ARTPEC6 config PCIE_ROCKCHIP bool "Rockchip PCIe controller" - depends on ARCH_ROCKCHIP + depends on ARCH_ROCKCHIP || COMPILE_TEST depends on OF depends on PCI_MSI_IRQ_DOMAIN select MFD_SYSCON @@ -286,7 +288,7 @@ config PCIE_ROCKCHIP 4 slots. config VMD - depends on PCI_MSI && X86_64 + depends on PCI_MSI && X86_64 && SRCU tristate "Intel Volume Management Device Driver" default N ---help--- diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 084cb4983645a298e5d7d3874389b00ff5a7c5f5..bfe3179ae74cdb91b2eda48a369873a8f9baf139 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -15,7 +15,6 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o -obj-$(CONFIG_PCI_XGENE) += pci-xgene.o obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o @@ -25,11 +24,23 @@ obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o -obj-$(CONFIG_PCI_HISI) += pcie-hisi.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o -obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o -obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o obj-$(CONFIG_VMD) += vmd.o + +# The following drivers are for devices that use the generic ACPI +# pci_root.c driver but don't support standard ECAM config access. +# They contain MCFG quirks to replace the generic ECAM accessors with +# device-specific ones that are shared with the DT driver. + +# The ACPI driver is generic and should not require driver-specific +# config options to be enabled, so we always build these drivers on +# ARM64 and use internal ifdefs to only build the pieces we need +# depending on whether ACPI, the DT driver, or both are enabled. + +obj-$(CONFIG_ARM64) += pcie-hisi.o +obj-$(CONFIG_ARM64) += pci-thunder-ecam.o +obj-$(CONFIG_ARM64) += pci-thunder-pem.o +obj-$(CONFIG_ARM64) += pci-xgene.o diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 763ff87458288180dfd3ab19baf4e52a84fcfef3..3efcc7bdc5fb65b96e8bca3a2acd24f12da38773 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -378,6 +378,8 @@ struct hv_pcibus_device { struct msi_domain_info msi_info; struct msi_controller msi_chip; struct irq_domain *irq_domain; + struct retarget_msi_interrupt retarget_msi_interrupt_params; + spinlock_t retarget_msi_interrupt_lock; }; /* @@ -755,7 +757,7 @@ static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest, return parent->chip->irq_set_affinity(parent, dest, force); } -void hv_irq_mask(struct irq_data *data) +static void hv_irq_mask(struct irq_data *data) { pci_msi_mask_irq(data); } @@ -770,38 +772,44 @@ void hv_irq_mask(struct irq_data *data) * is built out of this PCI bus's instance GUID and the function * number of the device. 
*/ -void hv_irq_unmask(struct irq_data *data) +static void hv_irq_unmask(struct irq_data *data) { struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct irq_cfg *cfg = irqd_cfg(data); - struct retarget_msi_interrupt params; + struct retarget_msi_interrupt *params; struct hv_pcibus_device *hbus; struct cpumask *dest; struct pci_bus *pbus; struct pci_dev *pdev; int cpu; + unsigned long flags; dest = irq_data_get_affinity_mask(data); pdev = msi_desc_to_pci_dev(msi_desc); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); - memset(¶ms, 0, sizeof(params)); - params.partition_id = HV_PARTITION_ID_SELF; - params.source = 1; /* MSI(-X) */ - params.address = msi_desc->msg.address_lo; - params.data = msi_desc->msg.data; - params.device_id = (hbus->hdev->dev_instance.b[5] << 24) | + spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); + + params = &hbus->retarget_msi_interrupt_params; + memset(params, 0, sizeof(*params)); + params->partition_id = HV_PARTITION_ID_SELF; + params->source = 1; /* MSI(-X) */ + params->address = msi_desc->msg.address_lo; + params->data = msi_desc->msg.data; + params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | (hbus->hdev->dev_instance.b[4] << 16) | (hbus->hdev->dev_instance.b[7] << 8) | (hbus->hdev->dev_instance.b[6] & 0xf8) | PCI_FUNC(pdev->devfn); - params.vector = cfg->vector; + params->vector = cfg->vector; for_each_cpu_and(cpu, dest, cpu_online_mask) - params.vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu)); + params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu)); - hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, ¶ms, NULL); + hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL); + + spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); pci_msi_unmask_irq(data); } @@ -1271,9 +1279,9 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, struct hv_pci_dev *hpdev; struct pci_child_message *res_req; struct q_res_req_compl comp_pkt; - union { - struct pci_packet init_packet; - u8 buffer[0x100]; + struct { + struct pci_packet init_packet; + u8 buffer[sizeof(struct pci_child_message)]; } pkt; unsigned long flags; int ret; @@ -1582,6 +1590,10 @@ static void hv_eject_device_work(struct work_struct *work) pci_dev_put(pdev); } + spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); + list_del(&hpdev->list_entry); + spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); + memset(&ctxt, 0, sizeof(ctxt)); ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; @@ -1590,10 +1602,6 @@ static void hv_eject_device_work(struct work_struct *work) sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); - spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); - list_del(&hpdev->list_entry); - spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); - put_pcichild(hpdev, hv_pcidev_ref_childlist); put_pcichild(hpdev, hv_pcidev_ref_pnp); put_hvpcibus(hpdev->hbus); @@ -2186,6 +2194,7 @@ static int hv_pci_probe(struct hv_device *hdev, INIT_LIST_HEAD(&hbus->resources_for_children); spin_lock_init(&hbus->config_lock); spin_lock_init(&hbus->device_list_lock); + spin_lock_init(&hbus->retarget_msi_interrupt_lock); sema_init(&hbus->enum_sem, 1); init_completion(&hbus->remove_event); @@ -2266,24 +2275,32 @@ static int hv_pci_probe(struct hv_device *hdev, return ret; } -/** - * hv_pci_remove() - Remove routine for this VMBus channel - * @hdev: VMBus's tracking struct for this root PCI bus - * - * 
Return: 0 on success, -errno on failure - */ -static int hv_pci_remove(struct hv_device *hdev) +static void hv_pci_bus_exit(struct hv_device *hdev) { - int ret; - struct hv_pcibus_device *hbus; - union { + struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); + struct { struct pci_packet teardown_packet; - u8 buffer[0x100]; + u8 buffer[sizeof(struct pci_message)]; } pkt; struct pci_bus_relations relations; struct hv_pci_compl comp_pkt; + int ret; - hbus = hv_get_drvdata(hdev); + /* + * After the host sends the RESCIND_CHANNEL message, it doesn't + * access the per-channel ringbuffer any longer. + */ + if (hdev->channel->rescind) + return; + + /* Delete any children which might still exist. */ + memset(&relations, 0, sizeof(relations)); + hv_pci_devices_present(hbus, &relations); + + ret = hv_send_resources_released(hdev); + if (ret) + dev_err(&hdev->device, + "Couldn't send resources released packet(s)\n"); memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet)); init_completion(&comp_pkt.host_event); @@ -2298,7 +2315,19 @@ static int hv_pci_remove(struct hv_device *hdev) VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (!ret) wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ); +} + +/** + * hv_pci_remove() - Remove routine for this VMBus channel + * @hdev: VMBus's tracking struct for this root PCI bus + * + * Return: 0 on success, -errno on failure + */ +static int hv_pci_remove(struct hv_device *hdev) +{ + struct hv_pcibus_device *hbus; + hbus = hv_get_drvdata(hdev); if (hbus->state == hv_pcibus_installed) { /* Remove the bus from PCI's point of view. */ pci_lock_rescan_remove(); @@ -2307,17 +2336,10 @@ static int hv_pci_remove(struct hv_device *hdev) pci_unlock_rescan_remove(); } - ret = hv_send_resources_released(hdev); - if (ret) - dev_err(&hdev->device, - "Couldn't send resources released packet(s)\n"); + hv_pci_bus_exit(hdev); vmbus_close(hdev->channel); - /* Delete any children which might still exist. 
*/ - memset(&relations, 0, sizeof(relations)); - hv_pci_devices_present(hbus, &relations); - iounmap(hbus->cfg_addr); hv_free_config_window(hbus); pci_free_resource_list(&hbus->resources_for_children); diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index 6537079963424b09bba4df9f0de93bd2a1e5820e..ea789138531be438ebeac3c0de62a47adf6c84eb 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c @@ -35,12 +35,10 @@ #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ #define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ -/* PEX LUT registers */ -#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ - struct ls_pcie_drvdata { u32 lut_offset; u32 ltssm_shift; + u32 lut_dbg; struct pcie_host_ops *ops; }; @@ -134,7 +132,7 @@ static int ls_pcie_link_up(struct pcie_port *pp) struct ls_pcie *pcie = to_ls_pcie(pp); u32 state; - state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> + state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> pcie->drvdata->ltssm_shift) & LTSSM_STATE_MASK; @@ -196,18 +194,28 @@ static struct ls_pcie_drvdata ls1021_drvdata = { static struct ls_pcie_drvdata ls1043_drvdata = { .lut_offset = 0x10000, .ltssm_shift = 24, + .lut_dbg = 0x7fc, + .ops = &ls_pcie_host_ops, +}; + +static struct ls_pcie_drvdata ls1046_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 24, + .lut_dbg = 0x407fc, .ops = &ls_pcie_host_ops, }; static struct ls_pcie_drvdata ls2080_drvdata = { .lut_offset = 0x80000, .ltssm_shift = 0, + .lut_dbg = 0x7fc, .ops = &ls_pcie_host_ops, }; static const struct of_device_id ls_pcie_of_match[] = { { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, + { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, { }, @@ -252,10 +260,8 @@ static int __init ls_pcie_probe(struct platform_device *pdev) dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base); - if (IS_ERR(pcie->pp.dbi_base)) { - dev_err(dev, "missing *regs* space\n"); + if (IS_ERR(pcie->pp.dbi_base)) return PTR_ERR(pcie->pp.dbi_base); - } pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset; diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 1eeefa4df64c5059de0353f5de8f3ab4019fdc8a..85348590848bdf16f2d8745179d90f582dd2c2c4 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -430,10 +430,10 @@ static int rcar_pci_probe(struct platform_device *pdev) } static struct of_device_id rcar_pci_of_match[] = { - { .compatible = "renesas,pci-rcar-gen2", }, { .compatible = "renesas,pci-r8a7790", }, { .compatible = "renesas,pci-r8a7791", }, { .compatible = "renesas,pci-r8a7794", }, + { .compatible = "renesas,pci-rcar-gen2", }, { }, }; diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 8dfccf7332411b379d262c165829efc3195ac21a..ed8a93f2bfb5fd4167f7362cddbc6eabe6dd85c8 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -51,10 +51,6 @@ #include #include -#include -#include -#include - #define INT_PCI_MSI_NR (8 * 32) /* register definitions */ @@ -188,6 +184,9 @@ #define RP_VEND_XP 0x00000f00 #define RP_VEND_XP_DL_UP (1 << 30) +#define RP_VEND_CTL2 0x00000fa8 +#define RP_VEND_CTL2_PCA_ENABLE (1 << 7) + #define RP_PRIV_MISC 
0x00000fe0 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) @@ -252,6 +251,7 @@ struct tegra_pcie_soc { bool has_intr_prsnt_sense; bool has_cml_clk; bool has_gen2; + bool force_pca_enable; }; static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) @@ -322,11 +322,6 @@ struct tegra_pcie_bus { unsigned int nr; }; -static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys) -{ - return sys->private_data; -} - static inline void afi_writel(struct tegra_pcie *pcie, u32 value, unsigned long offset) { @@ -385,8 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie, unsigned int busnr) { struct device *dev = pcie->dev; - pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED); + pgprot_t prot = pgprot_device(PAGE_KERNEL); phys_addr_t cs = pcie->cs->start; struct tegra_pcie_bus *bus; unsigned int i; @@ -430,7 +424,8 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie, static int tegra_pcie_add_bus(struct pci_bus *bus) { - struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata); + struct pci_host_bridge *host = pci_find_host_bridge(bus); + struct tegra_pcie *pcie = pci_host_bridge_priv(host); struct tegra_pcie_bus *b; b = tegra_pcie_bus_alloc(pcie, bus->number); @@ -444,7 +439,8 @@ static int tegra_pcie_add_bus(struct pci_bus *bus) static void tegra_pcie_remove_bus(struct pci_bus *child) { - struct tegra_pcie *pcie = sys_to_pcie(child->sysdata); + struct pci_host_bridge *host = pci_find_host_bridge(child); + struct tegra_pcie *pcie = pci_host_bridge_priv(host); struct tegra_pcie_bus *bus, *tmp; list_for_each_entry_safe(bus, tmp, &pcie->buses, list) { @@ -461,7 +457,8 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { - struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata); + struct pci_host_bridge *host = pci_find_host_bridge(bus); + struct tegra_pcie *pcie = pci_host_bridge_priv(host); struct device *dev = pcie->dev; void __iomem *addr = NULL; @@ -558,6 +555,12 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port) afi_writel(port->pcie, value, ctrl); tegra_pcie_port_reset(port); + + if (soc->force_pca_enable) { + value = readl(port->base + RP_VEND_CTL2); + value |= RP_VEND_CTL2_PCA_ENABLE; + writel(value, port->base + RP_VEND_CTL2); + } } static void tegra_pcie_port_disable(struct tegra_pcie_port *port) @@ -610,39 +613,31 @@ static void tegra_pcie_relax_enable(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); -static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) +static int tegra_pcie_request_resources(struct tegra_pcie *pcie) { - struct tegra_pcie *pcie = sys_to_pcie(sys); + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + struct list_head *windows = &host->windows; struct device *dev = pcie->dev; int err; - sys->mem_offset = pcie->offset.mem; - sys->io_offset = pcie->offset.io; + pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); + pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); + pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem); + pci_add_resource(windows, &pcie->busn); - err = devm_request_resource(dev, &iomem_resource, &pcie->io); + err = devm_request_pci_bus_resources(dev, windows); if (err < 0) return err; - err = pci_remap_iospace(&pcie->pio, pcie->io.start); - if (!err) - pci_add_resource_offset(&sys->resources, 
&pcie->pio, - sys->io_offset); + pci_remap_iospace(&pcie->pio, pcie->io.start); - pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); - pci_add_resource_offset(&sys->resources, &pcie->prefetch, - sys->mem_offset); - pci_add_resource(&sys->resources, &pcie->busn); - - err = devm_request_pci_bus_resources(dev, &sys->resources); - if (err < 0) - return err; - - return 1; + return 0; } static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { - struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata); + struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus); + struct tegra_pcie *pcie = pci_host_bridge_priv(host); int irq; tegra_cpuidle_pcie_irqs_in_use(); @@ -1499,10 +1494,11 @@ static const struct irq_domain_ops msi_domain_ops = { static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) { - struct device *dev = pcie->dev; - struct platform_device *pdev = to_platform_device(dev); + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + struct platform_device *pdev = to_platform_device(pcie->dev); const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_msi *msi = &pcie->msi; + struct device *dev = pcie->dev; unsigned long base; int err; u32 reg; @@ -1559,6 +1555,8 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) reg |= AFI_INTR_MASK_MSI_MASK; afi_writel(pcie, reg, AFI_INTR_MASK); + host->msi = &msi->chip; + return 0; err: @@ -1609,7 +1607,8 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes, struct device *dev = pcie->dev; struct device_node *np = dev->of_node; - if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { + if (of_device_is_compatible(np, "nvidia,tegra124-pcie") || + of_device_is_compatible(np, "nvidia,tegra210-pcie")) { switch (lanes) { case 0x0000104: dev_info(dev, "4x1, 1x1 configuration\n"); @@ -1730,7 +1729,22 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) struct device_node *np = dev->of_node; unsigned int i = 0; - if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { + if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) { + pcie->num_supplies = 6; + + pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[i++].supply = "avdd-pll-uerefe"; + pcie->supplies[i++].supply = "hvddio-pex"; + pcie->supplies[i++].supply = "dvddio-pex"; + pcie->supplies[i++].supply = "dvdd-pex-pll"; + pcie->supplies[i++].supply = "hvdd-pex-pll-e"; + pcie->supplies[i++].supply = "vddio-pex-ctl"; + } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { pcie->num_supplies = 7; pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, @@ -2021,11 +2035,10 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port) return false; } -static int tegra_pcie_enable(struct tegra_pcie *pcie) +static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct tegra_pcie_port *port, *tmp; - struct hw_pci hw; list_for_each_entry_safe(port, tmp, &pcie->ports, list) { dev_info(dev, "probing port %u, using %u lanes\n", @@ -2041,21 +2054,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie) tegra_pcie_port_disable(port); tegra_pcie_port_free(port); } - - memset(&hw, 0, sizeof(hw)); - -#ifdef CONFIG_PCI_MSI - hw.msi_ctrl = &pcie->msi.chip; -#endif - - hw.nr_controllers = 1; - hw.private_data = (void **)&pcie; - hw.setup = tegra_pcie_setup; - hw.map_irq = tegra_pcie_map_irq; - hw.ops = &tegra_pcie_ops; - - 
pci_common_init_dev(dev, &hw); - return 0; } static const struct tegra_pcie_soc tegra20_pcie = { @@ -2069,6 +2067,7 @@ static const struct tegra_pcie_soc tegra20_pcie = { .has_intr_prsnt_sense = false, .has_cml_clk = false, .has_gen2 = false, + .force_pca_enable = false, }; static const struct tegra_pcie_soc tegra30_pcie = { @@ -2083,6 +2082,7 @@ static const struct tegra_pcie_soc tegra30_pcie = { .has_intr_prsnt_sense = true, .has_cml_clk = true, .has_gen2 = false, + .force_pca_enable = false, }; static const struct tegra_pcie_soc tegra124_pcie = { @@ -2096,9 +2096,25 @@ static const struct tegra_pcie_soc tegra124_pcie = { .has_intr_prsnt_sense = true, .has_cml_clk = true, .has_gen2 = true, + .force_pca_enable = false, +}; + +static const struct tegra_pcie_soc tegra210_pcie = { + .num_ports = 2, + .msi_base_shift = 8, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0x90b890b8, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_intr_prsnt_sense = true, + .has_cml_clk = true, + .has_gen2 = true, + .force_pca_enable = true, }; static const struct of_device_id tegra_pcie_of_match[] = { + { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie }, { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie }, { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie }, { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie }, @@ -2217,13 +2233,17 @@ static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie) static int tegra_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct pci_host_bridge *host; struct tegra_pcie *pcie; + struct pci_bus *child; int err; - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) + host = pci_alloc_host_bridge(sizeof(*pcie)); + if (!host) return -ENOMEM; + pcie = pci_host_bridge_priv(host); + pcie->soc = of_device_get_match_data(dev); INIT_LIST_HEAD(&pcie->buses); INIT_LIST_HEAD(&pcie->ports); @@ -2243,6 +2263,10 @@ static int tegra_pcie_probe(struct platform_device *pdev) if (err) goto put_resources; + err = tegra_pcie_request_resources(pcie); + if (err) + goto put_resources; + /* setup the AFI address translations */ tegra_pcie_setup_translations(pcie); @@ -2254,12 +2278,30 @@ static int tegra_pcie_probe(struct platform_device *pdev) } } - err = tegra_pcie_enable(pcie); + tegra_pcie_enable_ports(pcie); + + pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); + host->busnr = pcie->busn.start; + host->dev.parent = &pdev->dev; + host->ops = &tegra_pcie_ops; + + err = pci_register_host_bridge(host); if (err < 0) { - dev_err(dev, "failed to enable PCIe ports: %d\n", err); + dev_err(dev, "failed to register host: %d\n", err); goto disable_msi; } + pci_scan_child_bus(host->bus); + + pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq); + pci_bus_size_bridges(host->bus); + pci_bus_assign_resources(host->bus); + + list_for_each_entry(child, &host->bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(host->bus); + if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_pcie_debugfs_init(pcie); if (err < 0) diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c index d50a3dc2d8db127e225df7bfd24dd435500526cc..3f54a43bbbea50740045592790377d4b009e5d76 100644 --- a/drivers/pci/host/pci-thunder-ecam.c +++ b/drivers/pci/host/pci-thunder-ecam.c @@ -14,6 +14,8 @@ #include #include +#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) + 
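+/*
+ * This guard makes the shared ECAM config accessors below available both to
+ * the OF platform driver and to the ACPI/MCFG quirk code (which is why
+ * pci_thunder_ecam_ops loses its "static"); only the platform driver at the
+ * bottom of the file is additionally wrapped in CONFIG_PCI_HOST_THUNDER_ECAM.
+ */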
static void set_val(u32 v, int where, int size, u32 *val) { int shift = (where & 3) * 8; @@ -346,7 +348,7 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn, return pci_generic_config_write(bus, devfn, where, size, val); } -static struct pci_ecam_ops pci_thunder_ecam_ops = { +struct pci_ecam_ops pci_thunder_ecam_ops = { .bus_shift = 20, .pci_ops = { .map_bus = pci_ecam_map_bus, @@ -355,6 +357,8 @@ static struct pci_ecam_ops pci_thunder_ecam_ops = { } }; +#ifdef CONFIG_PCI_HOST_THUNDER_ECAM + static const struct of_device_id thunder_ecam_of_match[] = { { .compatible = "cavium,pci-host-thunder-ecam" }, { }, @@ -373,3 +377,6 @@ static struct platform_driver thunder_ecam_driver = { .probe = thunder_ecam_probe, }; builtin_platform_driver(thunder_ecam_driver); + +#endif +#endif diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index 6abaf80ffb395d48668dc995782cdb4b53ff4067..af722eb0ca753312a8588d3291ceecd591312626 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c @@ -18,8 +18,12 @@ #include #include #include +#include #include #include +#include "../pci.h" + +#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) #define PEM_CFG_WR 0x28 #define PEM_CFG_RD 0x30 @@ -284,35 +288,16 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn, return pci_generic_config_write(bus, devfn, where, size, val); } -static int thunder_pem_init(struct pci_config_window *cfg) +static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg, + struct resource *res_pem) { - struct device *dev = cfg->parent; - resource_size_t bar4_start; - struct resource *res_pem; struct thunder_pem_pci *pem_pci; - struct platform_device *pdev; - - /* Only OF support for now */ - if (!dev->of_node) - return -EINVAL; + resource_size_t bar4_start; pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL); if (!pem_pci) return -ENOMEM; - pdev = to_platform_device(dev); - - /* - * The second register range is the PEM bridge to the PCIe - * bus. It has a different config access method than those - * devices behind the bridge. 
- */ - res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res_pem) { - dev_err(dev, "missing \"reg[1]\"property\n"); - return -EINVAL; - } - pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000); if (!pem_pci->pem_reg_base) return -ENOMEM; @@ -332,9 +317,69 @@ static int thunder_pem_init(struct pci_config_window *cfg) return 0; } +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) + +static int thunder_pem_acpi_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + struct acpi_pci_root *root = acpi_driver_data(adev); + struct resource *res_pem; + int ret; + + res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL); + if (!res_pem) + return -ENOMEM; + + ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem); + if (ret) { + dev_err(dev, "can't get rc base address\n"); + return ret; + } + + return thunder_pem_init(dev, cfg, res_pem); +} + +struct pci_ecam_ops thunder_pem_ecam_ops = { + .bus_shift = 24, + .init = thunder_pem_acpi_init, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = thunder_pem_config_read, + .write = thunder_pem_config_write, + } +}; + +#endif + +#ifdef CONFIG_PCI_HOST_THUNDER_PEM + +static int thunder_pem_platform_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res_pem; + + if (!dev->of_node) + return -EINVAL; + + /* + * The second register range is the PEM bridge to the PCIe + * bus. It has a different config access method than those + * devices behind the bridge. + */ + res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res_pem) { + dev_err(dev, "missing \"reg[1]\"property\n"); + return -EINVAL; + } + + return thunder_pem_init(dev, cfg, res_pem); +} + static struct pci_ecam_ops pci_thunder_pem_ops = { .bus_shift = 24, - .init = thunder_pem_init, + .init = thunder_pem_platform_init, .pci_ops = { .map_bus = pci_ecam_map_bus, .read = thunder_pem_config_read, @@ -360,3 +405,6 @@ static struct platform_driver thunder_pem_driver = { .probe = thunder_pem_probe, }; builtin_platform_driver(thunder_pem_driver); + +#endif +#endif diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index a6456b5782692a5d5559b9ef59226d2fbce99e3f..1f38d0836751af9f823d848218d7dc17d9dad43f 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c @@ -360,16 +360,16 @@ static void xgene_msi_isr(struct irq_desc *desc) chained_irq_exit(chip, desc); } +static enum cpuhp_state pci_xgene_online; + static int xgene_msi_remove(struct platform_device *pdev) { - int virq, i; struct xgene_msi *msi = platform_get_drvdata(pdev); - for (i = 0; i < NR_HW_IRQS; i++) { - virq = msi->msi_groups[i].gic_irq; - if (virq != 0) - irq_set_chained_handler_and_data(virq, NULL, NULL); - } + if (pci_xgene_online) + cpuhp_remove_state(pci_xgene_online); + cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD); + kfree(msi->msi_groups); kfree(msi->bitmap); @@ -427,7 +427,7 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu) return 0; } -static void xgene_msi_hwirq_free(unsigned int cpu) +static int xgene_msi_hwirq_free(unsigned int cpu) { struct xgene_msi *msi = &xgene_msi_ctrl; struct xgene_msi_group *msi_group; @@ -441,33 +441,9 @@ static void xgene_msi_hwirq_free(unsigned int cpu) irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, NULL); } + return 0; } -static int xgene_msi_cpu_callback(struct notifier_block *nfb, - unsigned long 
action, void *hcpu) -{ - unsigned cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - xgene_msi_hwirq_alloc(cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - xgene_msi_hwirq_free(cpu); - break; - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block xgene_msi_cpu_notifier = { - .notifier_call = xgene_msi_cpu_callback, -}; - static const struct of_device_id xgene_msi_match_table[] = { {.compatible = "apm,xgene1-msi"}, {}, @@ -478,7 +454,6 @@ static int xgene_msi_probe(struct platform_device *pdev) struct resource *res; int rc, irq_index; struct xgene_msi *xgene_msi; - unsigned int cpu; int virt_msir; u32 msi_val, msi_idx; @@ -540,28 +515,22 @@ static int xgene_msi_probe(struct platform_device *pdev) } } - cpu_notifier_register_begin(); - - for_each_online_cpu(cpu) - if (xgene_msi_hwirq_alloc(cpu)) { - dev_err(&pdev->dev, "failed to register MSI handlers\n"); - cpu_notifier_register_done(); - goto error; - } - - rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier); - if (rc) { - dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); - cpu_notifier_register_done(); - goto error; - } - - cpu_notifier_register_done(); + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", + xgene_msi_hwirq_alloc, NULL); + if (rc) + goto err_cpuhp; + pci_xgene_online = rc; + rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, + xgene_msi_hwirq_free); + if (rc) + goto err_cpuhp; dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); return 0; +err_cpuhp: + dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); error: xgene_msi_remove(pdev); return rc; diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 1de23d74783f88d4a3dc59cd69810c172b7bf5cc..7c3b54b9eb17c58a9c80e9dae888faddeba8c546 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include @@ -64,7 +66,9 @@ /* PCIe IP version */ #define XGENE_PCIE_IP_VER_UNKN 0 #define XGENE_PCIE_IP_VER_1 1 +#define XGENE_PCIE_IP_VER_2 2 +#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) struct xgene_pcie_port { struct device_node *node; struct device *dev; @@ -91,13 +95,24 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags) return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; } +static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus) +{ + struct pci_config_window *cfg; + + if (acpi_disabled) + return (struct xgene_pcie_port *)(bus->sysdata); + + cfg = bus->sysdata; + return (struct xgene_pcie_port *)(cfg->priv); +} + /* * When the address bit [17:16] is 2'b01, the Configuration access will be * treated as Type 1 and it will be forwarded to external PCIe device. 
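+ * xgene_pcie_get_cfg_base() below implements this by adding
+ * AXI_EP_CFG_ACCESS to the mapped config space for any bus other than
+ * the root bus, while accesses to the root bus itself use the plain base.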
*/ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) { - struct xgene_pcie_port *port = bus->sysdata; + struct xgene_pcie_port *port = pcie_bus_to_port(bus); if (bus->number >= (bus->primary + 1)) return port->cfg_base + AXI_EP_CFG_ACCESS; @@ -111,7 +126,7 @@ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) */ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) { - struct xgene_pcie_port *port = bus->sysdata; + struct xgene_pcie_port *port = pcie_bus_to_port(bus); unsigned int b, d, f; u32 rtdid_val = 0; @@ -158,7 +173,7 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - struct xgene_pcie_port *port = bus->sysdata; + struct xgene_pcie_port *port = pcie_bus_to_port(bus); if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != PCIBIOS_SUCCESSFUL) @@ -182,13 +197,103 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; } +#endif -static struct pci_ops xgene_pcie_ops = { - .map_bus = xgene_pcie_map_bus, - .read = xgene_pcie_config_read32, - .write = pci_generic_config_write32, +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) +static int xgene_get_csr_resource(struct acpi_device *adev, + struct resource *res) +{ + struct device *dev = &adev->dev; + struct resource_entry *entry; + struct list_head list; + unsigned long flags; + int ret; + + INIT_LIST_HEAD(&list); + flags = IORESOURCE_MEM; + ret = acpi_dev_get_resources(adev, &list, + acpi_dev_filter_resource_type_cb, + (void *) flags); + if (ret < 0) { + dev_err(dev, "failed to parse _CRS method, error code %d\n", + ret); + return ret; + } + + if (ret == 0) { + dev_err(dev, "no IO and memory resources present in _CRS\n"); + return -EINVAL; + } + + entry = list_first_entry(&list, struct resource_entry, node); + *res = *entry->res; + acpi_dev_free_resource_list(&list); + return 0; +} + +static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion) +{ + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + struct xgene_pcie_port *port; + struct resource csr; + int ret; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + ret = xgene_get_csr_resource(adev, &csr); + if (ret) { + dev_err(dev, "can't get CSR resource\n"); + kfree(port); + return ret; + } + port->csr_base = devm_ioremap_resource(dev, &csr); + if (IS_ERR(port->csr_base)) { + kfree(port); + return -ENOMEM; + } + + port->cfg_base = cfg->win; + port->version = ipversion; + + cfg->priv = port; + return 0; +} + +static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg) +{ + return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1); +} + +struct pci_ecam_ops xgene_v1_pcie_ecam_ops = { + .bus_shift = 16, + .init = xgene_v1_pcie_ecam_init, + .pci_ops = { + .map_bus = xgene_pcie_map_bus, + .read = xgene_pcie_config_read32, + .write = pci_generic_config_write, + } +}; + +static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg) +{ + return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2); +} + +struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { + .bus_shift = 16, + .init = xgene_v2_pcie_ecam_init, + .pci_ops = { + .map_bus = xgene_pcie_map_bus, + .read = xgene_pcie_config_read32, + .write = pci_generic_config_write, + } }; +#endif +#if defined(CONFIG_PCI_XGENE) static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr, u32 
flags, u64 size) { @@ -521,6 +626,12 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, return 0; } +static struct pci_ops xgene_pcie_ops = { + .map_bus = xgene_pcie_map_bus, + .read = xgene_pcie_config_read32, + .write = pci_generic_config_write32, +}; + static int xgene_pcie_probe_bridge(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -591,3 +702,4 @@ static struct platform_driver xgene_pcie_driver = { .probe = xgene_pcie_probe_bridge, }; builtin_platform_driver(xgene_pcie_driver); +#endif diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index b0ac4dfafa0bbb116bfc80d754ac612c197171be..0c1540225ca3fb0fd32c8df363f19a270cbbaaf4 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -550,10 +550,8 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); pcie->cra_base = devm_ioremap_resource(dev, cra); - if (IS_ERR(pcie->cra_base)) { - dev_err(dev, "failed to map cra memory\n"); + if (IS_ERR(pcie->cra_base)) return PTR_ERR(pcie->cra_base); - } /* setup IRQ */ pcie->irq = platform_get_irq(pdev, 0); @@ -641,8 +639,4 @@ static struct platform_driver altera_pcie_driver = { }, }; -static int altera_pcie_init(void) -{ - return platform_driver_register(&altera_pcie_driver); -} -device_initcall(altera_pcie_init); +builtin_platform_driver(altera_pcie_driver); diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c index 8df6312ed3000a9c0587f318416e20f2329f55cc..1a02038c464021b9580a37e2ee70b71a365ecc28 100644 --- a/drivers/pci/host/pcie-designware-plat.c +++ b/drivers/pci/host/pcie-designware-plat.c @@ -3,7 +3,7 @@ * * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) * - * Authors: Joao Pinto + * Authors: Joao Pinto * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c index 56154c25980c6d83b1a155fc07e7520e8cee3ac1..a301a7187b30ff31192da2113f107c786790b305 100644 --- a/drivers/pci/host/pcie-hisi.c +++ b/drivers/pci/host/pcie-hisi.c @@ -18,7 +18,106 @@ #include #include #include +#include +#include +#include #include +#include "../pci.h" + +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) + +static int hisi_pcie_acpi_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct pci_config_window *cfg = bus->sysdata; + int dev = PCI_SLOT(devfn); + + if (bus->number == cfg->busr.start) { + /* access only one slot on each root port */ + if (dev > 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return pci_generic_config_read32(bus, devfn, where, + size, val); + } + + return pci_generic_config_read(bus, devfn, where, size, val); +} + +static int hisi_pcie_acpi_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct pci_config_window *cfg = bus->sysdata; + int dev = PCI_SLOT(devfn); + + if (bus->number == cfg->busr.start) { + /* access only one slot on each root port */ + if (dev > 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return pci_generic_config_write32(bus, devfn, where, + size, val); + } + + return pci_generic_config_write(bus, devfn, where, size, val); +} + +static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct pci_config_window *cfg = bus->sysdata; + void __iomem *reg_base = cfg->priv; + + if (bus->number == cfg->busr.start) + return 
reg_base + where; + else + return pci_ecam_map_bus(bus, devfn, where); +} + +static int hisi_pcie_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + struct acpi_pci_root *root = acpi_driver_data(adev); + struct resource *res; + void __iomem *reg_base; + int ret; + + /* + * Retrieve RC base and size from a HISI0081 device with _UID + * matching our segment. + */ + res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res); + if (ret) { + dev_err(dev, "can't get rc base address\n"); + return -ENOMEM; + } + + reg_base = devm_ioremap(dev, res->start, resource_size(res)); + if (!reg_base) + return -ENOMEM; + + cfg->priv = reg_base; + return 0; +} + +struct pci_ecam_ops hisi_pcie_ops = { + .bus_shift = 20, + .init = hisi_pcie_init, + .pci_ops = { + .map_bus = hisi_pcie_map_bus, + .read = hisi_pcie_acpi_rd_conf, + .write = hisi_pcie_acpi_wr_conf, + } +}; + +#endif + +#ifdef CONFIG_PCI_HISI #include "pcie-designware.h" @@ -185,17 +284,13 @@ static int hisi_pcie_probe(struct platform_device *pdev) reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); pp->dbi_base = devm_ioremap_resource(dev, reg); - if (IS_ERR(pp->dbi_base)) { - dev_err(dev, "cannot get rc_dbi base\n"); + if (IS_ERR(pp->dbi_base)) return PTR_ERR(pp->dbi_base); - } ret = hisi_add_pcie_port(hisi_pcie, pdev); if (ret) return ret; - dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n"); - return 0; } @@ -227,3 +322,5 @@ static struct platform_driver hisi_pcie_driver = { }, }; builtin_platform_driver(hisi_pcie_driver); + +#endif diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c index 8ce089043a27dcd05be911bd1cd953e4abe92af0..bd4c9ec25edc22531ae450b2bb08f1d2aecd7b62 100644 --- a/drivers/pci/host/pcie-iproc-bcma.c +++ b/drivers/pci/host/pcie-iproc-bcma.c @@ -54,6 +54,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev) pcie->dev = dev; + pcie->type = IPROC_PCIE_PAXB_BCMA; pcie->base = bdev->io_addr; if (!pcie->base) { dev_err(dev, "no controller registers\n"); diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c index 9a2973bdc78a97deb148eed7635f72d49d0e5909..9fad7915f82aa4bbaf9c74e3e2ffa568e983e989 100644 --- a/drivers/pci/host/pcie-iproc-msi.c +++ b/drivers/pci/host/pcie-iproc-msi.c @@ -563,6 +563,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) } switch (pcie->type) { + case IPROC_PCIE_PAXB_BCMA: case IPROC_PCIE_PAXB: msi->reg_offsets = iproc_msi_reg_paxb; msi->nr_eq_region = 1; diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index a3de087976b31b2586d0f1f064ee892805cac4b9..22d814a78a78bc60eaa61da82ceee561e62eb4d2 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -30,9 +30,15 @@ static const struct of_device_id iproc_pcie_of_match_table[] = { { .compatible = "brcm,iproc-pcie", .data = (int *)IPROC_PCIE_PAXB, + }, { + .compatible = "brcm,iproc-pcie-paxb-v2", + .data = (int *)IPROC_PCIE_PAXB_V2, }, { .compatible = "brcm,iproc-pcie-paxc", .data = (int *)IPROC_PCIE_PAXC, + }, { + .compatible = "brcm,iproc-pcie-paxc-v2", + .data = (int *)IPROC_PCIE_PAXC_V2, }, { /* sentinel */ } }; @@ -84,19 +90,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) return ret; } pcie->ob.axi_offset = val; - - ret = 
of_property_read_u32(np, "brcm,pcie-ob-window-size", - &val); - if (ret) { - dev_err(dev, - "missing brcm,pcie-ob-window-size property\n"); - return ret; - } - pcie->ob.window_size = (resource_size_t)val * SZ_1M; - - if (of_property_read_bool(np, "brcm,pcie-ob-oarr-size")) - pcie->ob.set_oarr_size = true; - pcie->need_ob_cfg = true; } @@ -115,7 +108,14 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) return ret; } - pcie->map_irq = of_irq_parse_and_map_pci; + /* PAXC doesn't support legacy IRQs, skip mapping */ + switch (pcie->type) { + case IPROC_PCIE_PAXC: + case IPROC_PCIE_PAXC_V2: + break; + default: + pcie->map_irq = of_irq_parse_and_map_pci; + } ret = iproc_pcie_setup(pcie, &res); if (ret) diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index 0b999a9fb843e8444aefe0e15c733145924fc786..3ebc025499b972ba47cef16e192f54f87bb674ba 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,12 @@ #define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) #define PAXC_RESET_MASK 0x7f +#define GIC_V3_CFG_SHIFT 0 +#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT) + +#define MSI_ENABLE_CFG_SHIFT 0 +#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT) + #define CFG_IND_ADDR_MASK 0x00001ffc #define CFG_ADDR_BUS_NUM_SHIFT 20 @@ -58,59 +65,319 @@ #define PCIE_DL_ACTIVE_SHIFT 2 #define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) +#define APB_ERR_EN_SHIFT 0 +#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) + +/* derive the enum index of the outbound/inbound mapping registers */ +#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) + +/* + * Maximum number of outbound mapping window sizes that can be supported by any + * OARR/OMAP mapping pair + */ +#define MAX_NUM_OB_WINDOW_SIZES 4 + #define OARR_VALID_SHIFT 0 #define OARR_VALID BIT(OARR_VALID_SHIFT) #define OARR_SIZE_CFG_SHIFT 1 -#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) -#define PCI_EXP_CAP 0xac +/* + * Maximum number of inbound mapping region sizes that can be supported by an + * IARR + */ +#define MAX_NUM_IB_REGION_SIZES 9 + +#define IMAP_VALID_SHIFT 0 +#define IMAP_VALID BIT(IMAP_VALID_SHIFT) -#define MAX_NUM_OB_WINDOWS 2 +#define PCI_EXP_CAP 0xac #define IPROC_PCIE_REG_INVALID 0xffff +/** + * iProc PCIe outbound mapping controller specific parameters + * + * @window_sizes: list of supported outbound mapping window sizes in MB + * @nr_sizes: number of supported outbound mapping window sizes + */ +struct iproc_pcie_ob_map { + resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES]; + unsigned int nr_sizes; +}; + +static const struct iproc_pcie_ob_map paxb_ob_map[] = { + { + /* OARR0/OMAP0 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, + { + /* OARR1/OMAP1 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, +}; + +static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = { + { + /* OARR0/OMAP0 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, + { + /* OARR1/OMAP1 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, + { + /* OARR2/OMAP2 */ + .window_sizes = { 128, 256, 512, 1024 }, + .nr_sizes = 4, + }, + { + /* OARR3/OMAP3 */ + .window_sizes = { 128, 256, 512, 1024 }, + .nr_sizes = 4, + }, +}; + +/** + * iProc PCIe inbound mapping type + */ +enum iproc_pcie_ib_map_type { + /* for DDR memory */ + IPROC_PCIE_IB_MAP_MEM = 0, + + /* for device I/O memory */ + IPROC_PCIE_IB_MAP_IO, + + /* invalid or unused */ + IPROC_PCIE_IB_MAP_INVALID +}; + +/** + * iProc PCIe inbound mapping controller 
specific parameters + * + * @type: inbound mapping region type + * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or + * SZ_1G + * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or + * GB, depending on the size unit + * @nr_sizes: number of supported inbound mapping region sizes + * @nr_windows: number of supported inbound mapping windows for the region + * @imap_addr_offset: register offset between the upper and lower 32-bit + * IMAP address registers + * @imap_window_offset: register offset between each IMAP window + */ +struct iproc_pcie_ib_map { + enum iproc_pcie_ib_map_type type; + unsigned int size_unit; + resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES]; + unsigned int nr_sizes; + unsigned int nr_windows; + u16 imap_addr_offset; + u16 imap_window_offset; +}; + +static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = { + { + /* IARR0/IMAP0 */ + .type = IPROC_PCIE_IB_MAP_IO, + .size_unit = SZ_1K, + .region_sizes = { 32 }, + .nr_sizes = 1, + .nr_windows = 8, + .imap_addr_offset = 0x40, + .imap_window_offset = 0x4, + }, + { + /* IARR1/IMAP1 (currently unused) */ + .type = IPROC_PCIE_IB_MAP_INVALID, + }, + { + /* IARR2/IMAP2 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1M, + .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192, + 16384 }, + .nr_sizes = 9, + .nr_windows = 1, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, + { + /* IARR3/IMAP3 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1G, + .region_sizes = { 1, 2, 4, 8, 16, 32 }, + .nr_sizes = 6, + .nr_windows = 8, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, + { + /* IARR4/IMAP4 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1G, + .region_sizes = { 32, 64, 128, 256, 512 }, + .nr_sizes = 5, + .nr_windows = 8, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, +}; + +/* + * iProc PCIe host registers + */ enum iproc_pcie_reg { + /* clock/reset signal control */ IPROC_PCIE_CLK_CTRL = 0, + + /* + * To allow MSI to be steered to an external MSI controller (e.g., ARM + * GICv3 ITS) + */ + IPROC_PCIE_MSI_GIC_MODE, + + /* + * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the + * window where the MSI posted writes are written, for the writes to be + * interpreted as MSI writes. + */ + IPROC_PCIE_MSI_BASE_ADDR, + IPROC_PCIE_MSI_WINDOW_SIZE, + + /* + * To hold the address of the register where the MSI writes are + * programmed. When ARM GICv3 ITS is used, this should be programmed + * with the address of the GITS_TRANSLATER register. 
+ */ + IPROC_PCIE_MSI_ADDR_LO, + IPROC_PCIE_MSI_ADDR_HI, + + /* enable MSI */ + IPROC_PCIE_MSI_EN_CFG, + + /* allow access to root complex configuration space */ IPROC_PCIE_CFG_IND_ADDR, IPROC_PCIE_CFG_IND_DATA, + + /* allow access to device configuration space */ IPROC_PCIE_CFG_ADDR, IPROC_PCIE_CFG_DATA, + + /* enable INTx */ IPROC_PCIE_INTX_EN, - IPROC_PCIE_OARR_LO, - IPROC_PCIE_OARR_HI, - IPROC_PCIE_OMAP_LO, - IPROC_PCIE_OMAP_HI, + + /* outbound address mapping */ + IPROC_PCIE_OARR0, + IPROC_PCIE_OMAP0, + IPROC_PCIE_OARR1, + IPROC_PCIE_OMAP1, + IPROC_PCIE_OARR2, + IPROC_PCIE_OMAP2, + IPROC_PCIE_OARR3, + IPROC_PCIE_OMAP3, + + /* inbound address mapping */ + IPROC_PCIE_IARR0, + IPROC_PCIE_IMAP0, + IPROC_PCIE_IARR1, + IPROC_PCIE_IMAP1, + IPROC_PCIE_IARR2, + IPROC_PCIE_IMAP2, + IPROC_PCIE_IARR3, + IPROC_PCIE_IMAP3, + IPROC_PCIE_IARR4, + IPROC_PCIE_IMAP4, + + /* link status */ IPROC_PCIE_LINK_STATUS, + + /* enable APB error for unsupported requests */ + IPROC_PCIE_APB_ERR_EN, + + /* total number of core registers */ + IPROC_PCIE_MAX_NUM_REG, +}; + +/* iProc PCIe PAXB BCMA registers */ +static const u16 iproc_pcie_reg_paxb_bcma[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, }; /* iProc PCIe PAXB registers */ static const u16 iproc_pcie_reg_paxb[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x120, - [IPROC_PCIE_CFG_IND_DATA] = 0x124, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, - [IPROC_PCIE_INTX_EN] = 0x330, - [IPROC_PCIE_OARR_LO] = 0xd20, - [IPROC_PCIE_OARR_HI] = 0xd24, - [IPROC_PCIE_OMAP_LO] = 0xd40, - [IPROC_PCIE_OMAP_HI] = 0xd44, - [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_OARR0] = 0xd20, + [IPROC_PCIE_OMAP0] = 0xd40, + [IPROC_PCIE_OARR1] = 0xd28, + [IPROC_PCIE_OMAP1] = 0xd48, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_APB_ERR_EN] = 0xf40, +}; + +/* iProc PCIe PAXB v2 registers */ +static const u16 iproc_pcie_reg_paxb_v2[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_OARR0] = 0xd20, + [IPROC_PCIE_OMAP0] = 0xd40, + [IPROC_PCIE_OARR1] = 0xd28, + [IPROC_PCIE_OMAP1] = 0xd48, + [IPROC_PCIE_OARR2] = 0xd60, + [IPROC_PCIE_OMAP2] = 0xd68, + [IPROC_PCIE_OARR3] = 0xdf0, + [IPROC_PCIE_OMAP3] = 0xdf8, + [IPROC_PCIE_IARR0] = 0xd00, + [IPROC_PCIE_IMAP0] = 0xc00, + [IPROC_PCIE_IARR2] = 0xd10, + [IPROC_PCIE_IMAP2] = 0xcc0, + [IPROC_PCIE_IARR3] = 0xe00, + [IPROC_PCIE_IMAP3] = 0xe08, + [IPROC_PCIE_IARR4] = 0xe68, + [IPROC_PCIE_IMAP4] = 0xe70, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_APB_ERR_EN] = 0xf40, }; /* iProc PCIe PAXC v1 registers */ static const u16 iproc_pcie_reg_paxc[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, - [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, - [IPROC_PCIE_INTX_EN] = IPROC_PCIE_REG_INVALID, - [IPROC_PCIE_OARR_LO] = IPROC_PCIE_REG_INVALID, - [IPROC_PCIE_OARR_HI] = IPROC_PCIE_REG_INVALID, - [IPROC_PCIE_OMAP_LO] = IPROC_PCIE_REG_INVALID, - [IPROC_PCIE_OMAP_HI] = 
IPROC_PCIE_REG_INVALID, - [IPROC_PCIE_LINK_STATUS] = IPROC_PCIE_REG_INVALID, + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, +}; + +/* iProc PCIe PAXC v2 registers */ +static const u16 iproc_pcie_reg_paxc_v2[] = { + [IPROC_PCIE_MSI_GIC_MODE] = 0x050, + [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, + [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, + [IPROC_PCIE_MSI_ADDR_LO] = 0x07c, + [IPROC_PCIE_MSI_ADDR_HI] = 0x080, + [IPROC_PCIE_MSI_EN_CFG] = 0x09c, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, }; static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) @@ -159,16 +426,26 @@ static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, writel(val, pcie->base + offset); } -static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie, - enum iproc_pcie_reg reg, - unsigned window, u32 val) +/** + * APB error forwarding can be disabled during access of configuration + * registers of the endpoint device, to prevent unsupported requests + * (typically seen during enumeration with multi-function devices) from + * triggering a system exception. + */ +static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, + bool disable) { - u16 offset = iproc_pcie_reg_offset(pcie, reg); - - if (iproc_pcie_reg_is_invalid(offset)) - return; + struct iproc_pcie *pcie = iproc_data(bus); + u32 val; - writel(val, pcie->base + offset + (window * 8)); + if (bus->number && pcie->has_apb_err_disable) { + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN); + if (disable) + val &= ~APB_ERR_EN; + else + val |= APB_ERR_EN; + iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val); + } } /** @@ -204,7 +481,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, * PAXC is connected to an internally emulated EP within the SoC. It * allows only one device. */ - if (pcie->type == IPROC_PCIE_PAXC) + if (pcie->ep_is_internal) if (slot > 0) return NULL; @@ -222,26 +499,47 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, return (pcie->base + offset); } +static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + int ret; + + iproc_pcie_apb_err_disable(bus, true); + ret = pci_generic_config_read32(bus, devfn, where, size, val); + iproc_pcie_apb_err_disable(bus, false); + + return ret; +} + +static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + int ret; + + iproc_pcie_apb_err_disable(bus, true); + ret = pci_generic_config_write32(bus, devfn, where, size, val); + iproc_pcie_apb_err_disable(bus, false); + + return ret; +} + static struct pci_ops iproc_pcie_ops = { .map_bus = iproc_pcie_map_cfg_bus, - .read = pci_generic_config_read32, - .write = pci_generic_config_write32, + .read = iproc_pcie_config_read32, + .write = iproc_pcie_config_write32, }; static void iproc_pcie_reset(struct iproc_pcie *pcie) { u32 val; - if (pcie->type == IPROC_PCIE_PAXC) { - val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); - val &= ~PAXC_RESET_MASK; - iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); - udelay(100); - val |= PAXC_RESET_MASK; - iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); - udelay(100); + /* + * PAXC and the internal emulated endpoint device downstream should not + * be reset. 
If firmware has been loaded on the endpoint device at an + * earlier boot stage, reset here causes issues. + */ + if (pcie->ep_is_internal) return; - } /* * Select perst_b signal as reset source. Put the device into reset, @@ -270,7 +568,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) * PAXC connects to emulated endpoint devices directly and does not * have a Serdes. Therefore skip the link detection logic here. */ - if (pcie->type == IPROC_PCIE_PAXC) + if (pcie->ep_is_internal) return 0; val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); @@ -334,6 +632,58 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie) iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK); } +static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie, + int window_idx) +{ + u32 val; + + val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); + + return !!(val & OARR_VALID); +} + +static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx, + int size_idx, u64 axi_addr, u64 pci_addr) +{ + struct device *dev = pcie->dev; + u16 oarr_offset, omap_offset; + + /* + * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based + * on window index. + */ + oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0, + window_idx)); + omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0, + window_idx)); + if (iproc_pcie_reg_is_invalid(oarr_offset) || + iproc_pcie_reg_is_invalid(omap_offset)) + return -EINVAL; + + /* + * Program the OARR registers. The upper 32-bit OARR register is + * always right after the lower 32-bit OARR register. + */ + writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) | + OARR_VALID, pcie->base + oarr_offset); + writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4); + + /* now program the OMAP registers */ + writel(lower_32_bits(pci_addr), pcie->base + omap_offset); + writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4); + + dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n", + window_idx, oarr_offset, &axi_addr, &pci_addr); + dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n", + readl(pcie->base + oarr_offset), + readl(pcie->base + oarr_offset + 4)); + dev_info(dev, "omap lo 0x%x omap hi 0x%x\n", + readl(pcie->base + omap_offset), + readl(pcie->base + omap_offset + 4)); + + return 0; +} + /** * Some iProc SoCs require the SW to configure the outbound address mapping * @@ -350,24 +700,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, { struct iproc_pcie_ob *ob = &pcie->ob; struct device *dev = pcie->dev; - unsigned i; - u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS; - u64 remainder; - - if (size > max_size) { - dev_err(dev, - "res size %pap exceeds max supported size 0x%llx\n", - &size, max_size); - return -EINVAL; - } - - div64_u64_rem(size, ob->window_size, &remainder); - if (remainder) { - dev_err(dev, - "res size %pap needs to be multiple of window size %pap\n", - &size, &ob->window_size); - return -EINVAL; - } + int ret = -EINVAL, window_idx, size_idx; if (axi_addr < ob->axi_offset) { dev_err(dev, "axi address %pap less than offset %pap\n", @@ -381,26 +714,70 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, */ axi_addr -= ob->axi_offset; - for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) { - iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_LO, i, - lower_32_bits(axi_addr) | OARR_VALID | - (ob->set_oarr_size ? 
1 : 0)); - iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_HI, i, - upper_32_bits(axi_addr)); - iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_LO, i, - lower_32_bits(pci_addr)); - iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_HI, i, - upper_32_bits(pci_addr)); - - size -= ob->window_size; - if (size == 0) + /* iterate through all OARR/OMAP mapping windows */ + for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { + const struct iproc_pcie_ob_map *ob_map = + &pcie->ob_map[window_idx]; + + /* + * If current outbound window is already in use, move on to the + * next one. + */ + if (iproc_pcie_ob_is_valid(pcie, window_idx)) + continue; + + /* + * Iterate through all supported window sizes within the + * OARR/OMAP pair to find a match. Go through the window sizes + * in a descending order. + */ + for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0; + size_idx--) { + resource_size_t window_size = + ob_map->window_sizes[size_idx] * SZ_1M; + + if (size < window_size) + continue; + + if (!IS_ALIGNED(axi_addr, window_size) || + !IS_ALIGNED(pci_addr, window_size)) { + dev_err(dev, + "axi %pap or pci %pap not aligned\n", + &axi_addr, &pci_addr); + return -EINVAL; + } + + /* + * Match found! Program both OARR and OMAP and mark + * them as a valid entry. + */ + ret = iproc_pcie_ob_write(pcie, window_idx, size_idx, + axi_addr, pci_addr); + if (ret) + goto err_ob; + + size -= window_size; + if (size == 0) + return 0; + + /* + * If we are here, we are done with the current window, + * but not yet finished all mappings. Need to move on + * to the next window. + */ + axi_addr += window_size; + pci_addr += window_size; break; - - axi_addr += ob->window_size; - pci_addr += ob->window_size; + } } - return 0; +err_ob: + dev_err(dev, "unable to configure outbound mapping\n"); + dev_err(dev, + "axi %pap, axi offset %pap, pci %pap, res size %pap\n", + &axi_addr, &ob->axi_offset, &pci_addr, &size); + + return ret; } static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, @@ -434,13 +811,323 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, return 0; } +static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie, + int region_idx) +{ + const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; + u32 val; + + val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); + + return !!(val & (BIT(ib_map->nr_sizes) - 1)); +} + +static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map, + enum iproc_pcie_ib_map_type type) +{ + return !!(ib_map->type == type); +} + +static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, + int size_idx, int nr_windows, u64 axi_addr, + u64 pci_addr, resource_size_t size) +{ + struct device *dev = pcie->dev; + const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; + u16 iarr_offset, imap_offset; + u32 val; + int window_idx; + + iarr_offset = iproc_pcie_reg_offset(pcie, + MAP_REG(IPROC_PCIE_IARR0, region_idx)); + imap_offset = iproc_pcie_reg_offset(pcie, + MAP_REG(IPROC_PCIE_IMAP0, region_idx)); + if (iproc_pcie_reg_is_invalid(iarr_offset) || + iproc_pcie_reg_is_invalid(imap_offset)) + return -EINVAL; + + dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n", + region_idx, iarr_offset, &axi_addr, &pci_addr); + + /* + * Program the IARR registers. The upper 32-bit IARR register is + * always right after the lower 32-bit IARR register. 
+ */ + writel(lower_32_bits(pci_addr) | BIT(size_idx), + pcie->base + iarr_offset); + writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4); + + dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n", + readl(pcie->base + iarr_offset), + readl(pcie->base + iarr_offset + 4)); + + /* + * Now program the IMAP registers. Each IARR region may have one or + * more IMAP windows. + */ + size >>= ilog2(nr_windows); + for (window_idx = 0; window_idx < nr_windows; window_idx++) { + val = readl(pcie->base + imap_offset); + val |= lower_32_bits(axi_addr) | IMAP_VALID; + writel(val, pcie->base + imap_offset); + writel(upper_32_bits(axi_addr), + pcie->base + imap_offset + ib_map->imap_addr_offset); + + dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n", + window_idx, readl(pcie->base + imap_offset), + readl(pcie->base + imap_offset + + ib_map->imap_addr_offset)); + + imap_offset += ib_map->imap_window_offset; + axi_addr += size; + } + + return 0; +} + +static int iproc_pcie_setup_ib(struct iproc_pcie *pcie, + struct of_pci_range *range, + enum iproc_pcie_ib_map_type type) +{ + struct device *dev = pcie->dev; + struct iproc_pcie_ib *ib = &pcie->ib; + int ret; + unsigned int region_idx, size_idx; + u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr; + resource_size_t size = range->size; + + /* iterate through all IARR mapping regions */ + for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { + const struct iproc_pcie_ib_map *ib_map = + &pcie->ib_map[region_idx]; + + /* + * If current inbound region is already in use or not a + * compatible type, move on to the next. + */ + if (iproc_pcie_ib_is_in_use(pcie, region_idx) || + !iproc_pcie_ib_check_type(ib_map, type)) + continue; + + /* iterate through all supported region sizes to find a match */ + for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) { + resource_size_t region_size = + ib_map->region_sizes[size_idx] * ib_map->size_unit; + + if (size != region_size) + continue; + + if (!IS_ALIGNED(axi_addr, region_size) || + !IS_ALIGNED(pci_addr, region_size)) { + dev_err(dev, + "axi %pap or pci %pap not aligned\n", + &axi_addr, &pci_addr); + return -EINVAL; + } + + /* Match found! Program IARR and all IMAP windows. 
*/ + ret = iproc_pcie_ib_write(pcie, region_idx, size_idx, + ib_map->nr_windows, axi_addr, + pci_addr, size); + if (ret) + goto err_ib; + else + return 0; + + } + } + ret = -EINVAL; + +err_ib: + dev_err(dev, "unable to configure inbound mapping\n"); + dev_err(dev, "axi %pap, pci %pap, res size %pap\n", + &axi_addr, &pci_addr, &size); + + return ret; +} + +static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + const int na = 3, ns = 2; + int rlen; + + parser->node = node; + parser->pna = of_n_addr_cells(node); + parser->np = parser->pna + na + ns; + + parser->range = of_get_property(node, "dma-ranges", &rlen); + if (!parser->range) + return -ENOENT; + + parser->end = parser->range + rlen / sizeof(__be32); + return 0; +} + +static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + int ret; + + /* Get the dma-ranges from DT */ + ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node); + if (ret) + return ret; + + for_each_of_pci_range(&parser, &range) { + /* Each range entry corresponds to an inbound mapping region */ + ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM); + if (ret) + return ret; + } + + return 0; +} + +static int iproce_pcie_get_msi(struct iproc_pcie *pcie, + struct device_node *msi_node, + u64 *msi_addr) +{ + struct device *dev = pcie->dev; + int ret; + struct resource res; + + /* + * Check if 'msi-map' points to ARM GICv3 ITS, which is the only + * supported external MSI controller that requires steering. + */ + if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) { + dev_err(dev, "unable to find compatible MSI controller\n"); + return -ENODEV; + } + + /* derive GITS_TRANSLATER address from GICv3 */ + ret = of_address_to_resource(msi_node, 0, &res); + if (ret < 0) { + dev_err(dev, "unable to obtain MSI controller resources\n"); + return ret; + } + + *msi_addr = res.start + GITS_TRANSLATER; + return 0; +} + +static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) +{ + int ret; + struct of_pci_range range; + + memset(&range, 0, sizeof(range)); + range.size = SZ_32K; + range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1); + + ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO); + return ret; +} + +static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) +{ + u32 val; + + /* + * Program bits [43:13] of address of GITS_TRANSLATER register into + * bits [30:0] of the MSI base address register. In fact, in all iProc + * based SoCs, all I/O register bases are well below the 32-bit + * boundary, so we can safely assume bits [43:32] are always zeros. + */ + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR, + (u32)(msi_addr >> 13)); + + /* use a default 8K window size */ + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0); + + /* steering MSI to GICv3 ITS */ + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE); + val |= GIC_V3_CFG; + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val); + + /* + * Program bits [43:2] of address of GITS_TRANSLATER register into the + * iProc MSI address registers. 
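The bit packing described in the two comments above is easy to misread, so here is a minimal sketch of it; the helper name is illustrative and not part of the driver, and the raw offsets 0x074, 0x07c and 0x080 are simply the PAXC v2 register table entries listed earlier:

/*
 * Hedged sketch, not driver code: split a GITS_TRANSLATER address across
 * the iProc MSI steering registers (assumes <linux/io.h> accessors).
 */
static void iproc_msi_pack_addr_sketch(void __iomem *base, u64 gits_translater)
{
        /* bits [43:13] of the doorbell address -> bits [30:0] of MSI_BASE_ADDR */
        writel((u32)(gits_translater >> 13), base + 0x074);

        /* bits [43:2] -> the MSI address high/low register pair */
        writel(upper_32_bits(gits_translater >> 2), base + 0x080);
        writel(lower_32_bits(gits_translater >> 2), base + 0x07c);
}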
+ */ + msi_addr >>= 2; + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI, + upper_32_bits(msi_addr)); + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO, + lower_32_bits(msi_addr)); + + /* enable MSI */ + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG); + val |= MSI_ENABLE_CFG; + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val); +} + +static int iproc_pcie_msi_steer(struct iproc_pcie *pcie, + struct device_node *msi_node) +{ + struct device *dev = pcie->dev; + int ret; + u64 msi_addr; + + ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr); + if (ret < 0) { + dev_err(dev, "msi steering failed\n"); + return ret; + } + + switch (pcie->type) { + case IPROC_PCIE_PAXB_V2: + ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr); + if (ret) + return ret; + break; + case IPROC_PCIE_PAXC_V2: + iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr); + break; + default: + return -EINVAL; + } + + return 0; +} + static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) { struct device_node *msi_node; + int ret; + + /* + * Either the "msi-parent" or the "msi-map" phandle needs to exist + * for us to obtain the MSI node. + */ msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0); - if (!msi_node) - return -ENODEV; + if (!msi_node) { + const __be32 *msi_map = NULL; + int len; + u32 phandle; + + msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len); + if (!msi_map) + return -ENODEV; + + phandle = be32_to_cpup(msi_map + 1); + msi_node = of_find_node_by_phandle(phandle); + if (!msi_node) + return -ENODEV; + } + + /* + * Certain revisions of the iProc PCIe controller require additional + * configurations to steer the MSI writes towards an external MSI + * controller. + */ + if (pcie->need_msi_steer) { + ret = iproc_pcie_msi_steer(pcie, msi_node); + if (ret) + return ret; + } /* * If another MSI controller is being used, the call below should fail @@ -454,6 +1141,65 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) iproc_msi_exit(pcie); } +static int iproc_pcie_rev_init(struct iproc_pcie *pcie) +{ + struct device *dev = pcie->dev; + unsigned int reg_idx; + const u16 *regs; + + switch (pcie->type) { + case IPROC_PCIE_PAXB_BCMA: + regs = iproc_pcie_reg_paxb_bcma; + break; + case IPROC_PCIE_PAXB: + regs = iproc_pcie_reg_paxb; + pcie->has_apb_err_disable = true; + if (pcie->need_ob_cfg) { + pcie->ob_map = paxb_ob_map; + pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map); + } + break; + case IPROC_PCIE_PAXB_V2: + regs = iproc_pcie_reg_paxb_v2; + pcie->has_apb_err_disable = true; + if (pcie->need_ob_cfg) { + pcie->ob_map = paxb_v2_ob_map; + pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map); + } + pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map); + pcie->ib_map = paxb_v2_ib_map; + pcie->need_msi_steer = true; + break; + case IPROC_PCIE_PAXC: + regs = iproc_pcie_reg_paxc; + pcie->ep_is_internal = true; + break; + case IPROC_PCIE_PAXC_V2: + regs = iproc_pcie_reg_paxc_v2; + pcie->ep_is_internal = true; + pcie->need_msi_steer = true; + break; + default: + dev_err(dev, "incompatible iProc PCIe interface\n"); + return -EINVAL; + } + + pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG, + sizeof(*pcie->reg_offsets), + GFP_KERNEL); + if (!pcie->reg_offsets) + return -ENOMEM; + + /* go through the register table and populate all valid registers */ + pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ? + IPROC_PCIE_REG_INVALID : regs[0]; + for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++) + pcie->reg_offsets[reg_idx] = regs[reg_idx] ? 
+ regs[reg_idx] : IPROC_PCIE_REG_INVALID; + + return 0; +} + int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) { struct device *dev; @@ -462,6 +1208,13 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) struct pci_bus *bus; dev = pcie->dev; + + ret = iproc_pcie_rev_init(pcie); + if (ret) { + dev_err(dev, "unable to initialize controller parameters\n"); + return ret; + } + ret = devm_request_pci_bus_resources(dev, res); if (ret) return ret; @@ -478,19 +1231,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) goto err_exit_phy; } - switch (pcie->type) { - case IPROC_PCIE_PAXB: - pcie->reg_offsets = iproc_pcie_reg_paxb; - break; - case IPROC_PCIE_PAXC: - pcie->reg_offsets = iproc_pcie_reg_paxc; - break; - default: - dev_err(dev, "incompatible iProc PCIe interface\n"); - ret = -EINVAL; - goto err_power_off_phy; - } - iproc_pcie_reset(pcie); if (pcie->need_ob_cfg) { @@ -501,6 +1241,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) } } + ret = iproc_pcie_map_dma_ranges(pcie); + if (ret && ret != -ENOENT) + goto err_power_off_phy; + #ifdef CONFIG_ARM pcie->sysdata.private_data = pcie; sysdata = &pcie->sysdata; @@ -530,7 +1274,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); - pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); + + if (pcie->map_irq) + pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); + pci_bus_add_devices(bus); return 0; diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h index e84d93c53c7b59ee5cf75a002f95860f1ee2fba9..04fed8e907f12b602b5fb1f11ff59971ebd0b87f 100644 --- a/drivers/pci/host/pcie-iproc.h +++ b/drivers/pci/host/pcie-iproc.h @@ -24,23 +24,34 @@ * endpoint devices. 
*/ enum iproc_pcie_type { - IPROC_PCIE_PAXB = 0, + IPROC_PCIE_PAXB_BCMA = 0, + IPROC_PCIE_PAXB, + IPROC_PCIE_PAXB_V2, IPROC_PCIE_PAXC, + IPROC_PCIE_PAXC_V2, }; /** * iProc PCIe outbound mapping - * @set_oarr_size: indicates the OARR size bit needs to be set * @axi_offset: offset from the AXI address to the internal address used by * the iProc PCIe core - * @window_size: outbound window size + * @nr_windows: total number of supported outbound mapping windows */ struct iproc_pcie_ob { - bool set_oarr_size; resource_size_t axi_offset; - resource_size_t window_size; + unsigned int nr_windows; }; +/** + * iProc PCIe inbound mapping + * @nr_regions: total number of supported inbound mapping regions + */ +struct iproc_pcie_ib { + unsigned int nr_regions; +}; + +struct iproc_pcie_ob_map; +struct iproc_pcie_ib_map; struct iproc_msi; /** @@ -55,14 +66,25 @@ struct iproc_msi; * @root_bus: pointer to root bus * @phy: optional PHY device that controls the Serdes * @map_irq: function callback to map interrupts + * @ep_is_internal: indicates an internal emulated endpoint device is connected + * @has_apb_err_disable: indicates the controller can be configured to prevent + * unsupported request from being forwarded as an APB bus error + * * @need_ob_cfg: indicates SW needs to configure the outbound mapping window - * @ob: outbound mapping parameters + * @ob: outbound mapping related parameters + * @ob_map: outbound mapping related parameters specific to the controller + * + * @ib: inbound mapping related parameters + * @ib_map: inbound mapping region related parameters + * + * @need_msi_steer: indicates additional configuration of the iProc PCIe + * controller is required to steer MSI writes to an external interrupt controller * @msi: MSI data */ struct iproc_pcie { struct device *dev; enum iproc_pcie_type type; - const u16 *reg_offsets; + u16 *reg_offsets; void __iomem *base; phys_addr_t base_addr; #ifdef CONFIG_ARM @@ -71,8 +93,17 @@ struct iproc_pcie { struct pci_bus *root_bus; struct phy *phy; int (*map_irq)(const struct pci_dev *, u8, u8); + bool ep_is_internal; + bool has_apb_err_disable; + bool need_ob_cfg; struct iproc_pcie_ob ob; + const struct iproc_pcie_ob_map *ob_map; + + struct iproc_pcie_ib ib; + const struct iproc_pcie_ib_map *ib_map; + + bool need_msi_steer; struct iproc_msi *msi; }; diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c index 35936409b2d45921a049db1d4ddcbc64c4bb8f04..734ba0d4a5c8c7b091e4f54e1e57c04f1e8246c0 100644 --- a/drivers/pci/host/pcie-qcom.c +++ b/drivers/pci/host/pcie-qcom.c @@ -36,11 +36,17 @@ #include "pcie-designware.h" +#define PCIE20_PARF_SYS_CTRL 0x00 #define PCIE20_PARF_PHY_CTRL 0x40 #define PCIE20_PARF_PHY_REFCLK 0x4C #define PCIE20_PARF_DBI_BASE_ADDR 0x168 -#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c +#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C +#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 +#define PCIE20_PARF_LTSSM 0x1B0 +#define PCIE20_PARF_SID_OFFSET 0x234 +#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C #define PCIE20_ELBI_SYS_CTRL 0x04 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) @@ -72,9 +78,18 @@ struct qcom_pcie_resources_v1 { struct regulator *vdda; }; +struct qcom_pcie_resources_v2 { + struct clk *aux_clk; + struct clk *master_clk; + struct clk *slave_clk; + struct clk *cfg_clk; + struct clk *pipe_clk; +}; + union qcom_pcie_resources { struct qcom_pcie_resources_v0 v0; struct qcom_pcie_resources_v1 v1; + struct 
qcom_pcie_resources_v2 v2; }; struct qcom_pcie; @@ -82,7 +97,9 @@ struct qcom_pcie; struct qcom_pcie_ops { int (*get_resources)(struct qcom_pcie *pcie); int (*init)(struct qcom_pcie *pcie); + int (*post_init)(struct qcom_pcie *pcie); void (*deinit)(struct qcom_pcie *pcie); + void (*ltssm_enable)(struct qcom_pcie *pcie); }; struct qcom_pcie { @@ -116,17 +133,35 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg) return dw_handle_msi_irq(pp); } -static int qcom_pcie_establish_link(struct qcom_pcie *pcie) +static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) { u32 val; - if (dw_pcie_link_up(&pcie->pp)) - return 0; - /* enable link training */ val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); +} + +static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->parf + PCIE20_PARF_LTSSM); + val |= BIT(8); + writel(val, pcie->parf + PCIE20_PARF_LTSSM); +} + +static int qcom_pcie_establish_link(struct qcom_pcie *pcie) +{ + + if (dw_pcie_link_up(&pcie->pp)) + return 0; + + /* Enable Link Training state machine */ + if (pcie->ops->ltssm_enable) + pcie->ops->ltssm_enable(pcie); return dw_pcie_wait_for_link(&pcie->pp); } @@ -421,6 +456,113 @@ static int qcom_pcie_init_v1(struct qcom_pcie *pcie) return ret; } +static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct device *dev = pcie->pp.dev; + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->cfg_clk = devm_clk_get(dev, "cfg"); + if (IS_ERR(res->cfg_clk)) + return PTR_ERR(res->cfg_clk); + + res->master_clk = devm_clk_get(dev, "bus_master"); + if (IS_ERR(res->master_clk)) + return PTR_ERR(res->master_clk); + + res->slave_clk = devm_clk_get(dev, "bus_slave"); + if (IS_ERR(res->slave_clk)) + return PTR_ERR(res->slave_clk); + + res->pipe_clk = devm_clk_get(dev, "pipe"); + if (IS_ERR(res->pipe_clk)) + return PTR_ERR(res->pipe_clk); + + return 0; +} + +static int qcom_pcie_init_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct device *dev = pcie->pp.dev; + u32 val; + int ret; + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + return ret; + } + + ret = clk_prepare_enable(res->cfg_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable cfg clock\n"); + goto err_cfg_clk; + } + + ret = clk_prepare_enable(res->master_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable master clock\n"); + goto err_master_clk; + } + + ret = clk_prepare_enable(res->slave_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable slave clock\n"); + goto err_slave_clk; + } + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + /* MAC PHY_POWERDOWN MUX DISABLE */ + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); + val &= ~BIT(29); + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + val |= BIT(4); + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + + return 0; 
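For readers following the new per-version ops table, the condensed sketch below shows the order in which the hooks end up being called; it is illustrative only (in the driver, get_resources runs at probe time, init/post_init run from host_init, and ltssm_enable from the link bring-up path), and the function name is hypothetical:

/* Sketch of the v0/v1/v2 ops dispatch; not the literal driver flow. */
static int qcom_pcie_bringup_sketch(struct qcom_pcie *pcie)
{
        int ret;

        ret = pcie->ops->get_resources(pcie);   /* look up clocks/regulators */
        if (ret)
                return ret;

        ret = pcie->ops->init(pcie);            /* enable clocks, program PARF */
        if (ret)
                return ret;

        if (pcie->ops->post_init)               /* v2 only: enable the pipe clock */
                pcie->ops->post_init(pcie);

        if (pcie->ops->ltssm_enable)            /* kick off link training */
                pcie->ops->ltssm_enable(pcie);

        return 0;
}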
+ +err_slave_clk: + clk_disable_unprepare(res->master_clk); +err_master_clk: + clk_disable_unprepare(res->cfg_clk); +err_cfg_clk: + clk_disable_unprepare(res->aux_clk); + + return ret; +} + +static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct device *dev = pcie->pp.dev; + int ret; + + ret = clk_prepare_enable(res->pipe_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable pipe clock\n"); + return ret; + } + + return 0; +} + static int qcom_pcie_link_up(struct pcie_port *pp) { struct qcom_pcie *pcie = to_qcom_pcie(pp); @@ -429,6 +571,17 @@ static int qcom_pcie_link_up(struct pcie_port *pp) return !!(val & PCI_EXP_LNKSTA_DLLLA); } +static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + + clk_disable_unprepare(res->pipe_clk); + clk_disable_unprepare(res->slave_clk); + clk_disable_unprepare(res->master_clk); + clk_disable_unprepare(res->cfg_clk); + clk_disable_unprepare(res->aux_clk); +} + static void qcom_pcie_host_init(struct pcie_port *pp) { struct qcom_pcie *pcie = to_qcom_pcie(pp); @@ -444,6 +597,9 @@ static void qcom_pcie_host_init(struct pcie_port *pp) if (ret) goto err_deinit; + if (pcie->ops->post_init) + pcie->ops->post_init(pcie); + dw_pcie_setup_rc(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) @@ -487,12 +643,22 @@ static const struct qcom_pcie_ops ops_v0 = { .get_resources = qcom_pcie_get_resources_v0, .init = qcom_pcie_init_v0, .deinit = qcom_pcie_deinit_v0, + .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable, }; static const struct qcom_pcie_ops ops_v1 = { .get_resources = qcom_pcie_get_resources_v1, .init = qcom_pcie_init_v1, .deinit = qcom_pcie_deinit_v1, + .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable, +}; + +static const struct qcom_pcie_ops ops_v2 = { + .get_resources = qcom_pcie_get_resources_v2, + .init = qcom_pcie_init_v2, + .post_init = qcom_pcie_post_init_v2, + .deinit = qcom_pcie_deinit_v2, + .ltssm_enable = qcom_pcie_v2_ltssm_enable, }; static int qcom_pcie_probe(struct platform_device *pdev) @@ -572,6 +738,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 }, { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, + { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, { } }; diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 62700d1896f4b885517cab4533b5e4c2c1a6db47..aca85be101f83378ee0ca1b0cb15d31d164127c2 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -1071,13 +1071,14 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, static const struct of_device_id rcar_pcie_of_match[] = { { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 }, - { .compatible = "renesas,pcie-rcar-gen2", - .data = rcar_pcie_hw_init_gen2 }, { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init_gen2 }, { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init_gen2 }, + { .compatible = "renesas,pcie-rcar-gen2", + .data = rcar_pcie_hw_init_gen2 }, { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init }, + { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init }, {}, }; diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index e0b22dab9b7ac37c81d380bfe00751f4496f4516..f2dca7bb0b39f8d02537ba985f958a19d1bfcf14 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ 
-53,6 +53,7 @@ #define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008) #define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x)) #define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) +#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0) #define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080) #define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48) #define PCIE_CLIENT_LINK_STATUS_UP 0x00300000 @@ -135,13 +136,14 @@ #define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00) #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) #define PCIE_RC_CONFIG_SCC_SHIFT 16 +#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) +#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 +#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff +#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 #define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) -#define PCIE_RC_CONFIG_LCS_RETRAIN_LINK BIT(5) -#define PCIE_RC_CONFIG_LCS_LBMIE BIT(10) -#define PCIE_RC_CONFIG_LCS_LABIE BIT(11) -#define PCIE_RC_CONFIG_LCS_LBMS BIT(30) -#define PCIE_RC_CONFIG_LCS_LAMS BIT(31) #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) +#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) +#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) #define PCIE_CORE_AXI_CONF_BASE 0xc00000 #define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0) @@ -190,6 +192,9 @@ struct rockchip_pcie { struct reset_control *mgmt_rst; struct reset_control *mgmt_sticky_rst; struct reset_control *pipe_rst; + struct reset_control *pm_rst; + struct reset_control *aclk_rst; + struct reset_control *pclk_rst; struct clk *aclk_pcie; struct clk *aclk_perf_pcie; struct clk *hclk_pcie; @@ -200,8 +205,14 @@ struct rockchip_pcie { struct gpio_desc *ep_gpio; u32 lanes; u8 root_bus_nr; + int link_gen; struct device *dev; struct irq_domain *irq_domain; + u32 io_size; + int offset; + phys_addr_t io_bus_addr; + u32 mem_size; + phys_addr_t mem_bus_addr; }; static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg) @@ -220,7 +231,7 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) u32 status; status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE); + status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); } @@ -229,7 +240,7 @@ static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) u32 status; status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS); + status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); } @@ -395,6 +406,40 @@ static struct pci_ops rockchip_pcie_ops = { .write = rockchip_pcie_wr_conf, }; +static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) +{ + u32 status, curr, scale, power; + + if (IS_ERR(rockchip->vpcie3v3)) + return; + + /* + * Set RC's captured slot power limit and scale if + * vpcie3v3 available. The default values are both zero + * which means the software should set these two according + * to the actual power supply. 
+ */ + curr = regulator_get_current_limit(rockchip->vpcie3v3); + if (curr > 0) { + scale = 3; /* 0.001x */ + curr = curr / 1000; /* convert to mA */ + power = (curr * 3300) / 1000; /* milliwatt */ + while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { + if (!scale) { + dev_warn(rockchip->dev, "invalid power supply\n"); + return; + } + scale--; + power = power / 10; + } + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); + status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | + (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); + } +} + /** * rockchip_pcie_init_port - Initialize hardware * @rockchip: PCIe port information @@ -408,6 +453,24 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) gpiod_set_value(rockchip->ep_gpio, 0); + err = reset_control_assert(rockchip->aclk_rst); + if (err) { + dev_err(dev, "assert aclk_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pclk_rst); + if (err) { + dev_err(dev, "assert pclk_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pm_rst); + if (err) { + dev_err(dev, "assert pm_rst err %d\n", err); + return err; + } + err = phy_init(rockchip->phy); if (err < 0) { dev_err(dev, "fail to init phy, err %d\n", err); @@ -438,14 +501,40 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) return err; } + udelay(10); + + err = reset_control_deassert(rockchip->pm_rst); + if (err) { + dev_err(dev, "deassert pm_rst err %d\n", err); + return err; + } + + err = reset_control_deassert(rockchip->aclk_rst); + if (err) { + dev_err(dev, "deassert aclk_rst err %d\n", err); + return err; + } + + err = reset_control_deassert(rockchip->pclk_rst); + if (err) { + dev_err(dev, "deassert pclk_rst err %d\n", err); + return err; + } + + if (rockchip->link_gen == 2) + rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2, + PCIE_CLIENT_CONFIG); + else + rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, + PCIE_CLIENT_CONFIG); + rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE | PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) | - PCIE_CLIENT_MODE_RC | - PCIE_CLIENT_GEN_SEL_2, - PCIE_CLIENT_CONFIG); + PCIE_CLIENT_MODE_RC, + PCIE_CLIENT_CONFIG); err = phy_power_on(rockchip->phy); if (err) { @@ -481,21 +570,19 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) return err; } - /* - * We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before - * enabling ASPM. Otherwise L1PwrOnSc and L1PwrOnVal isn't - * reliable and enabling ASPM doesn't work. This is a controller - * bug we need to work around. - */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2); - /* Fix the transmitted FTS count desired to exit from L0s. 
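The captured slot power limit encoding done by rockchip_pcie_set_power_limit() above can be summarized in isolation; the sketch below uses a hypothetical helper name and assumes a 3.3 V rail with the current limit reported in microamps by regulator_get_current_limit():

/* Sketch: encode a power budget into the DCR captured slot power limit fields. */
static void cspl_encode_sketch(int curr_ua, u32 *value, u32 *scale)
{
        u32 power = (curr_ua / 1000) * 3300 / 1000;     /* uA -> mA, then mW at 3.3 V */

        *scale = 3;                             /* start at the 0.001x multiplier */
        while (power > 0xff && *scale > 0) {    /* shrink until it fits in 8 bits */
                (*scale)--;
                power /= 10;
        }
        *value = power;                         /* the driver warns if it still overflows */
}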
*/ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); - status = (status & PCIE_CORE_CTRL_PLC1_FTS_MASK) | + status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); + rockchip_pcie_set_power_limit(rockchip); + + /* Set RC's clock architecture as common clock */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKCTL_CCC; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + /* Enable Gen1 training */ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, PCIE_CLIENT_CONFIG); @@ -522,35 +609,37 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) msleep(20); } - /* - * Enable retrain for gen2. This should be configured only after - * gen1 finished. - */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + if (rockchip->link_gen == 2) { + /* + * Enable retrain for gen2. This should be configured only after + * gen1 finished. + */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKCTL_RL; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + timeout = jiffies + msecs_to_jiffies(500); + for (;;) { + status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); + if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) == + PCIE_CORE_PL_CONF_SPEED_5G) { + dev_dbg(dev, "PCIe link training gen2 pass!\n"); + break; + } - timeout = jiffies + msecs_to_jiffies(500); - for (;;) { - status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); - if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) == - PCIE_CORE_PL_CONF_SPEED_5G) { - dev_dbg(dev, "PCIe link training gen2 pass!\n"); - break; - } + if (time_after(jiffies, timeout)) { + dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); + break; + } - if (time_after(jiffies, timeout)) { - dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); - break; + msleep(20); } - - msleep(20); } /* Check the final link width from negotiated lane counter from MGMT */ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); - status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> - PCIE_CORE_PL_CONF_LANE_MASK); + status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> + PCIE_CORE_PL_CONF_LANE_SHIFT); dev_dbg(dev, "current link width is x%d\n", status); rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, @@ -558,6 +647,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, PCIE_RC_CONFIG_RID_CCR); + + /* Clear THP cap's next cap pointer to remove L1 substate cap */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP); + status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP); + rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); rockchip_pcie_write(rockchip, @@ -753,6 +848,10 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) rockchip->lanes = 1; } + rockchip->link_gen = of_pci_get_max_link_speed(node); + if (rockchip->link_gen < 0 || rockchip->link_gen > 2) + rockchip->link_gen = 2; + rockchip->core_rst = devm_reset_control_get(dev, "core"); if (IS_ERR(rockchip->core_rst)) { if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) @@ -781,6 +880,27 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) return 
PTR_ERR(rockchip->pipe_rst); } + rockchip->pm_rst = devm_reset_control_get(dev, "pm"); + if (IS_ERR(rockchip->pm_rst)) { + if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pm reset property in node\n"); + return PTR_ERR(rockchip->pm_rst); + } + + rockchip->pclk_rst = devm_reset_control_get(dev, "pclk"); + if (IS_ERR(rockchip->pclk_rst)) { + if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pclk reset property in node\n"); + return PTR_ERR(rockchip->pclk_rst); + } + + rockchip->aclk_rst = devm_reset_control_get(dev, "aclk"); + if (IS_ERR(rockchip->aclk_rst)) { + if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) + dev_err(dev, "missing aclk reset property in node\n"); + return PTR_ERR(rockchip->aclk_rst); + } + rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); if (IS_ERR(rockchip->ep_gpio)) { dev_err(dev, "missing ep-gpios property in node\n"); @@ -1025,6 +1145,50 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, return 0; } +static int rockchip_cfg_atu(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int offset; + int err; + int reg_no; + + for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { + err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, + AXI_WRAPPER_MEM_WRITE, + 20 - 1, + rockchip->mem_bus_addr + + (reg_no << 20), + 0); + if (err) { + dev_err(dev, "program RC mem outbound ATU failed\n"); + return err; + } + } + + err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); + if (err) { + dev_err(dev, "program RC mem inbound ATU failed\n"); + return err; + } + + offset = rockchip->mem_size >> 20; + for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) { + err = rockchip_pcie_prog_ob_atu(rockchip, + reg_no + 1 + offset, + AXI_WRAPPER_IO_WRITE, + 20 - 1, + rockchip->io_bus_addr + + (reg_no << 20), + 0); + if (err) { + dev_err(dev, "program RC io outbound ATU failed\n"); + return err; + } + } + + return 0; +} + static int rockchip_pcie_probe(struct platform_device *pdev) { struct rockchip_pcie *rockchip; @@ -1034,13 +1198,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) resource_size_t io_base; struct resource *mem; struct resource *io; - phys_addr_t io_bus_addr = 0; - u32 io_size; - phys_addr_t mem_bus_addr = 0; - u32 mem_size = 0; - int reg_no; int err; - int offset; LIST_HEAD(res); @@ -1107,14 +1265,13 @@ static int rockchip_pcie_probe(struct platform_device *pdev) goto err_vpcie; /* Get the I/O and memory ranges from DT */ - io_size = 0; resource_list_for_each_entry(win, &res) { switch (resource_type(win->res)) { case IORESOURCE_IO: io = win->res; io->name = "I/O"; - io_size = resource_size(io); - io_bus_addr = io->start - win->offset; + rockchip->io_size = resource_size(io); + rockchip->io_bus_addr = io->start - win->offset; err = pci_remap_iospace(io, io_base); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", @@ -1125,8 +1282,8 @@ static int rockchip_pcie_probe(struct platform_device *pdev) case IORESOURCE_MEM: mem = win->res; mem->name = "MEM"; - mem_size = resource_size(mem); - mem_bus_addr = mem->start - win->offset; + rockchip->mem_size = resource_size(mem); + rockchip->mem_bus_addr = mem->start - win->offset; break; case IORESOURCE_BUS: rockchip->root_bus_nr = win->res->start; @@ -1136,45 +1293,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev) } } - if (mem_size) { - for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) { - err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, - 
AXI_WRAPPER_MEM_WRITE, - 20 - 1, - mem_bus_addr + - (reg_no << 20), - 0); - if (err) { - dev_err(dev, "program RC mem outbound ATU failed\n"); - goto err_vpcie; - } - } - } - - err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); - if (err) { - dev_err(dev, "program RC mem inbound ATU failed\n"); + err = rockchip_cfg_atu(rockchip); + if (err) goto err_vpcie; - } - - offset = mem_size >> 20; - - if (io_size) { - for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) { - err = rockchip_pcie_prog_ob_atu(rockchip, - reg_no + 1 + offset, - AXI_WRAPPER_IO_WRITE, - 20 - 1, - io_bus_addr + - (reg_no << 20), - 0); - if (err) { - dev_err(dev, "program RC io outbound ATU failed\n"); - goto err_vpcie; - } - } - } - bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res); if (!bus) { err = -ENOMEM; @@ -1187,9 +1308,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev) pcie_bus_configure_settings(child); pci_bus_add_devices(bus); - - dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n"); - return err; err_vpcie: diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c index 3cf197ba7f37baa6dd9613844cd21767e3caef93..dafe8b88d97d8ce192ba8abd44cee9da37efb55e 100644 --- a/drivers/pci/host/pcie-spear13xx.c +++ b/drivers/pci/host/pcie-spear13xx.c @@ -296,8 +296,4 @@ static struct platform_driver spear13xx_pcie_driver = { }, }; -static int __init spear13xx_pcie_init(void) -{ - return platform_driver_register(&spear13xx_pcie_driver); -} -device_initcall(spear13xx_pcie_init); +builtin_platform_driver(spear13xx_pcie_driver); diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index 37e29b580be323a10c9cb17985bd2684873e9bcf..18ef1a93c10ac191cec6b26760533a644f0c1934 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -39,7 +40,6 @@ static DEFINE_RAW_SPINLOCK(list_lock); /** * struct vmd_irq - private data to map driver IRQ to the VMD shared vector * @node: list item for parent traversal. - * @rcu: RCU callback item for freeing. * @irq: back pointer to parent. * @enabled: true if driver enabled IRQ * @virq: the virtual IRQ value provided to the requesting driver. @@ -49,7 +49,6 @@ static DEFINE_RAW_SPINLOCK(list_lock); */ struct vmd_irq { struct list_head node; - struct rcu_head rcu; struct vmd_irq_list *irq; bool enabled; unsigned int virq; @@ -58,11 +57,13 @@ struct vmd_irq { /** * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector * @irq_list: the list of irq's the VMD one demuxes to. + * @srcu: SRCU struct for local synchronization. * @count: number of child IRQs assigned to this vector; used to track * sharing. 
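The per-vector SRCU domain introduced here follows the usual pattern: readers bracket the list walk with srcu_read_lock()/srcu_read_unlock() on that vector's srcu_struct, and the free path calls synchronize_srcu() on the same struct before kfree(), so teardown only waits for readers of that one domain. A minimal sketch of the pattern, with illustrative names rather than the driver's:

#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demux_entry {
        struct list_head node;
};

struct demux_vector {
        struct list_head head;          /* entries added with list_add_rcu() */
        struct srcu_struct srcu;        /* one SRCU domain per vector; init_srcu_struct() first */
};

static void demux_walk(struct demux_vector *v)
{
        struct demux_entry *e;
        int idx = srcu_read_lock(&v->srcu);

        list_for_each_entry_rcu(e, &v->head, node)
                ;                       /* handle each entry */

        srcu_read_unlock(&v->srcu, idx);
}

static void demux_free(struct demux_vector *v, struct demux_entry *e)
{
        list_del_rcu(&e->node);
        synchronize_srcu(&v->srcu);     /* wait only for readers of this vector */
        kfree(e);
}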
*/ struct vmd_irq_list { struct list_head irq_list; + struct srcu_struct srcu; unsigned int count; }; @@ -224,14 +225,14 @@ static void vmd_msi_free(struct irq_domain *domain, struct vmd_irq *vmdirq = irq_get_chip_data(virq); unsigned long flags; - synchronize_rcu(); + synchronize_srcu(&vmdirq->irq->srcu); /* XXX: Potential optimization to rebalance */ raw_spin_lock_irqsave(&list_lock, flags); vmdirq->irq->count--; raw_spin_unlock_irqrestore(&list_lock, flags); - kfree_rcu(vmdirq, rcu); + kfree(vmdirq); } static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, @@ -646,11 +647,12 @@ static irqreturn_t vmd_irq(int irq, void *data) { struct vmd_irq_list *irqs = data; struct vmd_irq *vmdirq; + int idx; - rcu_read_lock(); + idx = srcu_read_lock(&irqs->srcu); list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) generic_handle_irq(vmdirq->virq); - rcu_read_unlock(); + srcu_read_unlock(&irqs->srcu, idx); return IRQ_HANDLED; } @@ -696,6 +698,10 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENOMEM; for (i = 0; i < vmd->msix_count; i++) { + err = init_srcu_struct(&vmd->irqs[i].srcu); + if (err) + return err; + INIT_LIST_HEAD(&vmd->irqs[i].irq_list); err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), vmd_irq, 0, "vmd", &vmd->irqs[i]); @@ -714,12 +720,20 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) return 0; } +static void vmd_cleanup_srcu(struct vmd_dev *vmd) +{ + int i; + + for (i = 0; i < vmd->msix_count; i++) + cleanup_srcu_struct(&vmd->irqs[i].srcu); +} + static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); vmd_detach_resources(vmd); - pci_set_drvdata(dev, NULL); + vmd_cleanup_srcu(vmd); sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); pci_remove_root_bus(vmd->bus); @@ -727,7 +741,7 @@ static void vmd_remove(struct pci_dev *dev) irq_domain_remove(vmd->irq_domain); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int vmd_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index a46b585fae316f2f987080d7bc8dec976089af6b..5ed2dcaa8e27c0ebe0729c3f543a58261c86a651 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -222,35 +222,6 @@ static void acpiphp_post_dock_fixup(struct acpi_device *adev) acpiphp_let_context_go(context); } -/* Check whether the PCI device is managed by native PCIe hotplug driver */ -static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev) -{ - u32 reg32; - acpi_handle tmp; - struct acpi_pci_root *root; - - /* Check whether the PCIe port supports native PCIe hotplug */ - if (pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, ®32)) - return false; - if (!(reg32 & PCI_EXP_SLTCAP_HPC)) - return false; - - /* - * Check whether native PCIe hotplug has been enabled for - * this PCIe hierarchy. - */ - tmp = acpi_find_root_bridge_handle(pdev); - if (!tmp) - return false; - root = acpi_pci_find_root(tmp); - if (!root) - return false; - if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)) - return false; - - return true; -} - /** * acpiphp_add_context - Add ACPIPHP context to an ACPI device object. * @handle: ACPI handle of the object to add a context to. @@ -334,7 +305,7 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data, * expose slots to user space in those cases. 
*/ if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev)) - && !(pdev && device_is_managed_by_native_pciehp(pdev))) { + && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) { unsigned long long sun; int retval; diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index f6221d739f59d73bfb09f49bc66a8cf33b2e9cdf..68d105aaf4e2ef9daf6ee37800cff620805b3487 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include "acpiphp.h" #include "../pci.h" diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 74f3a0695b43e2ba94d8c8fe2824ec001681708f..33d300d124110b07f4c5d987937c4d38b4e69f08 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -40,7 +40,7 @@ #include #include -#include +#include #include "cpqphp.h" #include "cpqphp_nvram.h" @@ -867,7 +867,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) { err(msg_HPC_not_supported); - return -ENODEV; + rc = -ENODEV; + goto err_disable_device; } /* TODO: This code can be made to support non-Compaq or Intel diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c index c25fc906105935b91b6567af72ba7b978d7760d4..daae8071a15674f6d881aa79e1413d33b5d9c7c3 100644 --- a/drivers/pci/hotplug/cpqphp_nvram.c +++ b/drivers/pci/hotplug/cpqphp_nvram.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include "cpqphp.h" #include "cpqphp_nvram.h" diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index fea0b8b33589365663422bf116afbfb359cfbd10..7b0e97be9063d971dbd327836ab1c9cd2377287d 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -23,6 +23,9 @@ * * Send feedback to * + * Authors: + * Greg Kroah-Hartman + * Scott Murray */ #include /* try_module_get & module_put */ @@ -39,7 +42,7 @@ #include #include #include -#include +#include #include "../pci.h" #include "cpci_hotplug.h" @@ -50,15 +53,9 @@ #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME, ## arg) #define warn(format, arg...) 
printk(KERN_WARNING "%s: " format, MY_NAME, ## arg) - /* local variables */ static bool debug; -#define DRIVER_VERSION "0.5" -#define DRIVER_AUTHOR "Greg Kroah-Hartman , Scott Murray " -#define DRIVER_DESC "PCI Hot Plug PCI Core" - - static LIST_HEAD(pci_hotplug_slot_list); static DEFINE_MUTEX(pci_hp_mutex); @@ -534,7 +531,6 @@ static int __init pci_hotplug_init(void) return result; } - info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); return result; } device_initcall(pci_hotplug_init); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 7d32fa33dcef902a926b1603a06f2002971675a4..35d84845d5af93769c5062ccf1395c467e29d8fe 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -25,6 +25,10 @@ * * Send feedback to , * + * Authors: + * Dan Zink + * Greg Kroah-Hartman + * Dely Sy " */ #include @@ -42,10 +46,6 @@ bool pciehp_poll_mode; int pciehp_poll_time; static bool pciehp_force; -#define DRIVER_VERSION "0.4" -#define DRIVER_AUTHOR "Dan Zink , Greg Kroah-Hartman , Dely Sy " -#define DRIVER_DESC "PCI Express Hot Plug Controller Driver" - /* * not really modular, but the easiest way to keep compat with existing * bootargs behaviour is to continue using module_param here. @@ -333,7 +333,6 @@ static int __init pcied_init(void) retval = pcie_port_service_register(&hpdriver_portdrv); dbg("pcie_port_service_register = %d\n", retval); - info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); if (retval) dbg("Failure to register service\n"); diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index efe69e879455cbd19ac4733d863d81d6666c3a7b..10c9c0ba8ff2394dc8b43d640e5dbd4d53c5e84e 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "../pci.h" #include "pciehp.h" @@ -98,6 +99,7 @@ static int board_added(struct slot *p_slot) pciehp_green_led_blink(p_slot); /* Check link training status */ + pm_runtime_get_sync(&ctrl->pcie->port->dev); retval = pciehp_check_link_status(ctrl); if (retval) { ctrl_err(ctrl, "Failed to check link status\n"); @@ -118,12 +120,14 @@ static int board_added(struct slot *p_slot) if (retval != -EEXIST) goto err_exit; } + pm_runtime_put(&ctrl->pcie->port->dev); pciehp_green_led_on(p_slot); pciehp_set_attention_status(p_slot, 0); return 0; err_exit: + pm_runtime_put(&ctrl->pcie->port->dev); set_slot_off(ctrl, p_slot); return retval; } @@ -137,7 +141,9 @@ static int remove_board(struct slot *p_slot) int retval; struct controller *ctrl = p_slot->ctrl; + pm_runtime_get_sync(&ctrl->pcie->port->dev); retval = pciehp_unconfigure_device(p_slot); + pm_runtime_put(&ctrl->pcie->port->dev); if (retval) return retval; @@ -410,7 +416,7 @@ int pciehp_enable_slot(struct slot *p_slot) if (getstatus) { ctrl_info(ctrl, "Slot(%s): Already enabled\n", slot_name(p_slot)); - return -EINVAL; + return 0; } } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index b57fc6d6e28a6d6333b4c2ee1367c428dacd1768..026830a138aec192e1b4027b2e6479025c72da83 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -620,8 +620,18 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS); } - /* Check Presence Detect Changed */ - if (events & PCI_EXP_SLTSTA_PDC) { + /* + * Check Link Status Changed at higher precedence than Presence + * Detect Changed. 
The PDS value may be set to "card present" from + * out-of-band detection, which may be in conflict with a Link Down + * and cause the wrong event to queue. + */ + if (events & PCI_EXP_SLTSTA_DLLSC) { + ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot), + link ? "Up" : "Down"); + pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP : + INT_LINK_DOWN); + } else if (events & PCI_EXP_SLTSTA_PDC) { present = !!(status & PCI_EXP_SLTSTA_PDS); ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot), present ? "" : "not "); @@ -636,13 +646,6 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) pciehp_queue_interrupt_event(slot, INT_POWER_FAULT); } - if (events & PCI_EXP_SLTSTA_DLLSC) { - ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot), - link ? "Up" : "Down"); - pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP : - INT_LINK_DOWN); - } - return IRQ_HANDLED; } diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index dc67f39779ecd3cbfbd7d0f60eaafc627e85e2d2..c614ff7c3bc3f9b28a0c48cd4c1c236899477606 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -257,8 +257,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn) static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn) { - if (vio_find_node(dn)) + struct vio_dev *vio_dev; + + vio_dev = vio_find_node(dn); + if (vio_dev) { + put_device(&vio_dev->dev); return -EINVAL; + } if (!vio_register_device_node(dn)) { printk(KERN_ERR @@ -334,6 +339,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) return -EINVAL; vio_unregister_device(vio_dev); + + put_device(&vio_dev->dev); + return 0; } diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 50b8b7d54416dfe1fb773103fa52516e9690155b..530d0e49f2edbd6f58b0cccd529cf030fbbf1ed5 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -5,12 +5,13 @@ * * Author(s): * Jan Glauber + * + * License: GPL */ #define KMSG_COMPONENT "zpci" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include #include #include #include @@ -21,10 +22,6 @@ #define SLOT_NAME_SIZE 10 static LIST_HEAD(s390_hotplug_slot_list); -MODULE_AUTHOR("Jan Glauber ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; - pci_cfg_access_lock(dev); - pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); - msleep(100); - pci_cfg_access_unlock(dev); - iov->initial_VFs = initial; if (nr_virtfn < initial) initial = nr_virtfn; @@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) goto err_pcibios; } + pci_iov_set_numvfs(dev, nr_virtfn); + iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; + pci_cfg_access_lock(dev); + pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); + msleep(100); + pci_cfg_access_unlock(dev); + for (i = 0; i < initial; i++) { rc = pci_iov_add_virtfn(dev, i, 0); if (rc) @@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev) } /** - * pci_iov_resource_bar - get position of the SR-IOV BAR + * pci_iov_update_resource - update a VF BAR * @dev: the PCI device * @resno: the resource number * - * Returns position of the BAR encapsulated in the SR-IOV capability. + * Update a VF BAR in the SR-IOV capability of a PF. */ -int pci_iov_resource_bar(struct pci_dev *dev, int resno) +void pci_iov_update_resource(struct pci_dev *dev, int resno) { - if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END) - return 0; + struct pci_sriov *iov = dev->is_physfn ? 
dev->sriov : NULL; + struct resource *res = dev->resource + resno; + int vf_bar = resno - PCI_IOV_RESOURCES; + struct pci_bus_region region; + u16 cmd; + u32 new; + int reg; + + /* + * The generic pci_restore_bars() path calls this for all devices, + * including VFs and non-SR-IOV devices. If this is not a PF, we + * have nothing to do. + */ + if (!iov) + return; + + pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd); + if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) { + dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n", + vf_bar, res); + return; + } + + /* + * Ignore unimplemented BARs, unused resource slots for 64-bit + * BARs, and non-movable resources, e.g., those described via + * Enhanced Allocation. + */ + if (!res->flags) + return; + + if (res->flags & IORESOURCE_UNSET) + return; + + if (res->flags & IORESOURCE_PCI_FIXED) + return; - BUG_ON(!dev->is_physfn); + pcibios_resource_to_bus(dev->bus, ®ion, res); + new = region.start; + new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; - return dev->sriov->pos + PCI_SRIOV_BAR + - 4 * (resno - PCI_IOV_RESOURCES); + reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar; + pci_write_config_dword(dev, reg, new); + if (res->flags & IORESOURCE_MEM_64) { + new = region.start >> 16 >> 16; + pci_write_config_dword(dev, reg + 4, new); + } } resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev, diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index ad70507cfb566a2291498d4ba723e1a5a4aebd3c..50c5003295ca535036b056d7855caf0c96473f13 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -551,14 +551,14 @@ static int populate_msi_sysfs(struct pci_dev *pdev) } static struct msi_desc * -msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity) +msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) { struct cpumask *masks = NULL; struct msi_desc *entry; u16 control; - if (affinity) { - masks = irq_create_affinity_masks(dev->irq_affinity, nvec); + if (affd) { + masks = irq_create_affinity_masks(nvec, affd); if (!masks) pr_err("Unable to allocate affinity masks, ignoring\n"); } @@ -618,7 +618,8 @@ static int msi_verify_entries(struct pci_dev *dev) * an error, and a positive return value indicates the number of interrupts * which could have been allocated. 
*/ -static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity) +static int msi_capability_init(struct pci_dev *dev, int nvec, + const struct irq_affinity *affd) { struct msi_desc *entry; int ret; @@ -626,7 +627,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity) pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ - entry = msi_setup_entry(dev, nvec, affinity); + entry = msi_setup_entry(dev, nvec, affd); if (!entry) return -ENOMEM; @@ -690,14 +691,14 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, struct msix_entry *entries, int nvec, - bool affinity) + const struct irq_affinity *affd) { struct cpumask *curmsk, *masks = NULL; struct msi_desc *entry; int ret, i; - if (affinity) { - masks = irq_create_affinity_masks(dev->irq_affinity, nvec); + if (affd) { + masks = irq_create_affinity_masks(nvec, affd); if (!masks) pr_err("Unable to allocate affinity masks, ignoring\n"); } @@ -753,14 +754,14 @@ static void msix_program_entries(struct pci_dev *dev, * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of struct msix_entry entries * @nvec: number of @entries - * @affinity: flag to indicate cpu irq affinity mask should be set + * @affd: Optional pointer to enable automatic affinity assignement * * Setup the MSI-X capability structure of device function with a * single MSI-X irq. A return of zero indicates the successful setup of * requested MSI-X entries with allocated irqs or non-zero for otherwise. **/ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, - int nvec, bool affinity) + int nvec, const struct irq_affinity *affd) { int ret; u16 control; @@ -775,7 +776,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, if (!base) return -ENOMEM; - ret = msix_setup_entries(dev, base, entries, nvec, affinity); + ret = msix_setup_entries(dev, base, entries, nvec, affd); if (ret) return ret; @@ -956,7 +957,7 @@ int pci_msix_vec_count(struct pci_dev *dev) EXPORT_SYMBOL(pci_msix_vec_count); static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, - int nvec, bool affinity) + int nvec, const struct irq_affinity *affd) { int nr_entries; int i, j; @@ -988,7 +989,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); return -EINVAL; } - return msix_capability_init(dev, entries, nvec, affinity); + return msix_capability_init(dev, entries, nvec, affd); } /** @@ -1008,7 +1009,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, **/ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) { - return __pci_enable_msix(dev, entries, nvec, false); + return __pci_enable_msix(dev, entries, nvec, NULL); } EXPORT_SYMBOL(pci_enable_msix); @@ -1059,9 +1060,8 @@ int pci_msi_enabled(void) EXPORT_SYMBOL(pci_msi_enabled); static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, - unsigned int flags) + const struct irq_affinity *affd) { - bool affinity = flags & PCI_IRQ_AFFINITY; int nvec; int rc; @@ -1090,14 +1090,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, nvec = maxvec; for (;;) { - if (affinity) { - nvec = irq_calc_affinity_vectors(dev->irq_affinity, - nvec); + if (affd) { + nvec = irq_calc_affinity_vectors(nvec, affd); if 
(nvec < minvec) return -ENOSPC; } - rc = msi_capability_init(dev, nvec, affinity); + rc = msi_capability_init(dev, nvec, affd); if (rc == 0) return nvec; @@ -1124,29 +1123,27 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, **/ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) { - return __pci_enable_msi_range(dev, minvec, maxvec, 0); + return __pci_enable_msi_range(dev, minvec, maxvec, NULL); } EXPORT_SYMBOL(pci_enable_msi_range); static int __pci_enable_msix_range(struct pci_dev *dev, - struct msix_entry *entries, int minvec, int maxvec, - unsigned int flags) + struct msix_entry *entries, int minvec, + int maxvec, const struct irq_affinity *affd) { - bool affinity = flags & PCI_IRQ_AFFINITY; int rc, nvec = maxvec; if (maxvec < minvec) return -ERANGE; for (;;) { - if (affinity) { - nvec = irq_calc_affinity_vectors(dev->irq_affinity, - nvec); + if (affd) { + nvec = irq_calc_affinity_vectors(nvec, affd); if (nvec < minvec) return -ENOSPC; } - rc = __pci_enable_msix(dev, entries, nvec, affinity); + rc = __pci_enable_msix(dev, entries, nvec, affd); if (rc == 0) return nvec; @@ -1177,16 +1174,17 @@ static int __pci_enable_msix_range(struct pci_dev *dev, int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) { - return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0); + return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL); } EXPORT_SYMBOL(pci_enable_msix_range); /** - * pci_alloc_irq_vectors - allocate multiple IRQs for a device + * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device * @dev: PCI device to operate on * @min_vecs: minimum number of vectors required (must be >= 1) * @max_vecs: maximum (desired) number of vectors * @flags: flags or quirks for the allocation + * @affd: optional description of the affinity requirements * * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI * vectors if available, and fall back to a single legacy vector @@ -1198,20 +1196,30 @@ EXPORT_SYMBOL(pci_enable_msix_range); * To get the Linux IRQ number used for a vector that can be passed to * request_irq() use the pci_irq_vector() helper. 
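 *
 * Hypothetical usage sketch (not part of this patch; the device pointer,
 * vector counts and queue layout are assumed): reserve the first vector
 * for an admin interrupt and let the core spread the rest across CPUs:
 *
 *	struct irq_affinity affd = { .pre_vectors = 1 };
 *	int nvecs;
 *
 *	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, 32,
 *			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_AFFINITY,
 *			&affd);
 *	if (nvecs < 0)
 *		return nvecs;
 *
 * The Linux IRQ for queue i is then pci_irq_vector(pdev, i + 1), and
 * pci_free_irq_vectors(pdev) releases everything on teardown.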
*/ -int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags) +int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + const struct irq_affinity *affd) { + static const struct irq_affinity msi_default_affd; int vecs = -ENOSPC; + if (flags & PCI_IRQ_AFFINITY) { + if (!affd) + affd = &msi_default_affd; + } else { + if (WARN_ON(affd)) + affd = NULL; + } + if (flags & PCI_IRQ_MSIX) { vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, - flags); + affd); if (vecs > 0) return vecs; } if (flags & PCI_IRQ_MSI) { - vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); + vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); if (vecs > 0) return vecs; } @@ -1224,7 +1232,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, return vecs; } -EXPORT_SYMBOL(pci_alloc_irq_vectors); +EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); /** * pci_free_irq_vectors - free previously allocated IRQs for a device @@ -1294,7 +1302,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) } else if (dev->msi_enabled) { struct msi_desc *entry = first_pci_msi_entry(dev); - if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used)) + if (WARN_ON_ONCE(!entry || !entry->affinity || + nr >= entry->nvec_used)) return NULL; return &entry->affinity[nr]; diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index d966d47c9e80fbccbf904ed8c546db6645ca5be5..001860361434623fcf84de1c2533df03835de8a0 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -29,6 +29,82 @@ const u8 pci_acpi_dsm_uuid[] = { 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d }; +#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) +static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res) +{ + struct device *dev = &adev->dev; + struct resource_entry *entry; + struct list_head list; + unsigned long flags; + int ret; + + INIT_LIST_HEAD(&list); + flags = IORESOURCE_MEM; + ret = acpi_dev_get_resources(adev, &list, + acpi_dev_filter_resource_type_cb, + (void *) flags); + if (ret < 0) { + dev_err(dev, "failed to parse _CRS method, error code %d\n", + ret); + return ret; + } + + if (ret == 0) { + dev_err(dev, "no IO and memory resources present in _CRS\n"); + return -EINVAL; + } + + entry = list_first_entry(&list, struct resource_entry, node); + *res = *entry->res; + acpi_dev_free_resource_list(&list); + return 0; +} + +static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context, + void **retval) +{ + u16 *segment = context; + unsigned long long uid; + acpi_status status; + + status = acpi_evaluate_integer(handle, "_UID", NULL, &uid); + if (ACPI_FAILURE(status) || uid != *segment) + return AE_CTRL_DEPTH; + + *(acpi_handle *)retval = handle; + return AE_CTRL_TERMINATE; +} + +int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, + struct resource *res) +{ + struct acpi_device *adev; + acpi_status status; + acpi_handle handle; + int ret; + + status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle); + if (ACPI_FAILURE(status)) { + dev_err(dev, "can't find _HID %s device to locate resources\n", + hid); + return -ENODEV; + } + + ret = acpi_bus_get_device(handle, &adev); + if (ret) + return ret; + + ret = acpi_get_rc_addr(adev, res); + if (ret) { + dev_err(dev, "can't get resource from %s\n", + dev_name(&adev->dev)); + return ret; + } + + return 0; +} +#endif + phys_addr_t 
acpi_pci_root_get_mcfg_addr(acpi_handle handle) { acpi_status status = AE_NOT_EXIST; @@ -293,6 +369,30 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) } EXPORT_SYMBOL_GPL(pci_get_hp_params); +/** + * pciehp_is_native - Check whether a hotplug port is handled by the OS + * @pdev: Hotplug port to check + * + * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field + * and return the value of the "PCI Express Native Hot Plug control" bit. + * On failure to obtain the _OSC Control Field return %false. + */ +bool pciehp_is_native(struct pci_dev *pdev) +{ + struct acpi_pci_root *root; + acpi_handle handle; + + handle = acpi_find_root_bridge_handle(pdev); + if (!handle) + return false; + + root = acpi_pci_find_root(handle); + if (!root) + return false; + + return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL; +} + /** * pci_acpi_wake_bus - Root bus wakeup notification fork function. * @work: Work item to handle. diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c index 55f453de562ee63b8e53ab90cf6b671ef676c2a8..1c4af7227bcadbee2a1a286c0b3c6411b7a6f8fc 100644 --- a/drivers/pci/pci-mid.c +++ b/drivers/pci/pci-mid.c @@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) return intel_mid_pci_set_power_state(pdev, state); } +static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) +{ + return intel_mid_pci_get_power_state(pdev); +} + static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) { return PCI_D3hot; @@ -49,9 +54,10 @@ static bool mid_pci_need_resume(struct pci_dev *dev) return false; } -static struct pci_platform_pm_ops mid_pci_platform_pm = { +static const struct pci_platform_pm_ops mid_pci_platform_pm = { .is_manageable = mid_pci_power_manageable, .set_state = mid_pci_set_power_state, + .get_state = mid_pci_get_power_state, .choose_state = mid_pci_choose_state, .sleep_wake = mid_pci_sleep_wake, .run_wake = mid_pci_run_wake, diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index bcd10c795284cff70a58e73aac8ce2810da6831e..066628776e1be62ec4fdfb2692bd15dbee7ab761 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -50,6 +50,7 @@ pci_config_attr(vendor, "0x%04x\n"); pci_config_attr(device, "0x%04x\n"); pci_config_attr(subsystem_vendor, "0x%04x\n"); pci_config_attr(subsystem_device, "0x%04x\n"); +pci_config_attr(revision, "0x%02x\n"); pci_config_attr(class, "0x%06x\n"); pci_config_attr(irq, "%u\n"); @@ -568,6 +569,7 @@ static struct attribute *pci_dev_attrs[] = { &dev_attr_device.attr, &dev_attr_subsystem_vendor.attr, &dev_attr_subsystem_device.attr, + &dev_attr_revision.attr, &dev_attr_class.attr, &dev_attr_irq.attr, &dev_attr_local_cpus.attr, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index ba34907538f6160898cc1f04782a716cfe1d27d4..a881c0d3d2e87e023bd9f68eb35b9d57ac2e3a9f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev) { int i; - /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ - if (dev->is_virtfn) - return; - for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) pci_update_resource(dev, i); } @@ -2106,6 +2102,10 @@ bool pci_dev_run_wake(struct pci_dev *dev) if (!dev->pme_support) return false; + /* PME-capable in principle, but not from the intended sleep state */ + if (!pci_pme_capable(dev, pci_target_state(dev))) + return false; + while (bus->parent) { struct pci_dev *bridge = bus->self; @@ -2226,7 +2226,7 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev) * 
This function checks if it is possible to move the bridge to D3. * Currently we only allow D3 for recent enough PCIe ports. */ -static bool pci_bridge_d3_possible(struct pci_dev *bridge) +bool pci_bridge_d3_possible(struct pci_dev *bridge) { unsigned int year; @@ -2239,6 +2239,14 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge) case PCI_EXP_TYPE_DOWNSTREAM: if (pci_bridge_d3_disable) return false; + + /* + * Hotplug ports handled by firmware in System Management Mode + * may not be put into D3 by the OS (Thunderbolt on non-Macs). + */ + if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) + return false; + if (pci_bridge_d3_force) return true; @@ -2259,32 +2267,36 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge) static int pci_dev_check_d3cold(struct pci_dev *dev, void *data) { bool *d3cold_ok = data; - bool no_d3cold; - /* - * The device needs to be allowed to go D3cold and if it is wake - * capable to do so from D3cold. - */ - no_d3cold = dev->no_d3cold || !dev->d3cold_allowed || - (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) || - !pci_power_manageable(dev); + if (/* The device needs to be allowed to go D3cold ... */ + dev->no_d3cold || !dev->d3cold_allowed || - *d3cold_ok = !no_d3cold; + /* ... and if it is wakeup capable to do so from D3cold. */ + (device_may_wakeup(&dev->dev) && + !pci_pme_capable(dev, PCI_D3cold)) || - return no_d3cold; + /* If it is a bridge it must be allowed to go to D3. */ + !pci_power_manageable(dev) || + + /* Hotplug interrupts cannot be delivered if the link is down. */ + dev->is_hotplug_bridge) + + *d3cold_ok = false; + + return !*d3cold_ok; } /* * pci_bridge_d3_update - Update bridge D3 capabilities * @dev: PCI device which is changed - * @remove: Is the device being removed * * Update upstream bridge PM capabilities accordingly depending on if the * device PM configuration was changed or the device is being removed. The * change is also propagated upstream. */ -static void pci_bridge_d3_update(struct pci_dev *dev, bool remove) +void pci_bridge_d3_update(struct pci_dev *dev) { + bool remove = !device_is_registered(&dev->dev); struct pci_dev *bridge; bool d3cold_ok = true; @@ -2292,55 +2304,39 @@ static void pci_bridge_d3_update(struct pci_dev *dev, bool remove) if (!bridge || !pci_bridge_d3_possible(bridge)) return; - pci_dev_get(bridge); /* - * If the device is removed we do not care about its D3cold - * capabilities. + * If D3 is currently allowed for the bridge, removing one of its + * children won't change that. + */ + if (remove && bridge->bridge_d3) + return; + + /* + * If D3 is currently allowed for the bridge and a child is added or + * changed, disallowance of D3 can only be caused by that child, so + * we only need to check that single device, not any of its siblings. + * + * If D3 is currently not allowed for the bridge, checking the device + * first may allow us to skip checking its siblings. */ if (!remove) pci_dev_check_d3cold(dev, &d3cold_ok); - if (d3cold_ok) { - /* - * We need to go through all children to find out if all of - * them can still go to D3cold. - */ + /* + * If D3 is currently not allowed for the bridge, this may be caused + * either by the device being changed/removed or any of its siblings, + * so we need to go through all children to find out if one of them + * continues to block D3. 
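 *
 * Worked example (illustrative, not part of the original patch): if the
 * bridge already has bridge_d3 set and a child is being removed, the
 * early return above is taken; if a newly added child fails
 * pci_dev_check_d3cold(), d3cold_ok is already false here, the
 * pci_walk_bus() below is skipped, and only the final comparison with
 * bridge->bridge_d3 runs.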
+ */ + if (d3cold_ok && !bridge->bridge_d3) pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold, &d3cold_ok); - } if (bridge->bridge_d3 != d3cold_ok) { bridge->bridge_d3 = d3cold_ok; /* Propagate change to upstream bridges */ - pci_bridge_d3_update(bridge, false); + pci_bridge_d3_update(bridge); } - - pci_dev_put(bridge); -} - -/** - * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change - * @dev: PCI device that was changed - * - * If a device is added or its PM configuration, such as is it allowed to - * enter D3cold, is changed this function updates upstream bridge PM - * capabilities accordingly. - */ -void pci_bridge_d3_device_changed(struct pci_dev *dev) -{ - pci_bridge_d3_update(dev, false); -} - -/** - * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove - * @dev: PCI device being removed - * - * Function updates upstream bridge PM capabilities based on other devices - * still left on the bus. - */ -void pci_bridge_d3_device_removed(struct pci_dev *dev) -{ - pci_bridge_d3_update(dev, true); } /** @@ -2355,7 +2351,7 @@ void pci_d3cold_enable(struct pci_dev *dev) { if (dev->no_d3cold) { dev->no_d3cold = false; - pci_bridge_d3_device_changed(dev); + pci_bridge_d3_update(dev); } } EXPORT_SYMBOL_GPL(pci_d3cold_enable); @@ -2372,7 +2368,7 @@ void pci_d3cold_disable(struct pci_dev *dev) { if (!dev->no_d3cold) { dev->no_d3cold = true; - pci_bridge_d3_device_changed(dev); + pci_bridge_d3_update(dev); } } EXPORT_SYMBOL_GPL(pci_d3cold_disable); @@ -4831,36 +4827,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags) } EXPORT_SYMBOL(pci_select_bars); -/** - * pci_resource_bar - get position of the BAR associated with a resource - * @dev: the PCI device - * @resno: the resource number - * @type: the BAR type to be filled in - * - * Returns BAR position in config space, or 0 if the BAR is invalid. 
- */ -int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) -{ - int reg; - - if (resno < PCI_ROM_RESOURCE) { - *type = pci_bar_unknown; - return PCI_BASE_ADDRESS_0 + 4 * resno; - } else if (resno == PCI_ROM_RESOURCE) { - *type = pci_bar_mem32; - return dev->rom_base_reg; - } else if (resno < PCI_BRIDGE_RESOURCES) { - /* device specific resource */ - *type = pci_bar_unknown; - reg = pci_iov_resource_bar(dev, resno); - if (reg) - return reg; - } - - dev_err(&dev->dev, "BAR %d: invalid resource\n", resno); - return 0; -} - /* Some architectures require additional programming to enable VGA */ static arch_set_vga_state_t arch_set_vga_state; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 451856210e1816b4cf3932e5e32675199353b2cd..cb17db242f30de0d4dbf7f7fcd84b0c79586808c 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -1,9 +1,6 @@ #ifndef DRIVERS_PCI_H #define DRIVERS_PCI_H -#define PCI_CFG_SPACE_SIZE 256 -#define PCI_CFG_SPACE_EXP_SIZE 4096 - #define PCI_FIND_CAP_TTL 48 extern const unsigned char pcie_link_speed[]; @@ -85,8 +82,8 @@ void pci_pm_init(struct pci_dev *dev); void pci_ea_init(struct pci_dev *dev); void pci_allocate_cap_save_buffers(struct pci_dev *dev); void pci_free_cap_save_buffers(struct pci_dev *dev); -void pci_bridge_d3_device_changed(struct pci_dev *dev); -void pci_bridge_d3_device_removed(struct pci_dev *dev); +bool pci_bridge_d3_possible(struct pci_dev *dev); +void pci_bridge_d3_update(struct pci_dev *dev); static inline void pci_wakeup_event(struct pci_dev *dev) { @@ -245,7 +242,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, int pci_setup_device(struct pci_dev *dev); int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int reg); -int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); void pci_configure_ari(struct pci_dev *dev); void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head); @@ -289,7 +285,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev) #ifdef CONFIG_PCI_IOV int pci_iov_init(struct pci_dev *dev); void pci_iov_release(struct pci_dev *dev); -int pci_iov_resource_bar(struct pci_dev *dev, int resno); +void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); int pci_iov_bus_range(struct pci_bus *bus); @@ -303,10 +299,6 @@ static inline void pci_iov_release(struct pci_dev *dev) { } -static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno) -{ - return 0; -} static inline void pci_restore_iov_state(struct pci_dev *dev) { } @@ -356,4 +348,9 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) } #endif +#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) +int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, + struct resource *res); +#endif + #endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index db553dc22c8e0fca49ad98ff2f9fd3111e18918a..2b6a59266689b486c9fe42355af77868086a35b1 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c @@ -307,20 +307,6 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus) return 0; } -static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) -{ - while (1) { - if (!pci_is_pcie(dev)) - break; - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) - return dev; - if 
(!dev->bus->self) - break; - dev = dev->bus->self; - } - return NULL; -} - static int find_aer_device_iter(struct device *device, void *data) { struct pcie_device **result = data; diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 139150b2bdfd67bc63dcecc2b6691e3f4210f9c6..dea186a9d6b698488384153348389f7c9715e775 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -30,13 +30,6 @@ #include "aerdrv.h" #include "../../pci.h" -/* - * Version Information - */ -#define DRIVER_VERSION "v1.0" -#define DRIVER_AUTHOR "tom.l.nguyen@intel.com" -#define DRIVER_DESC "Root Port Advanced Error Reporting Driver" - static int aer_probe(struct pcie_device *dev); static void aer_remove(struct pcie_device *dev); static pci_ers_result_t aer_error_detected(struct pci_dev *dev, @@ -297,12 +290,12 @@ static int aer_probe(struct pcie_device *dev) { int status; struct aer_rpc *rpc; - struct device *device = &dev->device; + struct device *device = &dev->port->dev; /* Alloc rpc data structure */ rpc = aer_alloc_rpc(dev); if (!rpc) { - dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); + dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n"); aer_remove(dev); return -ENOMEM; } @@ -310,7 +303,8 @@ static int aer_probe(struct pcie_device *dev) /* Request IRQ ISR */ status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev); if (status) { - dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); + dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n", + dev->irq); aer_remove(dev); return status; } @@ -318,8 +312,8 @@ static int aer_probe(struct pcie_device *dev) rpc->isr = 1; aer_enable_rootport(rpc); - - return status; + dev_info(device, "AER enabled with IRQ %d\n", dev->irq); + return 0; } /** diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 0ec649d961d7a6ee515747c3458a8ce1df108099..17ac1dce32867051298a5489841de8b636835a68 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -351,12 +351,26 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) return; } + /* Get upstream/downstream components' register state */ + pcie_get_aspm_reg(parent, &upreg); + child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); + pcie_get_aspm_reg(child, &dwreg); + + /* + * If ASPM not supported, don't mess with the clocks and link, + * bail out now. 
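 *
 * Illustrative example (not from the original patch): "support" is the
 * two-bit ASPM Support field from Link Capabilities, so with an upstream
 * port advertising L0s and L1 (upreg.support == 0x3) and a downstream
 * device advertising no ASPM (dwreg.support == 0x0), the intersection
 * below is 0 and the link is left untouched; if the device advertised L1
 * only (0x2), the result is 0x2 and initialization continues.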
+ */ + if (!(upreg.support & dwreg.support)) + return; + /* Configure common clock before checking latencies */ pcie_aspm_configure_common_clock(link); - /* Get upstream/downstream components' register state */ + /* + * Re-read upstream/downstream components' register state + * after clock configuration + */ pcie_get_aspm_reg(parent, &upreg); - child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); pcie_get_aspm_reg(child, &dwreg); /* @@ -886,8 +900,8 @@ static ssize_t clk_ctl_store(struct device *dev, return n; } -static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store); -static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store); +static DEVICE_ATTR_RW(link_state); +static DEVICE_ATTR_RW(clk_ctl); static char power_group[] = "power"; void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 884bad5320f829e754290c5edd38f2f0aadb3620..717529331dace5f45bb7337b1fb28adb7e81db5c 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -300,8 +300,6 @@ static irqreturn_t pcie_pme_irq(int irq, void *context) */ static int pcie_pme_set_native(struct pci_dev *dev, void *ign) { - dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n"); - device_set_run_wake(&dev->dev, true); dev->pme_interrupt = true; return 0; @@ -319,23 +317,8 @@ static int pcie_pme_set_native(struct pci_dev *dev, void *ign) static void pcie_pme_mark_devices(struct pci_dev *port) { pcie_pme_set_native(port, NULL); - if (port->subordinate) { + if (port->subordinate) pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL); - } else { - struct pci_bus *bus = port->bus; - struct pci_dev *dev; - - /* Check if this is a root port event collector. */ - if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus) - return; - - down_read(&pci_bus_sem); - list_for_each_entry(dev, &bus->devices, bus_list) - if (pci_is_pcie(dev) - && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) - pcie_pme_set_native(dev, NULL); - up_read(&pci_bus_sem); - } } /** @@ -364,12 +347,14 @@ static int pcie_pme_probe(struct pcie_device *srv) ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv); if (ret) { kfree(data); - } else { - pcie_pme_mark_devices(port); - pcie_pme_interrupt_enable(port, true); + return ret; } - return ret; + dev_info(&port->dev, "Signaling PME with IRQ %d\n", srv->irq); + + pcie_pme_mark_devices(port); + pcie_pme_interrupt_enable(port, true); + return 0; } static bool pcie_pme_check_wakeup(struct pci_bus *bus) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index e9270b4026f3c05ff43959c8945efef3e63ae1de..9698289f105c324bf6d3cd42ce12d421c0fd2838 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -499,7 +499,6 @@ static int pcie_port_probe_service(struct device *dev) if (status) return status; - dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name); get_device(dev); return 0; } @@ -524,8 +523,6 @@ static int pcie_port_remove_service(struct device *dev) pciedev = to_pcie_device(dev); driver = to_service_driver(dev->driver); if (driver && driver->remove) { - dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", - driver->name); driver->remove(pciedev); put_device(dev); } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 79327cc14e7dbbc43cdcc964499e0083c483aba1..8aa3f14bc87d59cc4c7461a5f071539af2394dbf 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -19,6 
+19,7 @@ #include #include +#include "../pci.h" #include "portdrv.h" #include "aer/aerdrv.h" @@ -149,15 +150,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev, pci_save_state(dev); - /* - * Prevent runtime PM if the port is advertising support for PCIe - * hotplug. Otherwise the BIOS hotplug SMI code might not be able - * to enumerate devices behind this port properly (the port is - * powered down preventing all config space accesses to the - * subordinate devices). We can't be sure for native PCIe hotplug - * either so prevent that as well. - */ - if (!dev->is_hotplug_bridge) { + if (pci_bridge_d3_possible(dev)) { /* * Keep the port resumed 100ms to make sure things like * config space accesses from userspace (lspci) will not @@ -175,7 +168,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev, static void pcie_portdrv_remove(struct pci_dev *dev) { - if (!dev->is_hotplug_bridge) { + if (pci_bridge_d3_possible(dev)) { pm_runtime_forbid(&dev->dev); pm_runtime_get_noresume(&dev->dev); pm_runtime_dont_use_autosuspend(&dev->dev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ab002671fa60e14b83568a24d2cf504687f391f9..e164b5c9f0f03d953e825150977fe0c571b40690 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK; } } else { - res->flags |= (l & IORESOURCE_ROM_ENABLE); + if (l & PCI_ROM_ADDRESS_ENABLE) + res->flags |= IORESOURCE_ROM_ENABLE; l64 = l & PCI_ROM_ADDRESS_MASK; sz64 = sz & PCI_ROM_ADDRESS_MASK; mask64 = (u32)PCI_ROM_ADDRESS_MASK; @@ -521,18 +522,19 @@ static void pci_release_host_bridge_dev(struct device *dev) kfree(bridge); } -static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b) +struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) { struct pci_host_bridge *bridge; - bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); if (!bridge) return NULL; INIT_LIST_HEAD(&bridge->windows); - bridge->bus = b; + return bridge; } +EXPORT_SYMBOL(pci_alloc_host_bridge); static const unsigned char pcix_bus_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ @@ -717,6 +719,123 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus) dev_set_msi_domain(&bus->dev, d); } +int pci_register_host_bridge(struct pci_host_bridge *bridge) +{ + struct device *parent = bridge->dev.parent; + struct resource_entry *window, *n; + struct pci_bus *bus, *b; + resource_size_t offset; + LIST_HEAD(resources); + struct resource *res; + char addr[64], *fmt; + const char *name; + int err; + + bus = pci_alloc_bus(NULL); + if (!bus) + return -ENOMEM; + + bridge->bus = bus; + + /* temporarily move resources off the list */ + list_splice_init(&bridge->windows, &resources); + bus->sysdata = bridge->sysdata; + bus->msi = bridge->msi; + bus->ops = bridge->ops; + bus->number = bus->busn_res.start = bridge->busnr; +#ifdef CONFIG_PCI_DOMAINS_GENERIC + bus->domain_nr = pci_bus_find_domain_nr(bus, parent); +#endif + + b = pci_find_bus(pci_domain_nr(bus), bridge->busnr); + if (b) { + /* If we already got to this bus through a different bridge, ignore it */ + dev_dbg(&b->dev, "bus already known\n"); + err = -EEXIST; + goto free; + } + + dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus), + bridge->busnr); + + err = pcibios_root_bridge_prepare(bridge); + if (err) + goto free; + + err = device_register(&bridge->dev); + if (err) + put_device(&bridge->dev); + + bus->bridge = get_device(&bridge->dev); + 
device_enable_async_suspend(bus->bridge); + pci_set_bus_of_node(bus); + pci_set_bus_msi_domain(bus); + + if (!parent) + set_dev_node(bus->bridge, pcibus_to_node(bus)); + + bus->dev.class = &pcibus_class; + bus->dev.parent = bus->bridge; + + dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number); + name = dev_name(&bus->dev); + + err = device_register(&bus->dev); + if (err) + goto unregister; + + pcibios_add_bus(bus); + + /* Create legacy_io and legacy_mem files for this bus */ + pci_create_legacy_files(bus); + + if (parent) + dev_info(parent, "PCI host bridge to bus %s\n", name); + else + pr_info("PCI host bridge to bus %s\n", name); + + /* Add initial resources to the bus */ + resource_list_for_each_entry_safe(window, n, &resources) { + list_move_tail(&window->node, &bridge->windows); + offset = window->offset; + res = window->res; + + if (res->flags & IORESOURCE_BUS) + pci_bus_insert_busn_res(bus, bus->number, res->end); + else + pci_bus_add_resource(bus, res, 0); + + if (offset) { + if (resource_type(res) == IORESOURCE_IO) + fmt = " (bus address [%#06llx-%#06llx])"; + else + fmt = " (bus address [%#010llx-%#010llx])"; + + snprintf(addr, sizeof(addr), fmt, + (unsigned long long)(res->start - offset), + (unsigned long long)(res->end - offset)); + } else + addr[0] = '\0'; + + dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr); + } + + down_write(&pci_bus_sem); + list_add_tail(&bus->node, &pci_root_buses); + up_write(&pci_bus_sem); + + return 0; + +unregister: + put_device(&bridge->dev); + device_unregister(&bridge->dev); + +free: + kfree(bus); + return err; +} +EXPORT_SYMBOL(pci_register_host_bridge); + static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr) { @@ -1439,6 +1558,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) dev_warn(&dev->dev, "PCI-X settings not supported\n"); } +static bool pcie_root_rcb_set(struct pci_dev *dev) +{ + struct pci_dev *rp = pcie_find_root_port(dev); + u16 lnkctl; + + if (!rp) + return false; + + pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_RCB) + return true; + + return false; +} + static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) { int pos; @@ -1468,9 +1602,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); /* Initialize Link Control Register */ - if (pcie_cap_has_lnkctl(dev)) + if (pcie_cap_has_lnkctl(dev)) { + + /* + * If the Root Port supports Read Completion Boundary of + * 128, set RCB to 128. Otherwise, clear it. 
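 *
 * Worked equation (not part of the original patch): the
 * pcie_capability_clear_and_set_word() call below is passed
 * ~pci_exp_lnkctl_and as its clear mask, so Link Control ends up as
 *
 *	new = (old & pci_exp_lnkctl_and) | pci_exp_lnkctl_or
 *
 * and the lines added here simply make the PCI_EXP_LNKCTL_RCB bit of
 * the or-mask follow pcie_root_rcb_set().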
+ */ + hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; + hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; + if (pcie_root_rcb_set(dev)) + hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; + pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); + } /* Find Advanced Error Reporting Enhanced Capability */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); @@ -1738,8 +1883,7 @@ static void pci_dma_configure(struct pci_dev *dev) if (attr == DEV_DMA_NOT_SUPPORTED) dev_warn(&dev->dev, "DMA not supported.\n"); else - arch_setup_dma_ops(&dev->dev, 0, 0, NULL, - attr == DEV_DMA_COHERENT); + acpi_dma_configure(&dev->dev, attr); } pci_put_host_bridge_device(bridge); @@ -2130,113 +2274,43 @@ void __weak pcibios_remove_bus(struct pci_bus *bus) { } -struct pci_bus *pci_create_root_bus(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata, struct list_head *resources) +static struct pci_bus *pci_create_root_bus_msi(struct device *parent, + int bus, struct pci_ops *ops, void *sysdata, + struct list_head *resources, struct msi_controller *msi) { int error; struct pci_host_bridge *bridge; - struct pci_bus *b, *b2; - struct resource_entry *window, *n; - struct resource *res; - resource_size_t offset; - char bus_addr[64]; - char *fmt; - - b = pci_alloc_bus(NULL); - if (!b) - return NULL; - - b->sysdata = sysdata; - b->ops = ops; - b->number = b->busn_res.start = bus; -#ifdef CONFIG_PCI_DOMAINS_GENERIC - b->domain_nr = pci_bus_find_domain_nr(b, parent); -#endif - b2 = pci_find_bus(pci_domain_nr(b), bus); - if (b2) { - /* If we already got to this bus through a different bridge, ignore it */ - dev_dbg(&b2->dev, "bus already known\n"); - goto err_out; - } - bridge = pci_alloc_host_bridge(b); + bridge = pci_alloc_host_bridge(0); if (!bridge) - goto err_out; + return NULL; bridge->dev.parent = parent; bridge->dev.release = pci_release_host_bridge_dev; - dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus); - error = pcibios_root_bridge_prepare(bridge); - if (error) { - kfree(bridge); - goto err_out; - } - - error = device_register(&bridge->dev); - if (error) { - put_device(&bridge->dev); - goto err_out; - } - b->bridge = get_device(&bridge->dev); - device_enable_async_suspend(b->bridge); - pci_set_bus_of_node(b); - pci_set_bus_msi_domain(b); - - if (!parent) - set_dev_node(b->bridge, pcibus_to_node(b)); - - b->dev.class = &pcibus_class; - b->dev.parent = b->bridge; - dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus); - error = device_register(&b->dev); - if (error) - goto class_dev_reg_err; - pcibios_add_bus(b); + list_splice_init(resources, &bridge->windows); + bridge->sysdata = sysdata; + bridge->busnr = bus; + bridge->ops = ops; + bridge->msi = msi; - /* Create legacy_io and legacy_mem files for this bus */ - pci_create_legacy_files(b); - - if (parent) - dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev)); - else - printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); - - /* Add initial resources to the bus */ - resource_list_for_each_entry_safe(window, n, resources) { - list_move_tail(&window->node, &bridge->windows); - res = window->res; - offset = window->offset; - if (res->flags & IORESOURCE_BUS) - pci_bus_insert_busn_res(b, bus, res->end); - else - pci_bus_add_resource(b, res, 0); - if (offset) { - if (resource_type(res) == IORESOURCE_IO) - fmt = " (bus address [%#06llx-%#06llx])"; - else - fmt = " (bus address [%#010llx-%#010llx])"; - snprintf(bus_addr, sizeof(bus_addr), fmt, - (unsigned long 
long) (res->start - offset), - (unsigned long long) (res->end - offset)); - } else - bus_addr[0] = '\0'; - dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr); - } - - down_write(&pci_bus_sem); - list_add_tail(&b->node, &pci_root_buses); - up_write(&pci_bus_sem); + error = pci_register_host_bridge(bridge); + if (error < 0) + goto err_out; - return b; + return bridge->bus; -class_dev_reg_err: - put_device(&bridge->dev); - device_unregister(&bridge->dev); err_out: - kfree(b); + kfree(bridge); return NULL; } + +struct pci_bus *pci_create_root_bus(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata, struct list_head *resources) +{ + return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, + NULL); +} EXPORT_SYMBOL_GPL(pci_create_root_bus); int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) @@ -2317,12 +2391,10 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus, break; } - b = pci_create_root_bus(parent, bus, ops, sysdata, resources); + b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi); if (!b) return NULL; - b->msi = msi; - if (!found) { dev_info(&b->dev, "No busn resource found for root bus, will use [bus %02x-ff]\n", diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 2408abe4ee8c68919d8a79465e62ab16f3c8660e..f82710a8694ddd48925a8ba92ef196e033e06dc8 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include "pci.h" diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c232729f5b1b6863f914fdab6c836d4d74204453..1800befa8b8baa1ca87052b7a4dc4e36cde9b2d8 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2156,7 +2156,7 @@ static void quirk_blacklist_vpd(struct pci_dev *dev) { if (dev->vpd) { dev->vpd->len = 0; - dev_warn(&dev->dev, FW_BUG "VPD access disabled\n"); + dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n"); } } @@ -3044,7 +3044,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb); static ktime_t fixup_debug_start(struct pci_dev *dev, void (*fn)(struct pci_dev *dev)) { - ktime_t calltime = ktime_set(0, 0); + ktime_t calltime = 0; dev_dbg(&dev->dev, "calling %pF\n", fn); if (initcall_debug) { @@ -3137,8 +3137,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay); + /* - * Some devices may pass our check in pci_intx_mask_supported if + * Some devices may pass our check in pci_intx_mask_supported() if * PCI_COMMAND_INTX_DISABLE works though they actually do not properly * support this feature. 
*/ @@ -3146,53 +3147,139 @@ static void quirk_broken_intx_masking(struct pci_dev *dev) { dev->broken_intx_masking = 1; } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ - quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ + quirk_broken_intx_masking); + /* * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC * * RTL8110SC - Fails under PCI device assignment using DisINTx masking. */ -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, - quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169, + quirk_broken_intx_masking); /* * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking, * DisINTx can be set but the interrupt status bit is non-functional. */ -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1, - quirk_broken_intx_masking); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2, - quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, + quirk_broken_intx_masking); + +static u16 mellanox_broken_intx_devs[] = { + 
PCI_DEVICE_ID_MELLANOX_HERMON_SDR, + PCI_DEVICE_ID_MELLANOX_HERMON_DDR, + PCI_DEVICE_ID_MELLANOX_HERMON_QDR, + PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2, + PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2, + PCI_DEVICE_ID_MELLANOX_HERMON_EN, + PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2, + PCI_DEVICE_ID_MELLANOX_CONNECTX_EN, + PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2, + PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2, + PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2, + PCI_DEVICE_ID_MELLANOX_CONNECTX2, + PCI_DEVICE_ID_MELLANOX_CONNECTX3, + PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO, +}; + +#define CONNECTX_4_CURR_MAX_MINOR 99 +#define CONNECTX_4_INTX_SUPPORT_MINOR 14 + +/* + * Check ConnectX-4/LX FW version to see if it supports legacy interrupts. + * If so, don't mark it as broken. + * FW minor > 99 means older FW version format and no INTx masking support. + * FW minor < 14 means new FW version format and no INTx masking support. + */ +static void mellanox_check_broken_intx_masking(struct pci_dev *pdev) +{ + __be32 __iomem *fw_ver; + u16 fw_major; + u16 fw_minor; + u16 fw_subminor; + u32 fw_maj_min; + u32 fw_sub_min; + int i; + + for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) { + if (pdev->device == mellanox_broken_intx_devs[i]) { + pdev->broken_intx_masking = 1; + return; + } + } + + /* Getting here means Connect-IB cards and up. Connect-IB has no INTx + * support so shouldn't be checked further + */ + if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB) + return; + + if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 && + pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) + return; + + /* For ConnectX-4 and ConnectX-4LX, need to check FW support */ + if (pci_enable_device_mem(pdev)) { + dev_warn(&pdev->dev, "Can't enable device memory\n"); + return; + } + + fw_ver = ioremap(pci_resource_start(pdev, 0), 4); + if (!fw_ver) { + dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n"); + goto out; + } + + /* Reading from resource space should be 32b aligned */ + fw_maj_min = ioread32be(fw_ver); + fw_sub_min = ioread32be(fw_ver + 1); + fw_major = fw_maj_min & 0xffff; + fw_minor = fw_maj_min >> 16; + fw_subminor = fw_sub_min & 0xffff; + if (fw_minor > CONNECTX_4_CURR_MAX_MINOR || + fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) { + dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n", + fw_major, fw_minor, fw_subminor, pdev->device == + PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 
12 : 14); + pdev->broken_intx_masking = 1; + } + + iounmap(fw_ver); + +out: + pci_disable_device(pdev); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, + mellanox_check_broken_intx_masking); static void quirk_no_bus_reset(struct pci_dev *dev) { @@ -3255,6 +3342,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, quirk_thunderbolt_hotplug_msi); +static void quirk_chelsio_extend_vpd(struct pci_dev *dev) +{ + pci_set_vpd_size(dev, 8192); +} + +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); + #ifdef CONFIG_ACPI /* * Apple: Shutdown Cactus Ridge Thunderbolt controller. diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index f9357e09e9b30131d631361a9d0092a1e19980bb..73a03d3825903b4cf8674c727d5eac8cf2cb082c 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -40,7 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev) list_del(&dev->bus_list); up_write(&pci_bus_sem); - pci_bridge_d3_device_removed(dev); + pci_bridge_d3_update(dev); pci_free_resources(dev); put_device(&dev->dev); } diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index 06663d391b393782de79260c8ae4addc46983535..b6edb187d160c6dda3fb4bbccb5b0b85877ec4f5 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev) if (res->flags & IORESOURCE_ROM_SHADOW) return 0; + /* + * Ideally pci_update_resource() would update the ROM BAR address, + * and we would only set the enable bit here. But apparently some + * devices have buggy ROM BARs that read as zero when disabled. 
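The mellanox_check_broken_intx_masking() quirk added above reads two 32-bit big-endian words from the start of the ConnectX-4 initialization segment and splits them into major, minor and subminor firmware version fields; INTx masking is left enabled only when the minor version falls between 14 and 99. A stand-alone sketch of that decode and range check, with invented register contents:

#include <stdio.h>
#include <stdint.h>

#define CONNECTX_4_CURR_MAX_MINOR       99
#define CONNECTX_4_INTX_SUPPORT_MINOR   14

int main(void)
{
        /* Hypothetical values as returned by ioread32be() on BAR 0. */
        uint32_t fw_maj_min = (20u << 16) | 12;  /* minor 20, major 12 */
        uint32_t fw_sub_min = 1100;              /* subminor 1100      */

        unsigned fw_major    = fw_maj_min & 0xffff;   /* u16 in the driver */
        unsigned fw_minor    = fw_maj_min >> 16;
        unsigned fw_subminor = fw_sub_min & 0xffff;

        int intx_broken = fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
                          fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR;

        printf("FW %u.%u.%u -> INTx masking %s\n", fw_major, fw_minor,
               fw_subminor, intx_broken ? "broken" : "supported");
        return 0;
}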
+ */ pcibios_resource_to_bus(pdev->bus, ®ion, res); pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); rom_addr &= ~PCI_ROM_ADDRESS_MASK; diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 66c4d8f4223377d6bb54849d7794fc8172b1e0a2..4bc589ee78d03716c708fa18d56844049e7ac433 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -25,21 +25,18 @@ #include #include "pci.h" - -void pci_update_resource(struct pci_dev *dev, int resno) +static void pci_std_update_resource(struct pci_dev *dev, int resno) { struct pci_bus_region region; bool disable; u16 cmd; u32 new, check, mask; int reg; - enum pci_bar_type type; struct resource *res = dev->resource + resno; - if (dev->is_virtfn) { - dev_warn(&dev->dev, "can't update VF BAR%d\n", resno); + /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ + if (dev->is_virtfn) return; - } /* * Ignore resources for unimplemented BARs and unused resource slots @@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno) return; pcibios_resource_to_bus(dev->bus, ®ion, res); + new = region.start; - new = region.start | (res->flags & PCI_REGION_FLAG_MASK); - if (res->flags & IORESOURCE_IO) + if (res->flags & IORESOURCE_IO) { mask = (u32)PCI_BASE_ADDRESS_IO_MASK; - else + new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; + } else if (resno == PCI_ROM_RESOURCE) { + mask = (u32)PCI_ROM_ADDRESS_MASK; + } else { mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; + new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; + } - reg = pci_resource_bar(dev, resno, &type); - if (!reg) - return; - if (type != pci_bar_unknown) { + if (resno < PCI_ROM_RESOURCE) { + reg = PCI_BASE_ADDRESS_0 + 4 * resno; + } else if (resno == PCI_ROM_RESOURCE) { + + /* + * Apparently some Matrox devices have ROM BARs that read + * as zero when disabled, so don't update ROM BARs unless + * they're enabled. See https://lkml.org/lkml/2005/8/30/138. + */ if (!(res->flags & IORESOURCE_ROM_ENABLE)) return; + + reg = dev->rom_base_reg; new |= PCI_ROM_ADDRESS_ENABLE; - } + } else + return; /* * We can't update a 64-bit BAR atomically, so when possible, @@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno) pci_write_config_word(dev, PCI_COMMAND, cmd); } +void pci_update_resource(struct pci_dev *dev, int resno) +{ + if (resno <= PCI_ROM_RESOURCE) + pci_std_update_resource(dev, resno); +#ifdef CONFIG_PCI_IOV + else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) + pci_iov_update_resource(dev, resno); +#endif +} + int pci_claim_resource(struct pci_dev *dev, int resource) { struct resource *res = &dev->resource[resource]; @@ -121,6 +141,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource) return -EINVAL; } + /* + * If we have a shadow copy in RAM, the PCI device doesn't respond + * to the shadow range, so we don't need to claim it, and upstream + * bridges don't need to route the range to the device. 
+ */ + if (res->flags & IORESOURCE_ROM_SHADOW) + return 0; + root = pci_find_parent_resource(dev, res); if (!root) { dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n", diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index b91c4da6836574f78df3785ae9ef419b80e069be..9bf993e1f71e837faebe8227accb6a7225a3d7cc 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include "pci.h" SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index d6ff5e82377d29c719f84fdf4ab6b3db1400edf3..8fc2e9532575679ab60724201fa0181b1679c809 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -1038,10 +1038,8 @@ static int pcifront_detach_devices(struct pcifront_device *pdev) err = -ENOMEM; goto out; } - err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d", - &state); - if (err != 1) - state = XenbusStateUnknown; + state = xenbus_read_unsigned(pdev->xdev->otherend, str, + XenbusStateUnknown); if (state != XenbusStateClosing) continue; diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c index eb126b98ed8a3d42cf83566c30c53546896f4c54..e50bbf8261885dcd123389f63377b1a7270b609b 100644 --- a/drivers/pcmcia/m32r_pcc.c +++ b/drivers/pcmcia/m32r_pcc.c @@ -296,10 +296,11 @@ static int __init is_alive(u_short sock) return 0; } -static void add_pcc_socket(ulong base, int irq, ulong mapaddr, - unsigned int ioaddr) +static int add_pcc_socket(ulong base, int irq, ulong mapaddr, + unsigned int ioaddr) { pcc_socket_t *t = &socket[pcc_sockets]; + int err; /* add sockets */ t->ioaddr = ioaddr; @@ -328,11 +329,16 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr, t->socket.irq_mask = 0; t->socket.pci_irq = 2 + pcc_sockets; /* XXX */ - request_irq(irq, pcc_interrupt, 0, "m32r-pcc", pcc_interrupt); + err = request_irq(irq, pcc_interrupt, 0, "m32r-pcc", pcc_interrupt); + if (err) { + if (t->base > 0) + release_region(t->base, 0x20); + return err; + } pcc_sockets++; - return; + return 0; } @@ -683,26 +689,29 @@ static int __init init_m32r_pcc(void) return ret; ret = platform_device_register(&pcc_device); - if (ret){ - platform_driver_unregister(&pcc_driver); - return ret; - } + if (ret) + goto unreg_driv; printk(KERN_INFO "m32r PCC probe:\n"); pcc_sockets = 0; - add_pcc_socket(M32R_PCC0_BASE, PCC0_IRQ, M32R_PCC0_MAPBASE, 0x1000); + ret = add_pcc_socket(M32R_PCC0_BASE, PCC0_IRQ, M32R_PCC0_MAPBASE, + 0x1000); + if (ret) + goto unreg_dev; #ifdef CONFIG_M32RPCC_SLOT2 - add_pcc_socket(M32R_PCC1_BASE, PCC1_IRQ, M32R_PCC1_MAPBASE, 0x2000); + ret = add_pcc_socket(M32R_PCC1_BASE, PCC1_IRQ, M32R_PCC1_MAPBASE, + 0x2000); + if (ret) + goto unreg_dev; #endif if (pcc_sockets == 0) { printk("socket is not found.\n"); - platform_device_unregister(&pcc_device); - platform_driver_unregister(&pcc_driver); - return -ENODEV; + ret = -ENODEV; + goto unreg_dev; } /* Set up interrupt handler(s) */ @@ -728,6 +737,12 @@ static int __init init_m32r_pcc(void) } return 0; + +unreg_dev: + platform_device_unregister(&pcc_device); +unreg_driv: + platform_driver_unregister(&pcc_driver); + return ret; } /* init_m32r_pcc */ static void __exit exit_m32r_pcc(void) diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 153f3122283deb9fa4260c0e1da0c443a5fde2c0..b6b316de055c7129648904cbdae92cafb593e8dd 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -107,7 +107,7 @@ int 
soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt, ret = regulator_enable(r->reg); } else { - regulator_disable(r->reg); + ret = regulator_disable(r->reg); } if (ret == 0) r->on = on; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index b37b5729456663bc1639d7ed544634432d883470..6d9335865880e18bd3d8e6172e876fe38180a22a 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -1084,7 +1084,7 @@ static int arm_pmu_hp_init(void) int ret; ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING, - "AP_PERF_ARM_STARTING", + "perf/arm/pmu:starting", arm_perf_starting_cpu, NULL); if (ret) pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index fe00f9134d515e604fcbf24769a90da3376916b8..e8eb7f225a88eee4490180bd6a3bc2f2ca6872f8 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -129,16 +129,6 @@ config PHY_MIPHY28LP Enable this to support the miphy transceiver (for SATA/PCIE/USB3) that is part of STMicroelectronics STiH407 SoC. -config PHY_MIPHY365X - tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series" - depends on ARCH_STI - depends on HAS_IOMEM - depends on OF - select GENERIC_PHY - help - Enable this to support the miphy transceiver (for SATA/PCIE) - that is part of STMicroelectronics STiH41x SoC series. - config PHY_RCAR_GEN2 tristate "Renesas R-Car generation 2 USB PHY driver" depends on ARCH_RENESAS @@ -373,7 +363,9 @@ config PHY_ROCKCHIP_INNO_USB2 tristate "Rockchip INNO USB2PHY Driver" depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF depends on COMMON_CLK + depends on USB_SUPPORT select GENERIC_PHY + select USB_COMMON help Support for Rockchip USB2.0 PHY with Innosilicon IP block. @@ -438,14 +430,6 @@ config PHY_STIH407_USB Enable this support to enable the picoPHY device used by USB2 and USB3 controllers on STMicroelectronics STiH407 SoC families. -config PHY_STIH41X_USB - tristate "STMicroelectronics USB2 PHY driver for STiH41x series" - depends on ARCH_STI - select GENERIC_PHY - help - Enable this to support the USB transceiver that is part of - STMicroelectronics STiH41x SoC series. - config PHY_QCOM_UFS tristate "Qualcomm UFS PHY driver" depends on OF && ARCH_QCOM @@ -489,4 +473,17 @@ config PHY_NS2_PCIE help Enable this to support the Broadcom Northstar2 PCIe PHY. If unsure, say N. + +config PHY_MESON8B_USB2 + tristate "Meson8b and GXBB USB2 PHY driver" + default ARCH_MESON + depends on OF && (ARCH_MESON || COMPILE_TEST) + depends on USB_SUPPORT + select USB_COMMON + select GENERIC_PHY + help + Enable this to support the Meson USB2 PHYs found in Meson8b + and GXBB SoCs. + If unsure, say N. 
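The init_m32r_pcc() rework a few hunks above replaces duplicated cleanup calls with a single unwind ladder: a failure after platform_device_register() jumps to unreg_dev, which falls through to unreg_driv, so everything is released in reverse order of acquisition. A small stand-alone illustration of that goto-unwind shape, with placeholder acquire/release functions standing in for the driver, device and socket registration steps:

#include <stdio.h>

static int acquire_a(void) { return 0; }        /* e.g. driver register  */
static int acquire_b(void) { return 0; }        /* e.g. device register  */
static int acquire_c(void) { return -1; }       /* e.g. add_pcc_socket() */
static void release_b(void) { puts("release b"); }
static void release_a(void) { puts("release a"); }

static int init(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;

        ret = acquire_b();
        if (ret)
                goto undo_a;

        ret = acquire_c();
        if (ret)
                goto undo_b;

        return 0;

undo_b:                 /* unwind in reverse order of acquisition */
        release_b();
undo_a:
        release_a();
        return ret;
}

int main(void)
{
        printf("init: %d\n", init());
        return 0;
}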
+ endmenu diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index a534cf5be07dbe1a00d5bf5f7eab958c66060642..65eb2f436a41d9c2c302fe3ac02c27242c620c38 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -18,7 +18,6 @@ obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o obj-$(CONFIG_PHY_MIPHY28LP) += phy-miphy28lp.o -obj-$(CONFIG_PHY_MIPHY365X) += phy-miphy365x.o obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o obj-$(CONFIG_OMAP_CONTROL_PHY) += phy-omap-control.o @@ -50,7 +49,6 @@ obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o obj-$(CONFIG_PHY_XGENE) += phy-xgene.o obj-$(CONFIG_PHY_STIH407_USB) += phy-stih407-usb.o -obj-$(CONFIG_PHY_STIH41X_USB) += phy-stih41x-usb.o obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o @@ -60,3 +58,4 @@ obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o +obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c index f84a33a1bdd935c5a6c171929d172b1a1a67840c..2c7a57f2d595743c28d3efd66c36df2b3f37a727 100644 --- a/drivers/phy/phy-berlin-sata.c +++ b/drivers/phy/phy-berlin-sata.c @@ -85,7 +85,6 @@ static int phy_berlin_sata_power_on(struct phy *phy) struct phy_berlin_desc *desc = phy_get_drvdata(phy); struct phy_berlin_priv *priv = dev_get_drvdata(phy->dev.parent); void __iomem *ctrl_reg = priv->base + 0x60 + (desc->index * 0x80); - int ret = 0; u32 regval; clk_prepare_enable(priv->clk); @@ -130,7 +129,7 @@ static int phy_berlin_sata_power_on(struct phy *phy) clk_disable_unprepare(priv->clk); - return ret; + return 0; } static int phy_berlin_sata_power_off(struct phy *phy) diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c index 8ffc44afdb75baaceab39801b811d3c3314604e6..ccbc3d9949982a8be160cf77e8ad2da98d0113fe 100644 --- a/drivers/phy/phy-brcm-sata.c +++ b/drivers/phy/phy-brcm-sata.c @@ -140,7 +140,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port) default: dev_err(priv->dev, "invalid phy version\n"); break; - }; + } return priv->phy_base + (port->portnum * size); } @@ -157,7 +157,7 @@ static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port) default: dev_err(priv->dev, "invalid phy version\n"); break; - }; + } return priv->ctrl_base + (port->portnum * size); } @@ -365,7 +365,7 @@ static int brcm_sata_phy_init(struct phy *phy) break; default: rc = -ENODEV; - }; + } return rc; } diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c index 32ae78c8ca17655d877a66952d098d56a0eacf3c..1b82bff6330fd2f10b51373253943af64da46fb0 100644 --- a/drivers/phy/phy-da8xx-usb.c +++ b/drivers/phy/phy-da8xx-usb.c @@ -23,6 +23,8 @@ #include #include +#define PHY_INIT_BITS (CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN) + struct da8xx_usb_phy { struct phy_provider *phy_provider; struct phy *usb11_phy; @@ -198,7 +200,8 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev) } else { int ret; - ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); + ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", + "ohci-da8xx"); if (ret) 
dev_warn(dev, "Failed to create usb11 phy lookup\n"); ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy", @@ -207,6 +210,9 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev) dev_warn(dev, "Failed to create usb20 phy lookup\n"); } + regmap_write_bits(d_phy->regmap, CFGCHIP(2), + PHY_INIT_BITS, PHY_INIT_BITS); + return 0; } @@ -216,7 +222,7 @@ static int da8xx_usb_phy_remove(struct platform_device *pdev) if (!pdev->dev.of_node) { phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx"); - phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); + phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx"); } return 0; diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c index 8b851f718123a1110016cfc462385b85a9da3bac..6bee04cc4d5344f620e464c1936823cb31919b70 100644 --- a/drivers/phy/phy-exynos-mipi-video.c +++ b/drivers/phy/phy-exynos-mipi-video.c @@ -229,19 +229,6 @@ struct exynos_mipi_video_phy { spinlock_t slock; }; -static inline int __is_running(const struct exynos_mipi_phy_desc *data, - struct exynos_mipi_video_phy *state) -{ - u32 val; - int ret; - - ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val); - if (ret) - return 0; - - return val & data->resetn_val; -} - static int __set_phy_state(const struct exynos_mipi_phy_desc *data, struct exynos_mipi_video_phy *state, unsigned int on) { @@ -251,7 +238,7 @@ static int __set_phy_state(const struct exynos_mipi_phy_desc *data, /* disable in PMU sysreg */ if (!on && data->coupled_phy_id >= 0 && - !__is_running(state->phys[data->coupled_phy_id].data, state)) { + state->phys[data->coupled_phy_id].phy->power_count == 0) { regmap_read(state->regmaps[data->enable_map], data->enable_reg, &val); val &= ~data->enable_val; diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c index f30bbb0fb3b2652e897c1922a7f51e48b3dc270f..1f50e10048284d936733a881aa7b9f0ec273d082 100644 --- a/drivers/phy/phy-exynos4210-usb2.c +++ b/drivers/phy/phy-exynos4210-usb2.c @@ -141,7 +141,7 @@ static void exynos4210_isol(struct samsung_usb2_phy_instance *inst, bool on) break; default: return; - }; + } regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask); } @@ -179,7 +179,7 @@ static void exynos4210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) rstbits = EXYNOS_4210_URSTCON_PHY1_P1P2 | EXYNOS_4210_URSTCON_HOST_LINK_P2; break; - }; + } if (on) { clk = readl(drv->reg_phy + EXYNOS_4210_UPHYCLK); diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c index 765da90a536f057f9e3ccfc4bfe24868a388a63f..7f27a91acf87b4bd054e5d112abe7cd95410cb81 100644 --- a/drivers/phy/phy-exynos4x12-usb2.c +++ b/drivers/phy/phy-exynos4x12-usb2.c @@ -187,7 +187,7 @@ static void exynos4x12_isol(struct samsung_usb2_phy_instance *inst, bool on) break; default: return; - }; + } regmap_update_bits(drv->reg_pmu, offset, mask, on ? 
0 : mask); } @@ -237,7 +237,7 @@ static void exynos4x12_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) rstbits = EXYNOS_4x12_URSTCON_HSIC1 | EXYNOS_4x12_URSTCON_HOST_LINK_P1; break; - }; + } if (on) { pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR); diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c index 2ed1735a076a1f8a50898de3d3de28bc7dfa235d..aad806272305f068dd8fa06ee079eeb45cc6ade5 100644 --- a/drivers/phy/phy-exynos5250-usb2.c +++ b/drivers/phy/phy-exynos5250-usb2.c @@ -192,7 +192,7 @@ static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on) break; default: return; - }; + } regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask); } diff --git a/drivers/phy/phy-meson8b-usb2.c b/drivers/phy/phy-meson8b-usb2.c new file mode 100644 index 0000000000000000000000000000000000000000..33c9f4ba157d18937d38ea04cfc2898bc3dfe86e --- /dev/null +++ b/drivers/phy/phy-meson8b-usb2.c @@ -0,0 +1,286 @@ +/* + * Meson8b and GXBB USB2 PHY driver + * + * Copyright (C) 2016 Martin Blumenstingl + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define REG_CONFIG 0x00 + #define REG_CONFIG_CLK_EN BIT(0) + #define REG_CONFIG_CLK_SEL_MASK GENMASK(3, 1) + #define REG_CONFIG_CLK_DIV_MASK GENMASK(10, 4) + #define REG_CONFIG_CLK_32k_ALTSEL BIT(15) + #define REG_CONFIG_TEST_TRIG BIT(31) + +#define REG_CTRL 0x04 + #define REG_CTRL_SOFT_PRST BIT(0) + #define REG_CTRL_SOFT_HRESET BIT(1) + #define REG_CTRL_SS_SCALEDOWN_MODE_MASK GENMASK(3, 2) + #define REG_CTRL_CLK_DET_RST BIT(4) + #define REG_CTRL_INTR_SEL BIT(5) + #define REG_CTRL_CLK_DETECTED BIT(8) + #define REG_CTRL_SOF_SENT_RCVD_TGL BIT(9) + #define REG_CTRL_SOF_TOGGLE_OUT BIT(10) + #define REG_CTRL_POWER_ON_RESET BIT(15) + #define REG_CTRL_SLEEPM BIT(16) + #define REG_CTRL_TX_BITSTUFF_ENN_H BIT(17) + #define REG_CTRL_TX_BITSTUFF_ENN BIT(18) + #define REG_CTRL_COMMON_ON BIT(19) + #define REG_CTRL_REF_CLK_SEL_MASK GENMASK(21, 20) + #define REG_CTRL_REF_CLK_SEL_SHIFT 20 + #define REG_CTRL_FSEL_MASK GENMASK(24, 22) + #define REG_CTRL_FSEL_SHIFT 22 + #define REG_CTRL_PORT_RESET BIT(25) + #define REG_CTRL_THREAD_ID_MASK GENMASK(31, 26) + +#define REG_ENDP_INTR 0x08 + +/* bits [31:26], [24:21] and [15:3] seem to be read-only */ +#define REG_ADP_BC 0x0c + #define REG_ADP_BC_VBUS_VLD_EXT_SEL BIT(0) + #define REG_ADP_BC_VBUS_VLD_EXT BIT(1) + #define REG_ADP_BC_OTG_DISABLE BIT(2) + #define REG_ADP_BC_ID_PULLUP BIT(3) + #define REG_ADP_BC_DRV_VBUS BIT(4) + #define REG_ADP_BC_ADP_PRB_EN BIT(5) + #define REG_ADP_BC_ADP_DISCHARGE BIT(6) + #define REG_ADP_BC_ADP_CHARGE BIT(7) + #define REG_ADP_BC_SESS_END BIT(8) + #define REG_ADP_BC_DEVICE_SESS_VLD BIT(9) + #define REG_ADP_BC_B_VALID BIT(10) + #define REG_ADP_BC_A_VALID BIT(11) + #define REG_ADP_BC_ID_DIG BIT(12) + #define REG_ADP_BC_VBUS_VALID BIT(13) + #define REG_ADP_BC_ADP_PROBE BIT(14) + #define REG_ADP_BC_ADP_SENSE BIT(15) + #define REG_ADP_BC_ACA_ENABLE BIT(16) + #define REG_ADP_BC_DCD_ENABLE BIT(17) + #define REG_ADP_BC_VDAT_DET_EN_B BIT(18) + #define REG_ADP_BC_VDAT_SRC_EN_B BIT(19) + #define REG_ADP_BC_CHARGE_SEL BIT(20) + #define REG_ADP_BC_CHARGE_DETECT BIT(21) + #define REG_ADP_BC_ACA_PIN_RANGE_C 
BIT(22) + #define REG_ADP_BC_ACA_PIN_RANGE_B BIT(23) + #define REG_ADP_BC_ACA_PIN_RANGE_A BIT(24) + #define REG_ADP_BC_ACA_PIN_GND BIT(25) + #define REG_ADP_BC_ACA_PIN_FLOAT BIT(26) + +#define REG_DBG_UART 0x14 + +#define REG_TEST 0x18 + #define REG_TEST_DATA_IN_MASK GENMASK(3, 0) + #define REG_TEST_EN_MASK GENMASK(7, 4) + #define REG_TEST_ADDR_MASK GENMASK(11, 8) + #define REG_TEST_DATA_OUT_SEL BIT(12) + #define REG_TEST_CLK BIT(13) + #define REG_TEST_VA_TEST_EN_B_MASK GENMASK(15, 14) + #define REG_TEST_DATA_OUT_MASK GENMASK(19, 16) + #define REG_TEST_DISABLE_ID_PULLUP BIT(20) + +#define REG_TUNE 0x1c + #define REG_TUNE_TX_RES_TUNE_MASK GENMASK(1, 0) + #define REG_TUNE_TX_HSXV_TUNE_MASK GENMASK(3, 2) + #define REG_TUNE_TX_VREF_TUNE_MASK GENMASK(7, 4) + #define REG_TUNE_TX_RISE_TUNE_MASK GENMASK(9, 8) + #define REG_TUNE_TX_PREEMP_PULSE_TUNE BIT(10) + #define REG_TUNE_TX_PREEMP_AMP_TUNE_MASK GENMASK(12, 11) + #define REG_TUNE_TX_FSLS_TUNE_MASK GENMASK(16, 13) + #define REG_TUNE_SQRX_TUNE_MASK GENMASK(19, 17) + #define REG_TUNE_OTG_TUNE GENMASK(22, 20) + #define REG_TUNE_COMP_DIS_TUNE GENMASK(25, 23) + #define REG_TUNE_HOST_DM_PULLDOWN BIT(26) + #define REG_TUNE_HOST_DP_PULLDOWN BIT(27) + +#define RESET_COMPLETE_TIME 500 +#define ACA_ENABLE_COMPLETE_TIME 50 + +struct phy_meson8b_usb2_priv { + void __iomem *regs; + enum usb_dr_mode dr_mode; + struct clk *clk_usb_general; + struct clk *clk_usb; + struct reset_control *reset; +}; + +static u32 phy_meson8b_usb2_read(struct phy_meson8b_usb2_priv *phy_priv, + u32 reg) +{ + return readl(phy_priv->regs + reg); +} + +static void phy_meson8b_usb2_mask_bits(struct phy_meson8b_usb2_priv *phy_priv, + u32 reg, u32 mask, u32 value) +{ + u32 data; + + data = phy_meson8b_usb2_read(phy_priv, reg); + data &= ~mask; + data |= (value & mask); + + writel(data, phy_priv->regs + reg); +} + +static int phy_meson8b_usb2_power_on(struct phy *phy) +{ + struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy); + int ret; + + if (!IS_ERR_OR_NULL(priv->reset)) { + ret = reset_control_reset(priv->reset); + if (ret) { + dev_err(&phy->dev, "Failed to trigger USB reset\n"); + return ret; + } + } + + ret = clk_prepare_enable(priv->clk_usb_general); + if (ret) { + dev_err(&phy->dev, "Failed to enable USB general clock\n"); + return ret; + } + + ret = clk_prepare_enable(priv->clk_usb); + if (ret) { + dev_err(&phy->dev, "Failed to enable USB DDR clock\n"); + clk_disable_unprepare(priv->clk_usb_general); + return ret; + } + + phy_meson8b_usb2_mask_bits(priv, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL, + REG_CONFIG_CLK_32k_ALTSEL); + + phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK, + 0x2 << REG_CTRL_REF_CLK_SEL_SHIFT); + + phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_FSEL_MASK, + 0x5 << REG_CTRL_FSEL_SHIFT); + + /* reset the PHY */ + phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET, + REG_CTRL_POWER_ON_RESET); + udelay(RESET_COMPLETE_TIME); + phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0); + udelay(RESET_COMPLETE_TIME); + + phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT, + REG_CTRL_SOF_TOGGLE_OUT); + + if (priv->dr_mode == USB_DR_MODE_HOST) { + phy_meson8b_usb2_mask_bits(priv, REG_ADP_BC, + REG_ADP_BC_ACA_ENABLE, + REG_ADP_BC_ACA_ENABLE); + + udelay(ACA_ENABLE_COMPLETE_TIME); + + if (phy_meson8b_usb2_read(priv, REG_ADP_BC) & + REG_ADP_BC_ACA_PIN_FLOAT) { + dev_warn(&phy->dev, "USB ID detect failed!\n"); + clk_disable_unprepare(priv->clk_usb); + clk_disable_unprepare(priv->clk_usb_general); + 
return -EINVAL; + } + } + + return 0; +} + +static int phy_meson8b_usb2_power_off(struct phy *phy) +{ + struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy); + + clk_disable_unprepare(priv->clk_usb); + clk_disable_unprepare(priv->clk_usb_general); + + return 0; +} + +static const struct phy_ops phy_meson8b_usb2_ops = { + .power_on = phy_meson8b_usb2_power_on, + .power_off = phy_meson8b_usb2_power_off, + .owner = THIS_MODULE, +}; + +static int phy_meson8b_usb2_probe(struct platform_device *pdev) +{ + struct phy_meson8b_usb2_priv *priv; + struct resource *res; + struct phy *phy; + struct phy_provider *phy_provider; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + + priv->clk_usb_general = devm_clk_get(&pdev->dev, "usb_general"); + if (IS_ERR(priv->clk_usb_general)) + return PTR_ERR(priv->clk_usb_general); + + priv->clk_usb = devm_clk_get(&pdev->dev, "usb"); + if (IS_ERR(priv->clk_usb)) + return PTR_ERR(priv->clk_usb); + + priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL); + if (PTR_ERR(priv->reset) == -EPROBE_DEFER) + return PTR_ERR(priv->reset); + + priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1); + if (priv->dr_mode == USB_DR_MODE_UNKNOWN) { + dev_err(&pdev->dev, + "missing dual role configuration of the controller\n"); + return -EINVAL; + } + + phy = devm_phy_create(&pdev->dev, NULL, &phy_meson8b_usb2_ops); + if (IS_ERR(phy)) { + dev_err(&pdev->dev, "failed to create PHY\n"); + return PTR_ERR(phy); + } + + phy_set_drvdata(phy, priv); + + phy_provider = + devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct of_device_id phy_meson8b_usb2_of_match[] = { + { .compatible = "amlogic,meson8b-usb2-phy", }, + { .compatible = "amlogic,meson-gxbb-usb2-phy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, phy_meson8b_usb2_of_match); + +static struct platform_driver phy_meson8b_usb2_driver = { + .probe = phy_meson8b_usb2_probe, + .driver = { + .name = "phy-meson-usb2", + .of_match_table = phy_meson8b_usb2_of_match, + }, +}; +module_platform_driver(phy_meson8b_usb2_driver); + +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_DESCRIPTION("Meson8b and GXBB USB2 PHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c deleted file mode 100644 index e661f3b36eaa0cdf992cde8f88d92644ff585c5e..0000000000000000000000000000000000000000 --- a/drivers/phy/phy-miphy365x.c +++ /dev/null @@ -1,625 +0,0 @@ -/* - * Copyright (C) 2014 STMicroelectronics – All Rights Reserved - * - * STMicroelectronics PHY driver MiPHY365 (for SoC STiH416). - * - * Authors: Alexandre Torgue - * Lee Jones - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation. 
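The new phy-meson8b-usb2 driver above funnels all register programming through phy_meson8b_usb2_mask_bits(), a plain read-modify-write that clears the field's mask and ORs in the new value. The sketch below reproduces that arithmetic in user space for the FSEL field (bits 24:22 of REG_CTRL), which the power-on path programs to 0x5; the starting register value and the simplified 32-bit GENMASK() are assumptions made for illustration only:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)           (((~0u) << (l)) & (~0u >> (31 - (h))))

#define REG_CTRL_FSEL_MASK      GENMASK(24, 22)
#define REG_CTRL_FSEL_SHIFT     22

/* Same update step as phy_meson8b_usb2_mask_bits(): clear the field,
 * then OR in the new value restricted to the field's mask. */
static uint32_t mask_bits(uint32_t reg, uint32_t mask, uint32_t value)
{
        reg &= ~mask;
        reg |= value & mask;
        return reg;
}

int main(void)
{
        uint32_t ctrl = 0xdeadbeef;     /* hypothetical REG_CTRL contents */

        ctrl = mask_bits(ctrl, REG_CTRL_FSEL_MASK,
                         0x5 << REG_CTRL_FSEL_SHIFT);

        printf("FSEL field now %u\n",
               (unsigned)((ctrl & REG_CTRL_FSEL_MASK) >> REG_CTRL_FSEL_SHIFT));
        return 0;
}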
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define HFC_TIMEOUT 100 - -#define SYSCFG_SELECT_SATA_MASK BIT(1) -#define SYSCFG_SELECT_SATA_POS 1 - -/* MiPHY365x register definitions */ -#define RESET_REG 0x00 -#define RST_PLL BIT(1) -#define RST_PLL_CAL BIT(2) -#define RST_RX BIT(4) -#define RST_MACRO BIT(7) - -#define STATUS_REG 0x01 -#define IDLL_RDY BIT(0) -#define PLL_RDY BIT(1) -#define DES_BIT_LOCK BIT(2) -#define DES_SYMBOL_LOCK BIT(3) - -#define CTRL_REG 0x02 -#define TERM_EN BIT(0) -#define PCI_EN BIT(2) -#define DES_BIT_LOCK_EN BIT(3) -#define TX_POL BIT(5) - -#define INT_CTRL_REG 0x03 - -#define BOUNDARY1_REG 0x10 -#define SPDSEL_SEL BIT(0) - -#define BOUNDARY3_REG 0x12 -#define TX_SPDSEL_GEN1_VAL 0 -#define TX_SPDSEL_GEN2_VAL 0x01 -#define TX_SPDSEL_GEN3_VAL 0x02 -#define RX_SPDSEL_GEN1_VAL 0 -#define RX_SPDSEL_GEN2_VAL (0x01 << 3) -#define RX_SPDSEL_GEN3_VAL (0x02 << 3) - -#define PCIE_REG 0x16 - -#define BUF_SEL_REG 0x20 -#define CONF_GEN_SEL_GEN3 0x02 -#define CONF_GEN_SEL_GEN2 0x01 -#define PD_VDDTFILTER BIT(4) - -#define TXBUF1_REG 0x21 -#define SWING_VAL 0x04 -#define SWING_VAL_GEN1 0x03 -#define PREEMPH_VAL (0x3 << 5) - -#define TXBUF2_REG 0x22 -#define TXSLEW_VAL 0x2 -#define TXSLEW_VAL_GEN1 0x4 - -#define RXBUF_OFFSET_CTRL_REG 0x23 - -#define RXBUF_REG 0x25 -#define SDTHRES_VAL 0x01 -#define EQ_ON3 (0x03 << 4) -#define EQ_ON1 (0x01 << 4) - -#define COMP_CTRL1_REG 0x40 -#define START_COMSR BIT(0) -#define START_COMZC BIT(1) -#define COMSR_DONE BIT(2) -#define COMZC_DONE BIT(3) -#define COMP_AUTO_LOAD BIT(4) - -#define COMP_CTRL2_REG 0x41 -#define COMP_2MHZ_RAT_GEN1 0x1e -#define COMP_2MHZ_RAT 0xf - -#define COMP_CTRL3_REG 0x42 -#define COMSR_COMP_REF 0x33 - -#define COMP_IDLL_REG 0x47 -#define COMZC_IDLL 0x2a - -#define PLL_CTRL1_REG 0x50 -#define PLL_START_CAL BIT(0) -#define BUF_EN BIT(2) -#define SYNCHRO_TX BIT(3) -#define SSC_EN BIT(6) -#define CONFIG_PLL BIT(7) - -#define PLL_CTRL2_REG 0x51 -#define BYPASS_PLL_CAL BIT(1) - -#define PLL_RAT_REG 0x52 - -#define PLL_SSC_STEP_MSB_REG 0x56 -#define PLL_SSC_STEP_MSB_VAL 0x03 - -#define PLL_SSC_STEP_LSB_REG 0x57 -#define PLL_SSC_STEP_LSB_VAL 0x63 - -#define PLL_SSC_PER_MSB_REG 0x58 -#define PLL_SSC_PER_MSB_VAL 0 - -#define PLL_SSC_PER_LSB_REG 0x59 -#define PLL_SSC_PER_LSB_VAL 0xf1 - -#define IDLL_TEST_REG 0x72 -#define START_CLK_HF BIT(6) - -#define DES_BITLOCK_REG 0x86 -#define BIT_LOCK_LEVEL 0x01 -#define BIT_LOCK_CNT_512 (0x03 << 5) - -struct miphy365x_phy { - struct phy *phy; - void __iomem *base; - bool pcie_tx_pol_inv; - bool sata_tx_pol_inv; - u32 sata_gen; - u32 ctrlreg; - u8 type; -}; - -struct miphy365x_dev { - struct device *dev; - struct regmap *regmap; - struct mutex miphy_mutex; - struct miphy365x_phy **phys; - int nphys; -}; - -/* - * These values are represented in Device tree. They are considered to be ABI - * and although they can be extended any existing values must not change. - */ -enum miphy_sata_gen { - SATA_GEN1 = 1, - SATA_GEN2, - SATA_GEN3 -}; - -static u8 rx_tx_spd[] = { - 0, /* GEN0 doesn't exist. */ - TX_SPDSEL_GEN1_VAL | RX_SPDSEL_GEN1_VAL, - TX_SPDSEL_GEN2_VAL | RX_SPDSEL_GEN2_VAL, - TX_SPDSEL_GEN3_VAL | RX_SPDSEL_GEN3_VAL -}; - -/* - * This function selects the system configuration, - * either two SATA, one SATA and one PCIe, or two PCIe lanes. 
- */ -static int miphy365x_set_path(struct miphy365x_phy *miphy_phy, - struct miphy365x_dev *miphy_dev) -{ - bool sata = (miphy_phy->type == PHY_TYPE_SATA); - - return regmap_update_bits(miphy_dev->regmap, - miphy_phy->ctrlreg, - SYSCFG_SELECT_SATA_MASK, - sata << SYSCFG_SELECT_SATA_POS); -} - -static int miphy365x_init_pcie_port(struct miphy365x_phy *miphy_phy, - struct miphy365x_dev *miphy_dev) -{ - u8 val; - - if (miphy_phy->pcie_tx_pol_inv) { - /* Invert Tx polarity and clear pci_txdetect_pol bit */ - val = TERM_EN | PCI_EN | DES_BIT_LOCK_EN | TX_POL; - writeb_relaxed(val, miphy_phy->base + CTRL_REG); - writeb_relaxed(0x00, miphy_phy->base + PCIE_REG); - } - - return 0; -} - -static inline int miphy365x_hfc_not_rdy(struct miphy365x_phy *miphy_phy, - struct miphy365x_dev *miphy_dev) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT); - u8 mask = IDLL_RDY | PLL_RDY; - u8 regval; - - do { - regval = readb_relaxed(miphy_phy->base + STATUS_REG); - if (!(regval & mask)) - return 0; - - usleep_range(2000, 2500); - } while (time_before(jiffies, timeout)); - - dev_err(miphy_dev->dev, "HFC ready timeout!\n"); - return -EBUSY; -} - -static inline int miphy365x_rdy(struct miphy365x_phy *miphy_phy, - struct miphy365x_dev *miphy_dev) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT); - u8 mask = IDLL_RDY | PLL_RDY; - u8 regval; - - do { - regval = readb_relaxed(miphy_phy->base + STATUS_REG); - if ((regval & mask) == mask) - return 0; - - usleep_range(2000, 2500); - } while (time_before(jiffies, timeout)); - - dev_err(miphy_dev->dev, "PHY not ready timeout!\n"); - return -EBUSY; -} - -static inline void miphy365x_set_comp(struct miphy365x_phy *miphy_phy, - struct miphy365x_dev *miphy_dev) -{ - u8 val, mask; - - if (miphy_phy->sata_gen == SATA_GEN1) - writeb_relaxed(COMP_2MHZ_RAT_GEN1, - miphy_phy->base + COMP_CTRL2_REG); - else - writeb_relaxed(COMP_2MHZ_RAT, - miphy_phy->base + COMP_CTRL2_REG); - - if (miphy_phy->sata_gen != SATA_GEN3) { - writeb_relaxed(COMSR_COMP_REF, - miphy_phy->base + COMP_CTRL3_REG); - /* - * Force VCO current to value defined by address 0x5A - * and disable PCIe100Mref bit - * Enable auto load compensation for pll_i_bias - */ - writeb_relaxed(BYPASS_PLL_CAL, miphy_phy->base + PLL_CTRL2_REG); - writeb_relaxed(COMZC_IDLL, miphy_phy->base + COMP_IDLL_REG); - } - - /* - * Force restart compensation and enable auto load - * for Comzc_Tx, Comzc_Rx and Comsr on macro - */ - val = START_COMSR | START_COMZC | COMP_AUTO_LOAD; - writeb_relaxed(val, miphy_phy->base + COMP_CTRL1_REG); - - mask = COMSR_DONE | COMZC_DONE; - while ((readb_relaxed(miphy_phy->base + COMP_CTRL1_REG) & mask) != mask) - cpu_relax(); -} - -static inline void miphy365x_set_ssc(struct miphy365x_phy *miphy_phy, - struct miphy365x_dev *miphy_dev) -{ - u8 val; - - /* - * SSC Settings. SSC will be enabled through Link - * SSC Ampl. 
= 0.4% - * SSC Freq = 31KHz - */ - writeb_relaxed(PLL_SSC_STEP_MSB_VAL, - miphy_phy->base + PLL_SSC_STEP_MSB_REG); - writeb_relaxed(PLL_SSC_STEP_LSB_VAL, - miphy_phy->base + PLL_SSC_STEP_LSB_REG); - writeb_relaxed(PLL_SSC_PER_MSB_VAL, - miphy_phy->base + PLL_SSC_PER_MSB_REG); - writeb_relaxed(PLL_SSC_PER_LSB_VAL, - miphy_phy->base + PLL_SSC_PER_LSB_REG); - - /* SSC Settings complete */ - if (miphy_phy->sata_gen == SATA_GEN1) { - val = PLL_START_CAL | BUF_EN | SYNCHRO_TX | CONFIG_PLL; - writeb_relaxed(val, miphy_phy->base + PLL_CTRL1_REG); - } else { - val = SSC_EN | PLL_START_CAL | BUF_EN | SYNCHRO_TX | CONFIG_PLL; - writeb_relaxed(val, miphy_phy->base + PLL_CTRL1_REG); - } -} - -static int miphy365x_init_sata_port(struct miphy365x_phy *miphy_phy, - struct miphy365x_dev *miphy_dev) -{ - int ret; - u8 val; - - /* - * Force PHY macro reset, PLL calibration reset, PLL reset - * and assert Deserializer Reset - */ - val = RST_PLL | RST_PLL_CAL | RST_RX | RST_MACRO; - writeb_relaxed(val, miphy_phy->base + RESET_REG); - - if (miphy_phy->sata_tx_pol_inv) - writeb_relaxed(TX_POL, miphy_phy->base + CTRL_REG); - - /* - * Force macro1 to use rx_lspd, tx_lspd - * Force Rx_Clock on first I-DLL phase - * Force Des in HP mode on macro, rx_lspd, tx_lspd for Gen2/3 - */ - writeb_relaxed(SPDSEL_SEL, miphy_phy->base + BOUNDARY1_REG); - writeb_relaxed(START_CLK_HF, miphy_phy->base + IDLL_TEST_REG); - val = rx_tx_spd[miphy_phy->sata_gen]; - writeb_relaxed(val, miphy_phy->base + BOUNDARY3_REG); - - /* Wait for HFC_READY = 0 */ - ret = miphy365x_hfc_not_rdy(miphy_phy, miphy_dev); - if (ret) - return ret; - - /* Compensation Recalibration */ - miphy365x_set_comp(miphy_phy, miphy_dev); - - switch (miphy_phy->sata_gen) { - case SATA_GEN3: - /* - * TX Swing target 550-600mv peak to peak diff - * Tx Slew target 90-110ps rising/falling time - * Rx Eq ON3, Sigdet threshold SDTH1 - */ - val = PD_VDDTFILTER | CONF_GEN_SEL_GEN3; - writeb_relaxed(val, miphy_phy->base + BUF_SEL_REG); - val = SWING_VAL | PREEMPH_VAL; - writeb_relaxed(val, miphy_phy->base + TXBUF1_REG); - writeb_relaxed(TXSLEW_VAL, miphy_phy->base + TXBUF2_REG); - writeb_relaxed(0x00, miphy_phy->base + RXBUF_OFFSET_CTRL_REG); - val = SDTHRES_VAL | EQ_ON3; - writeb_relaxed(val, miphy_phy->base + RXBUF_REG); - break; - case SATA_GEN2: - /* - * conf gen sel=0x1 to program Gen2 banked registers - * VDDT filter ON - * Tx Swing target 550-600mV peak-to-peak diff - * Tx Slew target 90-110 ps rising/falling time - * RX Equalization ON1, Sigdet threshold SDTH1 - */ - writeb_relaxed(CONF_GEN_SEL_GEN2, - miphy_phy->base + BUF_SEL_REG); - writeb_relaxed(SWING_VAL, miphy_phy->base + TXBUF1_REG); - writeb_relaxed(TXSLEW_VAL, miphy_phy->base + TXBUF2_REG); - val = SDTHRES_VAL | EQ_ON1; - writeb_relaxed(val, miphy_phy->base + RXBUF_REG); - break; - case SATA_GEN1: - /* - * conf gen sel = 00b to program Gen1 banked registers - * VDDT filter ON - * Tx Swing target 500-550mV peak-to-peak diff - * Tx Slew target120-140 ps rising/falling time - */ - writeb_relaxed(PD_VDDTFILTER, miphy_phy->base + BUF_SEL_REG); - writeb_relaxed(SWING_VAL_GEN1, miphy_phy->base + TXBUF1_REG); - writeb_relaxed(TXSLEW_VAL_GEN1, miphy_phy->base + TXBUF2_REG); - break; - default: - break; - } - - /* Force Macro1 in partial mode & release pll cal reset */ - writeb_relaxed(RST_RX, miphy_phy->base + RESET_REG); - usleep_range(100, 150); - - miphy365x_set_ssc(miphy_phy, miphy_dev); - - /* Wait for phy_ready */ - ret = miphy365x_rdy(miphy_phy, miphy_dev); - if (ret) - return ret; - - /* - * Enable macro1 to 
use rx_lspd & tx_lspd - * Release Rx_Clock on first I-DLL phase on macro1 - * Assert deserializer reset - * des_bit_lock_en is set - * bit lock detection strength - * Deassert deserializer reset - */ - writeb_relaxed(0x00, miphy_phy->base + BOUNDARY1_REG); - writeb_relaxed(0x00, miphy_phy->base + IDLL_TEST_REG); - writeb_relaxed(RST_RX, miphy_phy->base + RESET_REG); - val = miphy_phy->sata_tx_pol_inv ? - (TX_POL | DES_BIT_LOCK_EN) : DES_BIT_LOCK_EN; - writeb_relaxed(val, miphy_phy->base + CTRL_REG); - - val = BIT_LOCK_CNT_512 | BIT_LOCK_LEVEL; - writeb_relaxed(val, miphy_phy->base + DES_BITLOCK_REG); - writeb_relaxed(0x00, miphy_phy->base + RESET_REG); - - return 0; -} - -static int miphy365x_init(struct phy *phy) -{ - struct miphy365x_phy *miphy_phy = phy_get_drvdata(phy); - struct miphy365x_dev *miphy_dev = dev_get_drvdata(phy->dev.parent); - int ret = 0; - - mutex_lock(&miphy_dev->miphy_mutex); - - ret = miphy365x_set_path(miphy_phy, miphy_dev); - if (ret) { - mutex_unlock(&miphy_dev->miphy_mutex); - return ret; - } - - /* Initialise Miphy for PCIe or SATA */ - if (miphy_phy->type == PHY_TYPE_PCIE) - ret = miphy365x_init_pcie_port(miphy_phy, miphy_dev); - else - ret = miphy365x_init_sata_port(miphy_phy, miphy_dev); - - mutex_unlock(&miphy_dev->miphy_mutex); - - return ret; -} - -static int miphy365x_get_addr(struct device *dev, - struct miphy365x_phy *miphy_phy, int index) -{ - struct device_node *phynode = miphy_phy->phy->dev.of_node; - const char *name; - int type = miphy_phy->type; - int ret; - - ret = of_property_read_string_index(phynode, "reg-names", index, &name); - if (ret) { - dev_err(dev, "no reg-names property not found\n"); - return ret; - } - - if (!((!strncmp(name, "sata", 4) && type == PHY_TYPE_SATA) || - (!strncmp(name, "pcie", 4) && type == PHY_TYPE_PCIE))) - return 0; - - miphy_phy->base = of_iomap(phynode, index); - if (!miphy_phy->base) { - dev_err(dev, "Failed to map %s\n", phynode->full_name); - return -EINVAL; - } - - return 0; -} - -static struct phy *miphy365x_xlate(struct device *dev, - struct of_phandle_args *args) -{ - struct miphy365x_dev *miphy_dev = dev_get_drvdata(dev); - struct miphy365x_phy *miphy_phy = NULL; - struct device_node *phynode = args->np; - int ret, index; - - if (args->args_count != 1) { - dev_err(dev, "Invalid number of cells in 'phy' property\n"); - return ERR_PTR(-EINVAL); - } - - for (index = 0; index < miphy_dev->nphys; index++) - if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { - miphy_phy = miphy_dev->phys[index]; - break; - } - - if (!miphy_phy) { - dev_err(dev, "Failed to find appropriate phy\n"); - return ERR_PTR(-EINVAL); - } - - miphy_phy->type = args->args[0]; - - if (!(miphy_phy->type == PHY_TYPE_SATA || - miphy_phy->type == PHY_TYPE_PCIE)) { - dev_err(dev, "Unsupported device type: %d\n", miphy_phy->type); - return ERR_PTR(-EINVAL); - } - - /* Each port handles SATA and PCIE - third entry is always sysconf. 
*/ - for (index = 0; index < 3; index++) { - ret = miphy365x_get_addr(dev, miphy_phy, index); - if (ret < 0) - return ERR_PTR(ret); - } - - return miphy_phy->phy; -} - -static const struct phy_ops miphy365x_ops = { - .init = miphy365x_init, - .owner = THIS_MODULE, -}; - -static int miphy365x_of_probe(struct device_node *phynode, - struct miphy365x_phy *miphy_phy) -{ - of_property_read_u32(phynode, "st,sata-gen", &miphy_phy->sata_gen); - if (!miphy_phy->sata_gen) - miphy_phy->sata_gen = SATA_GEN1; - - miphy_phy->pcie_tx_pol_inv = - of_property_read_bool(phynode, "st,pcie-tx-pol-inv"); - - miphy_phy->sata_tx_pol_inv = - of_property_read_bool(phynode, "st,sata-tx-pol-inv"); - - return 0; -} - -static int miphy365x_probe(struct platform_device *pdev) -{ - struct device_node *child, *np = pdev->dev.of_node; - struct miphy365x_dev *miphy_dev; - struct phy_provider *provider; - struct phy *phy; - int ret, port = 0; - - miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); - if (!miphy_dev) - return -ENOMEM; - - miphy_dev->nphys = of_get_child_count(np); - miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, - sizeof(*miphy_dev->phys), GFP_KERNEL); - if (!miphy_dev->phys) - return -ENOMEM; - - miphy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); - if (IS_ERR(miphy_dev->regmap)) { - dev_err(miphy_dev->dev, "No syscfg phandle specified\n"); - return PTR_ERR(miphy_dev->regmap); - } - - miphy_dev->dev = &pdev->dev; - - dev_set_drvdata(&pdev->dev, miphy_dev); - - mutex_init(&miphy_dev->miphy_mutex); - - for_each_child_of_node(np, child) { - struct miphy365x_phy *miphy_phy; - - miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), - GFP_KERNEL); - if (!miphy_phy) { - ret = -ENOMEM; - goto put_child; - } - - miphy_dev->phys[port] = miphy_phy; - - phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); - if (IS_ERR(phy)) { - dev_err(&pdev->dev, "failed to create PHY\n"); - ret = PTR_ERR(phy); - goto put_child; - } - - miphy_dev->phys[port]->phy = phy; - - ret = miphy365x_of_probe(child, miphy_phy); - if (ret) - goto put_child; - - phy_set_drvdata(phy, miphy_dev->phys[port]); - - port++; - /* sysconfig offsets are indexed from 1 */ - ret = of_property_read_u32_index(np, "st,syscfg", port, - &miphy_phy->ctrlreg); - if (ret) { - dev_err(&pdev->dev, "No sysconfig offset found\n"); - goto put_child; - } - } - - provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); - return PTR_ERR_OR_ZERO(provider); -put_child: - of_node_put(child); - return ret; -} - -static const struct of_device_id miphy365x_of_match[] = { - { .compatible = "st,miphy365x-phy", }, - { }, -}; -MODULE_DEVICE_TABLE(of, miphy365x_of_match); - -static struct platform_driver miphy365x_driver = { - .probe = miphy365x_probe, - .driver = { - .name = "miphy365x-phy", - .of_match_table = miphy365x_of_match, - } -}; -module_platform_driver(miphy365x_driver); - -MODULE_AUTHOR("Alexandre Torgue "); -MODULE_DESCRIPTION("STMicroelectronics miphy365x driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h index 2bd5ce43a7240d66429ed781b4780c33210897a0..d505d98cf5f8187c1be36c93ca05ac5c7e510804 100644 --- a/drivers/phy/phy-qcom-ufs-i.h +++ b/drivers/phy/phy-qcom-ufs-i.h @@ -141,11 +141,8 @@ struct ufs_qcom_phy_specific_ops { struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy); int ufs_qcom_phy_power_on(struct phy *generic_phy); int ufs_qcom_phy_power_off(struct phy *generic_phy); -int ufs_qcom_phy_exit(struct phy *generic_phy); -int 
ufs_qcom_phy_init_clks(struct phy *generic_phy, - struct ufs_qcom_phy *phy_common); -int ufs_qcom_phy_init_vregulators(struct phy *generic_phy, - struct ufs_qcom_phy *phy_common); +int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common); +int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common); int ufs_qcom_phy_remove(struct phy *generic_phy, struct ufs_qcom_phy *ufs_qcom_phy); struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev, diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c index 6ee51490f78603cb52d361b7d8b9258b4db350a3..c71c84734916c80f1a9a6600e9ad8fc533e637fd 100644 --- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c +++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c @@ -44,30 +44,12 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common) static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy) { - struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy); - struct ufs_qcom_phy *phy_common = &phy->common_cfg; - int err; - - err = ufs_qcom_phy_init_clks(generic_phy, phy_common); - if (err) { - dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n", - __func__, err); - goto out; - } - - err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common); - if (err) { - dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n", - __func__, err); - goto out; - } - phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV; - phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV; - - ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common); + return 0; +} -out: - return err; +static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy) +{ + return 0; } static @@ -117,7 +99,7 @@ static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common) static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = { .init = ufs_qcom_phy_qmp_14nm_init, - .exit = ufs_qcom_phy_exit, + .exit = ufs_qcom_phy_qmp_14nm_exit, .power_on = ufs_qcom_phy_power_on, .power_off = ufs_qcom_phy_power_off, .owner = THIS_MODULE, @@ -136,6 +118,7 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct phy *generic_phy; struct ufs_qcom_phy_qmp_14nm *phy; + struct ufs_qcom_phy *phy_common; int err = 0; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); @@ -143,8 +126,9 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev) err = -ENOMEM; goto out; } + phy_common = &phy->common_cfg; - generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg, + generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common, &ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops); if (!generic_phy) { @@ -154,39 +138,43 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev) goto out; } - phy_set_drvdata(generic_phy, phy); + err = ufs_qcom_phy_init_clks(phy_common); + if (err) { + dev_err(phy_common->dev, + "%s: ufs_qcom_phy_init_clks() failed %d\n", + __func__, err); + goto out; + } - strlcpy(phy->common_cfg.name, UFS_PHY_NAME, - sizeof(phy->common_cfg.name)); + err = ufs_qcom_phy_init_vregulators(phy_common); + if (err) { + dev_err(phy_common->dev, + "%s: ufs_qcom_phy_init_vregulators() failed %d\n", + __func__, err); + goto out; + } + phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV; + phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV; -out: - return err; -} + ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common); -static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct phy *generic_phy = 
to_phy(dev); - struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); - int err = 0; + phy_set_drvdata(generic_phy, phy); - err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy); - if (err) - dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n", - __func__, err); + strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name)); +out: return err; } static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = { {.compatible = "qcom,ufs-phy-qmp-14nm"}, + {.compatible = "qcom,msm8996-ufs-phy-qmp-14nm"}, {}, }; MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match); static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = { .probe = ufs_qcom_phy_qmp_14nm_probe, - .remove = ufs_qcom_phy_qmp_14nm_remove, .driver = { .of_match_table = ufs_qcom_phy_qmp_14nm_of_match, .name = "ufs_qcom_phy_qmp_14nm", diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c index 770087ab05e29264d797231eb64e3090fb343142..1a26a64e06d34ffc9cbc48fc3067376f4b3fadc1 100644 --- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c +++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c @@ -63,28 +63,12 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common) static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy) { - struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy); - struct ufs_qcom_phy *phy_common = &phy->common_cfg; - int err = 0; - - err = ufs_qcom_phy_init_clks(generic_phy, phy_common); - if (err) { - dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n", - __func__, err); - goto out; - } - - err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common); - if (err) { - dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n", - __func__, err); - goto out; - } - - ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common); + return 0; +} -out: - return err; +static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy) +{ + return 0; } static @@ -173,7 +157,7 @@ static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common) static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = { .init = ufs_qcom_phy_qmp_20nm_init, - .exit = ufs_qcom_phy_exit, + .exit = ufs_qcom_phy_qmp_20nm_exit, .power_on = ufs_qcom_phy_power_on, .power_off = ufs_qcom_phy_power_off, .owner = THIS_MODULE, @@ -192,6 +176,7 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct phy *generic_phy; struct ufs_qcom_phy_qmp_20nm *phy; + struct ufs_qcom_phy *phy_common; int err = 0; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); @@ -199,8 +184,9 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev) err = -ENOMEM; goto out; } + phy_common = &phy->common_cfg; - generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg, + generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common, &ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops); if (!generic_phy) { @@ -210,27 +196,27 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev) goto out; } - phy_set_drvdata(generic_phy, phy); + err = ufs_qcom_phy_init_clks(phy_common); + if (err) { + dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n", + __func__, err); + goto out; + } - strlcpy(phy->common_cfg.name, UFS_PHY_NAME, - sizeof(phy->common_cfg.name)); + err = ufs_qcom_phy_init_vregulators(phy_common); + if (err) { + dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n", + __func__, err); + goto out; + } -out: - return err; -} + 
ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common); -static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct phy *generic_phy = to_phy(dev); - struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); - int err = 0; + phy_set_drvdata(generic_phy, phy); - err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy); - if (err) - dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n", - __func__, err); + strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name)); +out: return err; } @@ -242,7 +228,6 @@ MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match); static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = { .probe = ufs_qcom_phy_qmp_20nm_probe, - .remove = ufs_qcom_phy_qmp_20nm_remove, .driver = { .of_match_table = ufs_qcom_phy_qmp_20nm_of_match, .name = "ufs_qcom_phy_qmp_20nm", diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c index 18a5b495ad65accddda823338376678276b8f0e6..c69568b8543dcfaf4958875288cf9b13432169e5 100644 --- a/drivers/phy/phy-qcom-ufs.c +++ b/drivers/phy/phy-qcom-ufs.c @@ -22,13 +22,6 @@ #define VDDP_REF_CLK_MIN_UV 1200000 #define VDDP_REF_CLK_MAX_UV 1200000 -static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *, - const char *, bool); -static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *, - const char *); -static int ufs_qcom_phy_base_init(struct platform_device *pdev, - struct ufs_qcom_phy *phy_common); - int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A, @@ -75,45 +68,6 @@ int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, } EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate); -struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev, - struct ufs_qcom_phy *common_cfg, - const struct phy_ops *ufs_qcom_phy_gen_ops, - struct ufs_qcom_phy_specific_ops *phy_spec_ops) -{ - int err; - struct device *dev = &pdev->dev; - struct phy *generic_phy = NULL; - struct phy_provider *phy_provider; - - err = ufs_qcom_phy_base_init(pdev, common_cfg); - if (err) { - dev_err(dev, "%s: phy base init failed %d\n", __func__, err); - goto out; - } - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) { - err = PTR_ERR(phy_provider); - dev_err(dev, "%s: failed to register phy %d\n", __func__, err); - goto out; - } - - generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops); - if (IS_ERR(generic_phy)) { - err = PTR_ERR(generic_phy); - dev_err(dev, "%s: failed to create phy %d\n", __func__, err); - generic_phy = NULL; - goto out; - } - - common_cfg->phy_spec_ops = phy_spec_ops; - common_cfg->dev = dev; - -out: - return generic_phy; -} -EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe); - /* * This assumes the embedded phy structure inside generic_phy is of type * struct ufs_qcom_phy. 
In order to function properly it's crucial @@ -154,13 +108,50 @@ int ufs_qcom_phy_base_init(struct platform_device *pdev, return 0; } -static int __ufs_qcom_phy_clk_get(struct phy *phy, +struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev, + struct ufs_qcom_phy *common_cfg, + const struct phy_ops *ufs_qcom_phy_gen_ops, + struct ufs_qcom_phy_specific_ops *phy_spec_ops) +{ + int err; + struct device *dev = &pdev->dev; + struct phy *generic_phy = NULL; + struct phy_provider *phy_provider; + + err = ufs_qcom_phy_base_init(pdev, common_cfg); + if (err) { + dev_err(dev, "%s: phy base init failed %d\n", __func__, err); + goto out; + } + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) { + err = PTR_ERR(phy_provider); + dev_err(dev, "%s: failed to register phy %d\n", __func__, err); + goto out; + } + + generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops); + if (IS_ERR(generic_phy)) { + err = PTR_ERR(generic_phy); + dev_err(dev, "%s: failed to create phy %d\n", __func__, err); + generic_phy = NULL; + goto out; + } + + common_cfg->phy_spec_ops = phy_spec_ops; + common_cfg->dev = dev; + +out: + return generic_phy; +} +EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe); + +static int __ufs_qcom_phy_clk_get(struct device *dev, const char *name, struct clk **clk_out, bool err_print) { struct clk *clk; int err = 0; - struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); - struct device *dev = ufs_qcom_phy->dev; clk = devm_clk_get(dev, name); if (IS_ERR(clk)) { @@ -174,42 +165,44 @@ static int __ufs_qcom_phy_clk_get(struct phy *phy, return err; } -static -int ufs_qcom_phy_clk_get(struct phy *phy, +static int ufs_qcom_phy_clk_get(struct device *dev, const char *name, struct clk **clk_out) { - return __ufs_qcom_phy_clk_get(phy, name, clk_out, true); + return __ufs_qcom_phy_clk_get(dev, name, clk_out, true); } -int -ufs_qcom_phy_init_clks(struct phy *generic_phy, - struct ufs_qcom_phy *phy_common) +int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common) { int err; - err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk", + if (of_device_is_compatible(phy_common->dev->of_node, + "qcom,msm8996-ufs-phy-qmp-14nm")) + goto skip_txrx_clk; + + err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk", &phy_common->tx_iface_clk); if (err) goto out; - err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk", + err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk", &phy_common->rx_iface_clk); if (err) goto out; - err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src", + err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src", &phy_common->ref_clk_src); if (err) goto out; +skip_txrx_clk: /* * "ref_clk_parent" is optional hence don't abort init if it's not * found. 
*/ - __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent", + __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent", &phy_common->ref_clk_parent, false); - err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk", + err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk", &phy_common->ref_clk); out: @@ -217,41 +210,14 @@ ufs_qcom_phy_init_clks(struct phy *generic_phy, } EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks); -int -ufs_qcom_phy_init_vregulators(struct phy *generic_phy, - struct ufs_qcom_phy *phy_common) -{ - int err; - - err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll, - "vdda-pll"); - if (err) - goto out; - - err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy, - "vdda-phy"); - - if (err) - goto out; - - /* vddp-ref-clk-* properties are optional */ - __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk, - "vddp-ref-clk", true); -out: - return err; -} -EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators); - -static int __ufs_qcom_phy_init_vreg(struct phy *phy, +static int __ufs_qcom_phy_init_vreg(struct device *dev, struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional) { int err = 0; - struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); - struct device *dev = ufs_qcom_phy->dev; char prop_name[MAX_PROP_NAME]; - vreg->name = kstrdup(name, GFP_KERNEL); + vreg->name = devm_kstrdup(dev, name, GFP_KERNEL); if (!vreg->name) { err = -ENOMEM; goto out; @@ -304,14 +270,36 @@ static int __ufs_qcom_phy_init_vreg(struct phy *phy, return err; } -static int ufs_qcom_phy_init_vreg(struct phy *phy, +static int ufs_qcom_phy_init_vreg(struct device *dev, struct ufs_qcom_phy_vreg *vreg, const char *name) { - return __ufs_qcom_phy_init_vreg(phy, vreg, name, false); + return __ufs_qcom_phy_init_vreg(dev, vreg, name, false); } -static -int ufs_qcom_phy_cfg_vreg(struct phy *phy, +int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common) +{ + int err; + + err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll, + "vdda-pll"); + if (err) + goto out; + + err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy, + "vdda-phy"); + + if (err) + goto out; + + /* vddp-ref-clk-* properties are optional */ + __ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk, + "vddp-ref-clk", true); +out: + return err; +} +EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators); + +static int ufs_qcom_phy_cfg_vreg(struct device *dev, struct ufs_qcom_phy_vreg *vreg, bool on) { int ret = 0; @@ -319,10 +307,6 @@ int ufs_qcom_phy_cfg_vreg(struct phy *phy, const char *name = vreg->name; int min_uV; int uA_load; - struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); - struct device *dev = ufs_qcom_phy->dev; - - BUG_ON(!vreg); if (regulator_count_voltages(reg) > 0) { min_uV = on ? 
vreg->min_uV : 0; @@ -350,18 +334,15 @@ int ufs_qcom_phy_cfg_vreg(struct phy *phy, return ret; } -static -int ufs_qcom_phy_enable_vreg(struct phy *phy, +static int ufs_qcom_phy_enable_vreg(struct device *dev, struct ufs_qcom_phy_vreg *vreg) { - struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); - struct device *dev = ufs_qcom_phy->dev; int ret = 0; if (!vreg || vreg->enabled) goto out; - ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true); + ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true); if (ret) { dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n", __func__, ret); @@ -380,10 +361,9 @@ int ufs_qcom_phy_enable_vreg(struct phy *phy, return ret; } -int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy) +static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy) { int ret = 0; - struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); if (phy->is_ref_clk_enabled) goto out; @@ -430,14 +410,10 @@ int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy) out: return ret; } -EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk); -static -int ufs_qcom_phy_disable_vreg(struct phy *phy, +static int ufs_qcom_phy_disable_vreg(struct device *dev, struct ufs_qcom_phy_vreg *vreg) { - struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); - struct device *dev = ufs_qcom_phy->dev; int ret = 0; if (!vreg || !vreg->enabled || vreg->is_always_on) @@ -447,7 +423,7 @@ int ufs_qcom_phy_disable_vreg(struct phy *phy, if (!ret) { /* ignore errors on applying disable config */ - ufs_qcom_phy_cfg_vreg(phy, vreg, false); + ufs_qcom_phy_cfg_vreg(dev, vreg, false); vreg->enabled = false; } else { dev_err(dev, "%s: %s disable failed, err=%d\n", @@ -457,10 +433,8 @@ int ufs_qcom_phy_disable_vreg(struct phy *phy, return ret; } -void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy) +static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy) { - struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); - if (phy->is_ref_clk_enabled) { clk_disable_unprepare(phy->ref_clk); /* @@ -473,7 +447,6 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy) phy->is_ref_clk_enabled = false; } } -EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk); #define UFS_REF_CLK_EN (1 << 5) @@ -526,9 +499,8 @@ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk); /* Turn ON M-PHY RMMI interface clocks */ -int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) +static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy) { - struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); int ret = 0; if (phy->is_iface_clk_enabled) @@ -552,20 +524,16 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) out: return ret; } -EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk); /* Turn OFF M-PHY RMMI interface clocks */ -void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) +void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy) { - struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); - if (phy->is_iface_clk_enabled) { clk_disable_unprepare(phy->tx_iface_clk); clk_disable_unprepare(phy->rx_iface_clk); phy->is_iface_clk_enabled = false; } } -EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk); int ufs_qcom_phy_start_serdes(struct phy *generic_phy) { @@ -634,29 +602,6 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) } EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy); -int ufs_qcom_phy_remove(struct phy *generic_phy, - struct ufs_qcom_phy *ufs_qcom_phy) -{ - phy_power_off(generic_phy); - - 
kfree(ufs_qcom_phy->vdda_pll.name); - kfree(ufs_qcom_phy->vdda_phy.name); - - return 0; -} -EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove); - -int ufs_qcom_phy_exit(struct phy *generic_phy) -{ - struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); - - if (ufs_qcom_phy->is_powered_on) - phy_power_off(generic_phy); - - return 0; -} -EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit); - int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy) { struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); @@ -678,7 +623,10 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy) struct device *dev = phy_common->dev; int err; - err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy); + if (phy_common->is_powered_on) + return 0; + + err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy); if (err) { dev_err(dev, "%s enable vdda_phy failed, err=%d\n", __func__, err); @@ -688,23 +636,30 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy) phy_common->phy_spec_ops->power_control(phy_common, true); /* vdda_pll also enables ref clock LDOs so enable it first */ - err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll); + err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll); if (err) { dev_err(dev, "%s enable vdda_pll failed, err=%d\n", __func__, err); goto out_disable_phy; } - err = ufs_qcom_phy_enable_ref_clk(generic_phy); + err = ufs_qcom_phy_enable_iface_clk(phy_common); if (err) { - dev_err(dev, "%s enable phy ref clock failed, err=%d\n", + dev_err(dev, "%s enable phy iface clock failed, err=%d\n", __func__, err); goto out_disable_pll; } + err = ufs_qcom_phy_enable_ref_clk(phy_common); + if (err) { + dev_err(dev, "%s enable phy ref clock failed, err=%d\n", + __func__, err); + goto out_disable_iface_clk; + } + /* enable device PHY ref_clk pad rail */ if (phy_common->vddp_ref_clk.reg) { - err = ufs_qcom_phy_enable_vreg(generic_phy, + err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vddp_ref_clk); if (err) { dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n", @@ -717,11 +672,13 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy) goto out; out_disable_ref_clk: - ufs_qcom_phy_disable_ref_clk(generic_phy); + ufs_qcom_phy_disable_ref_clk(phy_common); +out_disable_iface_clk: + ufs_qcom_phy_disable_iface_clk(phy_common); out_disable_pll: - ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll); + ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll); out_disable_phy: - ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy); + ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy); out: return err; } @@ -731,15 +688,19 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy) { struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy); + if (!phy_common->is_powered_on) + return 0; + phy_common->phy_spec_ops->power_control(phy_common, false); if (phy_common->vddp_ref_clk.reg) - ufs_qcom_phy_disable_vreg(generic_phy, + ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vddp_ref_clk); - ufs_qcom_phy_disable_ref_clk(generic_phy); + ufs_qcom_phy_disable_ref_clk(phy_common); + ufs_qcom_phy_disable_iface_clk(phy_common); - ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll); - ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy); + ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll); + ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy); phy_common->is_powered_on = false; return 0; diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c index 
3d97eadd247d11253263c30981240f8698456b68..c63da1b955c17325881018bd912e742eff0018d9 100644 --- a/drivers/phy/phy-rcar-gen3-usb2.c +++ b/drivers/phy/phy-rcar-gen3-usb2.c @@ -70,6 +70,7 @@ #define USB2_LINECTRL1_DP_RPD BIT(18) #define USB2_LINECTRL1_DMRPD_EN BIT(17) #define USB2_LINECTRL1_DM_RPD BIT(16) +#define USB2_LINECTRL1_OPMODE_NODRV BIT(6) /* ADPCTRL */ #define USB2_ADPCTRL_OTGSESSVLD BIT(20) @@ -161,6 +162,43 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch) schedule_work(&ch->work); } +static void rcar_gen3_init_for_b_host(struct rcar_gen3_chan *ch) +{ + void __iomem *usb2_base = ch->base; + u32 val; + + val = readl(usb2_base + USB2_LINECTRL1); + writel(val | USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1); + + rcar_gen3_set_linectrl(ch, 1, 1); + rcar_gen3_set_host_mode(ch, 1); + rcar_gen3_enable_vbus_ctrl(ch, 0); + + val = readl(usb2_base + USB2_LINECTRL1); + writel(val & ~USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1); +} + +static void rcar_gen3_init_for_a_peri(struct rcar_gen3_chan *ch) +{ + rcar_gen3_set_linectrl(ch, 0, 1); + rcar_gen3_set_host_mode(ch, 0); + rcar_gen3_enable_vbus_ctrl(ch, 1); +} + +static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch) +{ + void __iomem *usb2_base = ch->base; + u32 val; + + val = readl(usb2_base + USB2_OBINTEN); + writel(val & ~USB2_OBINT_BITS, usb2_base + USB2_OBINTEN); + + rcar_gen3_enable_vbus_ctrl(ch, 0); + rcar_gen3_init_for_host(ch); + + writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN); +} + static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch) { return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG); @@ -174,6 +212,65 @@ static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch) rcar_gen3_init_for_peri(ch); } +static bool rcar_gen3_is_host(struct rcar_gen3_chan *ch) +{ + return !(readl(ch->base + USB2_COMMCTRL) & USB2_COMMCTRL_OTG_PERI); +} + +static ssize_t role_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rcar_gen3_chan *ch = dev_get_drvdata(dev); + bool is_b_device, is_host, new_mode_is_host; + + if (!ch->has_otg || !ch->phy->init_count) + return -EIO; + + /* + * is_b_device: true is B-Device. false is A-Device. + * If {new_mode_}is_host: true is Host mode. false is Peripheral mode. + */ + is_b_device = rcar_gen3_check_id(ch); + is_host = rcar_gen3_is_host(ch); + if (!strncmp(buf, "host", strlen("host"))) + new_mode_is_host = true; + else if (!strncmp(buf, "peripheral", strlen("peripheral"))) + new_mode_is_host = false; + else + return -EINVAL; + + /* If current and new mode is the same, this returns the error */ + if (is_host == new_mode_is_host) + return -EINVAL; + + if (new_mode_is_host) { /* And is_host must be false */ + if (!is_b_device) /* A-Peripheral */ + rcar_gen3_init_from_a_peri_to_a_host(ch); + else /* B-Peripheral */ + rcar_gen3_init_for_b_host(ch); + } else { /* And is_host must be true */ + if (!is_b_device) /* A-Host */ + rcar_gen3_init_for_a_peri(ch); + else /* B-Host */ + rcar_gen3_init_for_peri(ch); + } + + return count; +} + +static ssize_t role_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rcar_gen3_chan *ch = dev_get_drvdata(dev); + + if (!ch->has_otg || !ch->phy->init_count) + return -EIO; + + return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? 
"host" : + "peripheral"); +} +static DEVICE_ATTR_RW(role); + static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) { void __iomem *usb2_base = ch->base; @@ -351,21 +448,40 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) channel->vbus = NULL; } + platform_set_drvdata(pdev, channel); phy_set_drvdata(channel->phy, channel); provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(provider)) + if (IS_ERR(provider)) { dev_err(dev, "Failed to register PHY provider\n"); + } else if (channel->has_otg) { + int ret; + + ret = device_create_file(dev, &dev_attr_role); + if (ret < 0) + return ret; + } return PTR_ERR_OR_ZERO(provider); } +static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev) +{ + struct rcar_gen3_chan *channel = platform_get_drvdata(pdev); + + if (channel->has_otg) + device_remove_file(&pdev->dev, &dev_attr_role); + + return 0; +}; + static struct platform_driver rcar_gen3_phy_usb2_driver = { .driver = { .name = "phy_rcar_gen3_usb2", .of_match_table = rcar_gen3_phy_usb2_match_table, }, .probe = rcar_gen3_phy_usb2_probe, + .remove = rcar_gen3_phy_usb2_remove, }; module_platform_driver(rcar_gen3_phy_usb2_driver); diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c index fd57345ffed28709c1e76105cb4fd7df32debe1c..f1b24f18e9b22d1e1071f0f870724e08b0d0c19e 100644 --- a/drivers/phy/phy-rockchip-emmc.c +++ b/drivers/phy/phy-rockchip-emmc.c @@ -132,7 +132,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) default: ideal_rate = 200000000; break; - }; + } diff = (rate > ideal_rate) ? rate - ideal_rate : ideal_rate - rate; diff --git a/drivers/phy/phy-rockchip-inno-usb2.c b/drivers/phy/phy-rockchip-inno-usb2.c index ac203107b0713708db66f89a3cc3b997cf5a10bd..2f99ec95079cd2bcd3419a2c38c38bf2a8dd25cd 100644 --- a/drivers/phy/phy-rockchip-inno-usb2.c +++ b/drivers/phy/phy-rockchip-inno-usb2.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -30,11 +31,15 @@ #include #include #include +#include #include #include +#include +#include #define BIT_WRITEABLE_SHIFT 16 -#define SCHEDULE_DELAY (60 * HZ) +#define SCHEDULE_DELAY (60 * HZ) +#define OTG_SCHEDULE_DELAY (2 * HZ) enum rockchip_usb2phy_port_id { USB2PHY_PORT_OTG, @@ -49,6 +54,37 @@ enum rockchip_usb2phy_host_state { PHY_STATE_FS_LS_ONLINE = 4, }; +/** + * Different states involved in USB charger detection. + * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection + * process is not yet started. + * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact. + * USB_CHG_STATE_DCD_DONE Data pin contact is detected. + * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects + * between SDP and DCP/CDP). + * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects + * between DCP and CDP). + * USB_CHG_STATE_DETECTED USB charger type is determined. 
+ */ +enum usb_chg_state { + USB_CHG_STATE_UNDEFINED = 0, + USB_CHG_STATE_WAIT_FOR_DCD, + USB_CHG_STATE_DCD_DONE, + USB_CHG_STATE_PRIMARY_DONE, + USB_CHG_STATE_SECONDARY_DONE, + USB_CHG_STATE_DETECTED, +}; + +static const unsigned int rockchip_usb2phy_extcon_cable[] = { + EXTCON_USB, + EXTCON_USB_HOST, + EXTCON_CHG_USB_SDP, + EXTCON_CHG_USB_CDP, + EXTCON_CHG_USB_DCP, + EXTCON_CHG_USB_SLOW, + EXTCON_NONE, +}; + struct usb2phy_reg { unsigned int offset; unsigned int bitend; @@ -57,20 +93,56 @@ struct usb2phy_reg { unsigned int enable; }; +/** + * struct rockchip_chg_det_reg: usb charger detect registers + * @cp_det: charging port detected successfully. + * @dcp_det: dedicated charging port detected successfully. + * @dp_det: assert data pin connect successfully. + * @idm_sink_en: open dm sink curren. + * @idp_sink_en: open dp sink current. + * @idp_src_en: open dm source current. + * @rdm_pdwn_en: open dm pull down resistor. + * @vdm_src_en: open dm voltage source. + * @vdp_src_en: open dp voltage source. + * @opmode: utmi operational mode. + */ +struct rockchip_chg_det_reg { + struct usb2phy_reg cp_det; + struct usb2phy_reg dcp_det; + struct usb2phy_reg dp_det; + struct usb2phy_reg idm_sink_en; + struct usb2phy_reg idp_sink_en; + struct usb2phy_reg idp_src_en; + struct usb2phy_reg rdm_pdwn_en; + struct usb2phy_reg vdm_src_en; + struct usb2phy_reg vdp_src_en; + struct usb2phy_reg opmode; +}; + /** * struct rockchip_usb2phy_port_cfg: usb-phy port configuration. * @phy_sus: phy suspend register. + * @bvalid_det_en: vbus valid rise detection enable register. + * @bvalid_det_st: vbus valid rise detection status register. + * @bvalid_det_clr: vbus valid rise detection clear register. * @ls_det_en: linestate detection enable register. * @ls_det_st: linestate detection state register. * @ls_det_clr: linestate detection clear register. + * @utmi_avalid: utmi vbus avalid status register. + * @utmi_bvalid: utmi vbus bvalid status register. * @utmi_ls: utmi linestate state register. * @utmi_hstdet: utmi host disconnect register. */ struct rockchip_usb2phy_port_cfg { struct usb2phy_reg phy_sus; + struct usb2phy_reg bvalid_det_en; + struct usb2phy_reg bvalid_det_st; + struct usb2phy_reg bvalid_det_clr; struct usb2phy_reg ls_det_en; struct usb2phy_reg ls_det_st; struct usb2phy_reg ls_det_clr; + struct usb2phy_reg utmi_avalid; + struct usb2phy_reg utmi_bvalid; struct usb2phy_reg utmi_ls; struct usb2phy_reg utmi_hstdet; }; @@ -80,31 +152,51 @@ struct rockchip_usb2phy_port_cfg { * @reg: the address offset of grf for usb-phy config. * @num_ports: specify how many ports that the phy has. * @clkout_ctl: keep on/turn off output clk of phy. + * @chg_det: charger detection registers. */ struct rockchip_usb2phy_cfg { unsigned int reg; unsigned int num_ports; struct usb2phy_reg clkout_ctl; const struct rockchip_usb2phy_port_cfg port_cfgs[USB2PHY_NUM_PORTS]; + const struct rockchip_chg_det_reg chg_det; }; /** * struct rockchip_usb2phy_port: usb-phy port data. * @port_id: flag for otg port or host port. * @suspended: phy suspended flag. + * @utmi_avalid: utmi avalid status usage flag. + * true - use avalid to get vbus status + * flase - use bvalid to get vbus status + * @vbus_attached: otg device vbus status. + * @bvalid_irq: IRQ number assigned for vbus valid rise detection. * @ls_irq: IRQ number assigned for linestate detection. * @mutex: for register updating in sm_work. - * @sm_work: OTG state machine work. + * @chg_work: charge detect work. + * @otg_sm_work: OTG state machine work. 
+ * @sm_work: HOST state machine work. * @phy_cfg: port register configuration, assigned by driver data. + * @event_nb: hold event notification callback. + * @state: define OTG enumeration states before device reset. + * @mode: the dr_mode of the controller. */ struct rockchip_usb2phy_port { struct phy *phy; unsigned int port_id; bool suspended; + bool utmi_avalid; + bool vbus_attached; + int bvalid_irq; int ls_irq; struct mutex mutex; + struct delayed_work chg_work; + struct delayed_work otg_sm_work; struct delayed_work sm_work; const struct rockchip_usb2phy_port_cfg *port_cfg; + struct notifier_block event_nb; + enum usb_otg_state state; + enum usb_dr_mode mode; }; /** @@ -113,6 +205,11 @@ struct rockchip_usb2phy_port { * @clk: clock struct of phy input clk. * @clk480m: clock struct of phy output clk. * @clk_hw: clock struct of phy output clk management. + * @chg_state: states involved in USB charger detection. + * @chg_type: USB charger types. + * @dcd_retries: The retry count used to track Data contact + * detection process. + * @edev: extcon device for notification registration * @phy_cfg: phy register configuration, assigned by driver data. * @ports: phy port instance. */ @@ -122,6 +219,10 @@ struct rockchip_usb2phy { struct clk *clk; struct clk *clk480m; struct clk_hw clk480m_hw; + enum usb_chg_state chg_state; + enum power_supply_type chg_type; + u8 dcd_retries; + struct extcon_dev *edev; const struct rockchip_usb2phy_cfg *phy_cfg; struct rockchip_usb2phy_port ports[USB2PHY_NUM_PORTS]; }; @@ -153,7 +254,7 @@ static inline bool property_enabled(struct rockchip_usb2phy *rphy, return tmp == reg->enable; } -static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw) +static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw) { struct rockchip_usb2phy *rphy = container_of(hw, struct rockchip_usb2phy, clk480m_hw); @@ -165,14 +266,14 @@ static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw) if (ret) return ret; - /* waitting for the clk become stable */ - mdelay(1); + /* waiting for the clk become stable */ + usleep_range(1200, 1300); } return 0; } -static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw) +static void rockchip_usb2phy_clk480m_unprepare(struct clk_hw *hw) { struct rockchip_usb2phy *rphy = container_of(hw, struct rockchip_usb2phy, clk480m_hw); @@ -181,7 +282,7 @@ static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw) property_enable(rphy, &rphy->phy_cfg->clkout_ctl, false); } -static int rockchip_usb2phy_clk480m_enabled(struct clk_hw *hw) +static int rockchip_usb2phy_clk480m_prepared(struct clk_hw *hw) { struct rockchip_usb2phy *rphy = container_of(hw, struct rockchip_usb2phy, clk480m_hw); @@ -197,9 +298,9 @@ rockchip_usb2phy_clk480m_recalc_rate(struct clk_hw *hw, } static const struct clk_ops rockchip_usb2phy_clkout_ops = { - .enable = rockchip_usb2phy_clk480m_enable, - .disable = rockchip_usb2phy_clk480m_disable, - .is_enabled = rockchip_usb2phy_clk480m_enabled, + .prepare = rockchip_usb2phy_clk480m_prepare, + .unprepare = rockchip_usb2phy_clk480m_unprepare, + .is_prepared = rockchip_usb2phy_clk480m_prepared, .recalc_rate = rockchip_usb2phy_clk480m_recalc_rate, }; @@ -263,33 +364,84 @@ rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy) return ret; } -static int rockchip_usb2phy_init(struct phy *phy) +static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy) { - struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy); - struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent); int ret; + struct 
device_node *node = rphy->dev->of_node; + struct extcon_dev *edev; + + if (of_property_read_bool(node, "extcon")) { + edev = extcon_get_edev_by_phandle(rphy->dev, 0); + if (IS_ERR(edev)) { + if (PTR_ERR(edev) != -EPROBE_DEFER) + dev_err(rphy->dev, "Invalid or missing extcon\n"); + return PTR_ERR(edev); + } + } else { + /* Initialize extcon device */ + edev = devm_extcon_dev_allocate(rphy->dev, + rockchip_usb2phy_extcon_cable); - if (rport->port_id == USB2PHY_PORT_HOST) { - /* clear linestate and enable linestate detect irq */ - mutex_lock(&rport->mutex); + if (IS_ERR(edev)) + return -ENOMEM; - ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true); + ret = devm_extcon_dev_register(rphy->dev, edev); if (ret) { - mutex_unlock(&rport->mutex); + dev_err(rphy->dev, "failed to register extcon device\n"); return ret; } + } - ret = property_enable(rphy, &rport->port_cfg->ls_det_en, true); - if (ret) { - mutex_unlock(&rport->mutex); - return ret; + rphy->edev = edev; + + return 0; +} + +static int rockchip_usb2phy_init(struct phy *phy) +{ + struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy); + struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent); + int ret = 0; + + mutex_lock(&rport->mutex); + + if (rport->port_id == USB2PHY_PORT_OTG) { + if (rport->mode != USB_DR_MODE_HOST) { + /* clear bvalid status and enable bvalid detect irq */ + ret = property_enable(rphy, + &rport->port_cfg->bvalid_det_clr, + true); + if (ret) + goto out; + + ret = property_enable(rphy, + &rport->port_cfg->bvalid_det_en, + true); + if (ret) + goto out; + + schedule_delayed_work(&rport->otg_sm_work, + OTG_SCHEDULE_DELAY); + } else { + /* If OTG works in host only mode, do nothing. */ + dev_dbg(&rport->phy->dev, "mode %d\n", rport->mode); } + } else if (rport->port_id == USB2PHY_PORT_HOST) { + /* clear linestate and enable linestate detect irq */ + ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true); + if (ret) + goto out; + + ret = property_enable(rphy, &rport->port_cfg->ls_det_en, true); + if (ret) + goto out; - mutex_unlock(&rport->mutex); schedule_delayed_work(&rport->sm_work, SCHEDULE_DELAY); } - return 0; +out: + mutex_unlock(&rport->mutex); + return ret; } static int rockchip_usb2phy_power_on(struct phy *phy) @@ -340,7 +492,11 @@ static int rockchip_usb2phy_exit(struct phy *phy) { struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy); - if (rport->port_id == USB2PHY_PORT_HOST) + if (rport->port_id == USB2PHY_PORT_OTG && + rport->mode != USB_DR_MODE_HOST) { + cancel_delayed_work_sync(&rport->otg_sm_work); + cancel_delayed_work_sync(&rport->chg_work); + } else if (rport->port_id == USB2PHY_PORT_HOST) cancel_delayed_work_sync(&rport->sm_work); return 0; @@ -354,6 +510,249 @@ static const struct phy_ops rockchip_usb2phy_ops = { .owner = THIS_MODULE, }; +static void rockchip_usb2phy_otg_sm_work(struct work_struct *work) +{ + struct rockchip_usb2phy_port *rport = + container_of(work, struct rockchip_usb2phy_port, + otg_sm_work.work); + struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent); + static unsigned int cable; + unsigned long delay; + bool vbus_attach, sch_work, notify_charger; + + if (rport->utmi_avalid) + vbus_attach = + property_enabled(rphy, &rport->port_cfg->utmi_avalid); + else + vbus_attach = + property_enabled(rphy, &rport->port_cfg->utmi_bvalid); + + sch_work = false; + notify_charger = false; + delay = OTG_SCHEDULE_DELAY; + dev_dbg(&rport->phy->dev, "%s otg sm work\n", + usb_otg_state_string(rport->state)); + + switch (rport->state) { + case 
OTG_STATE_UNDEFINED: + rport->state = OTG_STATE_B_IDLE; + if (!vbus_attach) + rockchip_usb2phy_power_off(rport->phy); + /* fall through */ + case OTG_STATE_B_IDLE: + if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) > 0) { + dev_dbg(&rport->phy->dev, "usb otg host connect\n"); + rport->state = OTG_STATE_A_HOST; + rockchip_usb2phy_power_on(rport->phy); + return; + } else if (vbus_attach) { + dev_dbg(&rport->phy->dev, "vbus_attach\n"); + switch (rphy->chg_state) { + case USB_CHG_STATE_UNDEFINED: + schedule_delayed_work(&rport->chg_work, 0); + return; + case USB_CHG_STATE_DETECTED: + switch (rphy->chg_type) { + case POWER_SUPPLY_TYPE_USB: + dev_dbg(&rport->phy->dev, + "sdp cable is connecetd\n"); + rockchip_usb2phy_power_on(rport->phy); + rport->state = OTG_STATE_B_PERIPHERAL; + notify_charger = true; + sch_work = true; + cable = EXTCON_CHG_USB_SDP; + break; + case POWER_SUPPLY_TYPE_USB_DCP: + dev_dbg(&rport->phy->dev, + "dcp cable is connecetd\n"); + rockchip_usb2phy_power_off(rport->phy); + notify_charger = true; + sch_work = true; + cable = EXTCON_CHG_USB_DCP; + break; + case POWER_SUPPLY_TYPE_USB_CDP: + dev_dbg(&rport->phy->dev, + "cdp cable is connecetd\n"); + rockchip_usb2phy_power_on(rport->phy); + rport->state = OTG_STATE_B_PERIPHERAL; + notify_charger = true; + sch_work = true; + cable = EXTCON_CHG_USB_CDP; + break; + default: + break; + } + break; + default: + break; + } + } else { + notify_charger = true; + rphy->chg_state = USB_CHG_STATE_UNDEFINED; + rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN; + } + + if (rport->vbus_attached != vbus_attach) { + rport->vbus_attached = vbus_attach; + + if (notify_charger && rphy->edev) + extcon_set_cable_state_(rphy->edev, + cable, vbus_attach); + } + break; + case OTG_STATE_B_PERIPHERAL: + if (!vbus_attach) { + dev_dbg(&rport->phy->dev, "usb disconnect\n"); + rphy->chg_state = USB_CHG_STATE_UNDEFINED; + rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN; + rport->state = OTG_STATE_B_IDLE; + delay = 0; + rockchip_usb2phy_power_off(rport->phy); + } + sch_work = true; + break; + case OTG_STATE_A_HOST: + if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) == 0) { + dev_dbg(&rport->phy->dev, "usb otg host disconnect\n"); + rport->state = OTG_STATE_B_IDLE; + rockchip_usb2phy_power_off(rport->phy); + } + break; + default: + break; + } + + if (sch_work) + schedule_delayed_work(&rport->otg_sm_work, delay); +} + +static const char *chg_to_string(enum power_supply_type chg_type) +{ + switch (chg_type) { + case POWER_SUPPLY_TYPE_USB: + return "USB_SDP_CHARGER"; + case POWER_SUPPLY_TYPE_USB_DCP: + return "USB_DCP_CHARGER"; + case POWER_SUPPLY_TYPE_USB_CDP: + return "USB_CDP_CHARGER"; + default: + return "INVALID_CHARGER"; + } +} + +static void rockchip_chg_enable_dcd(struct rockchip_usb2phy *rphy, + bool en) +{ + property_enable(rphy, &rphy->phy_cfg->chg_det.rdm_pdwn_en, en); + property_enable(rphy, &rphy->phy_cfg->chg_det.idp_src_en, en); +} + +static void rockchip_chg_enable_primary_det(struct rockchip_usb2phy *rphy, + bool en) +{ + property_enable(rphy, &rphy->phy_cfg->chg_det.vdp_src_en, en); + property_enable(rphy, &rphy->phy_cfg->chg_det.idm_sink_en, en); +} + +static void rockchip_chg_enable_secondary_det(struct rockchip_usb2phy *rphy, + bool en) +{ + property_enable(rphy, &rphy->phy_cfg->chg_det.vdm_src_en, en); + property_enable(rphy, &rphy->phy_cfg->chg_det.idp_sink_en, en); +} + +#define CHG_DCD_POLL_TIME (100 * HZ / 1000) +#define CHG_DCD_MAX_RETRIES 6 +#define CHG_PRIMARY_DET_TIME (40 * HZ / 1000) +#define CHG_SECONDARY_DET_TIME (40 * HZ / 
1000) +static void rockchip_chg_detect_work(struct work_struct *work) +{ + struct rockchip_usb2phy_port *rport = + container_of(work, struct rockchip_usb2phy_port, chg_work.work); + struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent); + bool is_dcd, tmout, vout; + unsigned long delay; + + dev_dbg(&rport->phy->dev, "chg detection work state = %d\n", + rphy->chg_state); + switch (rphy->chg_state) { + case USB_CHG_STATE_UNDEFINED: + if (!rport->suspended) + rockchip_usb2phy_power_off(rport->phy); + /* put the controller in non-driving mode */ + property_enable(rphy, &rphy->phy_cfg->chg_det.opmode, false); + /* Start DCD processing stage 1 */ + rockchip_chg_enable_dcd(rphy, true); + rphy->chg_state = USB_CHG_STATE_WAIT_FOR_DCD; + rphy->dcd_retries = 0; + delay = CHG_DCD_POLL_TIME; + break; + case USB_CHG_STATE_WAIT_FOR_DCD: + /* get data contact detection status */ + is_dcd = property_enabled(rphy, &rphy->phy_cfg->chg_det.dp_det); + tmout = ++rphy->dcd_retries == CHG_DCD_MAX_RETRIES; + /* stage 2 */ + if (is_dcd || tmout) { + /* stage 4 */ + /* Turn off DCD circuitry */ + rockchip_chg_enable_dcd(rphy, false); + /* Voltage Source on DP, Probe on DM */ + rockchip_chg_enable_primary_det(rphy, true); + delay = CHG_PRIMARY_DET_TIME; + rphy->chg_state = USB_CHG_STATE_DCD_DONE; + } else { + /* stage 3 */ + delay = CHG_DCD_POLL_TIME; + } + break; + case USB_CHG_STATE_DCD_DONE: + vout = property_enabled(rphy, &rphy->phy_cfg->chg_det.cp_det); + rockchip_chg_enable_primary_det(rphy, false); + if (vout) { + /* Voltage Source on DM, Probe on DP */ + rockchip_chg_enable_secondary_det(rphy, true); + delay = CHG_SECONDARY_DET_TIME; + rphy->chg_state = USB_CHG_STATE_PRIMARY_DONE; + } else { + if (rphy->dcd_retries == CHG_DCD_MAX_RETRIES) { + /* floating charger found */ + rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP; + rphy->chg_state = USB_CHG_STATE_DETECTED; + delay = 0; + } else { + rphy->chg_type = POWER_SUPPLY_TYPE_USB; + rphy->chg_state = USB_CHG_STATE_DETECTED; + delay = 0; + } + } + break; + case USB_CHG_STATE_PRIMARY_DONE: + vout = property_enabled(rphy, &rphy->phy_cfg->chg_det.dcp_det); + /* Turn off voltage source */ + rockchip_chg_enable_secondary_det(rphy, false); + if (vout) + rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP; + else + rphy->chg_type = POWER_SUPPLY_TYPE_USB_CDP; + /* fall through */ + case USB_CHG_STATE_SECONDARY_DONE: + rphy->chg_state = USB_CHG_STATE_DETECTED; + delay = 0; + /* fall through */ + case USB_CHG_STATE_DETECTED: + /* put the controller in normal mode */ + property_enable(rphy, &rphy->phy_cfg->chg_det.opmode, true); + rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work); + dev_info(&rport->phy->dev, "charger = %s\n", + chg_to_string(rphy->chg_type)); + return; + default: + return; + } + + schedule_delayed_work(&rport->chg_work, delay); +} + /* * The function manage host-phy port state and suspend/resume phy port * to save power. 
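Aside (not part of the patch): the charger-detection work above follows the standard BC-1.2 sequence of data-contact detect, then primary detection (SDP vs. DCP/CDP), then secondary detection (DCP vs. CDP). A minimal sketch of that decision tree, using purely illustrative names:

enum example_chg_result { EXAMPLE_CHG_SDP, EXAMPLE_CHG_CDP, EXAMPLE_CHG_DCP };

/*
 * primary_cp_det:    comparator output sampled after primary detection
 *                    (voltage source on D+, current sink on D-).
 * secondary_dcp_det: comparator output sampled after secondary detection
 *                    (voltage source on D-, current sink on D+).
 */
static enum example_chg_result example_bc12_classify(bool primary_cp_det,
						     bool secondary_dcp_det)
{
	/* Nothing seen on D- during primary detection: a standard
	 * downstream port (plain USB host/hub). */
	if (!primary_cp_det)
		return EXAMPLE_CHG_SDP;

	/* D- still driven during secondary detection: dedicated charging
	 * port; otherwise a charging downstream port. */
	return secondary_dcp_det ? EXAMPLE_CHG_DCP : EXAMPLE_CHG_CDP;
}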
@@ -485,6 +884,26 @@ static irqreturn_t rockchip_usb2phy_linestate_irq(int irq, void *data) return IRQ_HANDLED; } +static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data) +{ + struct rockchip_usb2phy_port *rport = data; + struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent); + + if (!property_enabled(rphy, &rport->port_cfg->bvalid_det_st)) + return IRQ_NONE; + + mutex_lock(&rport->mutex); + + /* clear bvalid detect irq pending status */ + property_enable(rphy, &rport->port_cfg->bvalid_det_clr, true); + + mutex_unlock(&rport->mutex); + + rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work); + + return IRQ_HANDLED; +} + static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy, struct rockchip_usb2phy_port *rport, struct device_node *child_np) @@ -509,13 +928,86 @@ static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy, IRQF_ONESHOT, "rockchip_usb2phy", rport); if (ret) { - dev_err(rphy->dev, "failed to request irq handle\n"); + dev_err(rphy->dev, "failed to request linestate irq handle\n"); return ret; } return 0; } +static int rockchip_otg_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct rockchip_usb2phy_port *rport = + container_of(nb, struct rockchip_usb2phy_port, event_nb); + + schedule_delayed_work(&rport->otg_sm_work, OTG_SCHEDULE_DELAY); + + return NOTIFY_DONE; +} + +static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy, + struct rockchip_usb2phy_port *rport, + struct device_node *child_np) +{ + int ret; + + rport->port_id = USB2PHY_PORT_OTG; + rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_OTG]; + rport->state = OTG_STATE_UNDEFINED; + + /* + * set suspended flag to true, but actually don't + * put phy in suspend mode, it aims to enable usb + * phy and clock in power_on() called by usb controller + * driver during probe. + */ + rport->suspended = true; + rport->vbus_attached = false; + + mutex_init(&rport->mutex); + + rport->mode = of_usb_get_dr_mode_by_phy(child_np, -1); + if (rport->mode == USB_DR_MODE_HOST) { + ret = 0; + goto out; + } + + INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work); + INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work); + + rport->utmi_avalid = + of_property_read_bool(child_np, "rockchip,utmi-avalid"); + + rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid"); + if (rport->bvalid_irq < 0) { + dev_err(rphy->dev, "no vbus valid irq provided\n"); + ret = rport->bvalid_irq; + goto out; + } + + ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq, NULL, + rockchip_usb2phy_bvalid_irq, + IRQF_ONESHOT, + "rockchip_usb2phy_bvalid", rport); + if (ret) { + dev_err(rphy->dev, "failed to request otg-bvalid irq handle\n"); + goto out; + } + + if (!IS_ERR(rphy->edev)) { + rport->event_nb.notifier_call = rockchip_otg_event; + + ret = extcon_register_notifier(rphy->edev, EXTCON_USB_HOST, + &rport->event_nb); + if (ret) + dev_err(rphy->dev, "register USB HOST notifier failed\n"); + } + +out: + return ret; +} + static int rockchip_usb2phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -553,8 +1045,14 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) rphy->dev = dev; phy_cfgs = match->data; + rphy->chg_state = USB_CHG_STATE_UNDEFINED; + rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN; platform_set_drvdata(pdev, rphy); + ret = rockchip_usb2phy_extcon_register(rphy); + if (ret) + return ret; + /* find out a proper config which can be matched with dt. 
*/ index = 0; while (phy_cfgs[index].reg) { @@ -591,13 +1089,9 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) struct rockchip_usb2phy_port *rport = &rphy->ports[index]; struct phy *phy; - /* - * This driver aim to support both otg-port and host-port, - * but unfortunately, the otg part is not ready in current, - * so this comments and below codes are interim, which should - * be changed after otg-port is supplied soon. - */ - if (of_node_cmp(child_np->name, "host-port")) + /* This driver aims to support both otg-port and host-port */ + if (of_node_cmp(child_np->name, "host-port") && + of_node_cmp(child_np->name, "otg-port")) goto next_child; phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops); @@ -610,9 +1104,18 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) rport->phy = phy; phy_set_drvdata(rport->phy, rport); - ret = rockchip_usb2phy_host_port_init(rphy, rport, child_np); - if (ret) - goto put_child; + /* initialize otg/host port separately */ + if (!of_node_cmp(child_np->name, "host-port")) { + ret = rockchip_usb2phy_host_port_init(rphy, rport, + child_np); + if (ret) + goto put_child; + } else { + ret = rockchip_usb2phy_otg_port_init(rphy, rport, + child_np); + if (ret) + goto put_child; + } next_child: /* to prevent out of boundary */ @@ -654,10 +1157,18 @@ static const struct rockchip_usb2phy_cfg rk3366_phy_cfgs[] = { static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = { { - .reg = 0xe450, + .reg = 0xe450, .num_ports = 2, .clkout_ctl = { 0xe450, 4, 4, 1, 0 }, .port_cfgs = { + [USB2PHY_PORT_OTG] = { + .phy_sus = { 0xe454, 1, 0, 2, 1 }, + .bvalid_det_en = { 0xe3c0, 3, 3, 0, 1 }, + .bvalid_det_st = { 0xe3e0, 3, 3, 0, 1 }, + .bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 }, + .utmi_avalid = { 0xe2ac, 7, 7, 0, 1 }, + .utmi_bvalid = { 0xe2ac, 12, 12, 0, 1 }, + }, [USB2PHY_PORT_HOST] = { .phy_sus = { 0xe458, 1, 0, 0x2, 0x1 }, .ls_det_en = { 0xe3c0, 6, 6, 0, 1 }, @@ -667,12 +1178,32 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = { .utmi_hstdet = { 0xe2ac, 23, 23, 0, 1 } } }, + .chg_det = { + .opmode = { 0xe454, 3, 0, 5, 1 }, + .cp_det = { 0xe2ac, 2, 2, 0, 1 }, + .dcp_det = { 0xe2ac, 1, 1, 0, 1 }, + .dp_det = { 0xe2ac, 0, 0, 0, 1 }, + .idm_sink_en = { 0xe450, 8, 8, 0, 1 }, + .idp_sink_en = { 0xe450, 7, 7, 0, 1 }, + .idp_src_en = { 0xe450, 9, 9, 0, 1 }, + .rdm_pdwn_en = { 0xe450, 10, 10, 0, 1 }, + .vdm_src_en = { 0xe450, 12, 12, 0, 1 }, + .vdp_src_en = { 0xe450, 11, 11, 0, 1 }, + }, }, { - .reg = 0xe460, + .reg = 0xe460, .num_ports = 2, .clkout_ctl = { 0xe460, 4, 4, 1, 0 }, .port_cfgs = { + [USB2PHY_PORT_OTG] = { + .phy_sus = { 0xe464, 1, 0, 2, 1 }, + .bvalid_det_en = { 0xe3c0, 8, 8, 0, 1 }, + .bvalid_det_st = { 0xe3e0, 8, 8, 0, 1 }, + .bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 }, + .utmi_avalid = { 0xe2ac, 10, 10, 0, 1 }, + .utmi_bvalid = { 0xe2ac, 16, 16, 0, 1 }, + }, [USB2PHY_PORT_HOST] = { .phy_sus = { 0xe468, 1, 0, 0x2, 0x1 }, .ls_det_en = { 0xe3c0, 11, 11, 0, 1 }, diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/phy-rockchip-pcie.c index a2b4c6b58aea66d0b06cb153359ea3c31d4f6e66..6904633cad687d114cf19eb3292aba9896feb408 100644 --- a/drivers/phy/phy-rockchip-pcie.c +++ b/drivers/phy/phy-rockchip-pcie.c @@ -249,21 +249,10 @@ static int rockchip_pcie_phy_init(struct phy *phy) static int rockchip_pcie_phy_exit(struct phy *phy) { struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); - int err = 0; clk_disable_unprepare(rk_phy->clk_pciephy_ref); - err = reset_control_deassert(rk_phy->phy_rst); - if (err) { - 
dev_err(&phy->dev, "deassert phy_rst err %d\n", err); - goto err_reset; - } - - return err; - -err_reset: - clk_prepare_enable(rk_phy->clk_pciephy_ref); - return err; + return 0; } static const struct phy_ops ops = { diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/phy-s5pv210-usb2.c index 004d320767e4df752681ef674fb8287c5f68ef14..f6f72339bbc32ace9605fed669f9d34e42bfae52 100644 --- a/drivers/phy/phy-s5pv210-usb2.c +++ b/drivers/phy/phy-s5pv210-usb2.c @@ -103,7 +103,7 @@ static void s5pv210_isol(struct samsung_usb2_phy_instance *inst, bool on) break; default: return; - }; + } regmap_update_bits(drv->reg_pmu, S5PV210_USB_ISOL_OFFSET, mask, on ? 0 : mask); @@ -127,7 +127,7 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) rstbits = S5PV210_URSTCON_PHY1_ALL | S5PV210_URSTCON_HOST_LINK_ALL; break; - }; + } if (on) { writel(drv->ref_reg_val, drv->reg_phy + S5PV210_UPHYCLK); diff --git a/drivers/phy/phy-stih41x-usb.c b/drivers/phy/phy-stih41x-usb.c deleted file mode 100644 index 0ac74639ad0244aa81e9f4878c58c5d6aff39841..0000000000000000000000000000000000000000 --- a/drivers/phy/phy-stih41x-usb.c +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright (C) 2014 STMicroelectronics - * - * STMicroelectronics PHY driver for STiH41x USB. - * - * Author: Maxime Coquelin - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define SYSCFG332 0x80 -#define SYSCFG2520 0x820 - -/** - * struct stih41x_usb_cfg - SoC specific PHY register mapping - * @syscfg: Offset in syscfg registers bank - * @cfg_mask: Bits mask for PHY configuration - * @cfg: Static configuration value for PHY - * @oscok: Notify the PHY oscillator clock is ready - * Setting this bit enable the PHY - */ -struct stih41x_usb_cfg { - u32 syscfg; - u32 cfg_mask; - u32 cfg; - u32 oscok; -}; - -/** - * struct stih41x_usb_phy - Private data for the PHY - * @dev: device for this controller - * @regmap: Syscfg registers bank in which PHY is configured - * @cfg: SoC specific PHY register mapping - * @clk: Oscillator used by the PHY - */ -struct stih41x_usb_phy { - struct device *dev; - struct regmap *regmap; - const struct stih41x_usb_cfg *cfg; - struct clk *clk; -}; - -static struct stih41x_usb_cfg stih415_usb_phy_cfg = { - .syscfg = SYSCFG332, - .cfg_mask = 0x3f, - .cfg = 0x38, - .oscok = BIT(6), -}; - -static struct stih41x_usb_cfg stih416_usb_phy_cfg = { - .syscfg = SYSCFG2520, - .cfg_mask = 0x33f, - .cfg = 0x238, - .oscok = BIT(6), -}; - -static int stih41x_usb_phy_init(struct phy *phy) -{ - struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy); - - return regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg, - phy_dev->cfg->cfg_mask, phy_dev->cfg->cfg); -} - -static int stih41x_usb_phy_power_on(struct phy *phy) -{ - struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy); - int ret; - - ret = clk_prepare_enable(phy_dev->clk); - if (ret) { - dev_err(phy_dev->dev, "Failed to enable osc_phy clock\n"); - return ret; - } - - ret = regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg, - phy_dev->cfg->oscok, phy_dev->cfg->oscok); - if (ret) - clk_disable_unprepare(phy_dev->clk); - - return ret; -} - -static int stih41x_usb_phy_power_off(struct phy *phy) -{ - struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy); - int ret; - - ret = 
regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg, - phy_dev->cfg->oscok, 0); - if (ret) { - dev_err(phy_dev->dev, "Failed to clear oscok bit\n"); - return ret; - } - - clk_disable_unprepare(phy_dev->clk); - - return 0; -} - -static const struct phy_ops stih41x_usb_phy_ops = { - .init = stih41x_usb_phy_init, - .power_on = stih41x_usb_phy_power_on, - .power_off = stih41x_usb_phy_power_off, - .owner = THIS_MODULE, -}; - -static const struct of_device_id stih41x_usb_phy_of_match[]; - -static int stih41x_usb_phy_probe(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - const struct of_device_id *match; - struct stih41x_usb_phy *phy_dev; - struct device *dev = &pdev->dev; - struct phy_provider *phy_provider; - struct phy *phy; - - phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL); - if (!phy_dev) - return -ENOMEM; - - match = of_match_device(stih41x_usb_phy_of_match, &pdev->dev); - if (!match) - return -ENODEV; - - phy_dev->cfg = match->data; - - phy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); - if (IS_ERR(phy_dev->regmap)) { - dev_err(dev, "No syscfg phandle specified\n"); - return PTR_ERR(phy_dev->regmap); - } - - phy_dev->clk = devm_clk_get(dev, "osc_phy"); - if (IS_ERR(phy_dev->clk)) { - dev_err(dev, "osc_phy clk not found\n"); - return PTR_ERR(phy_dev->clk); - } - - phy = devm_phy_create(dev, NULL, &stih41x_usb_phy_ops); - - if (IS_ERR(phy)) { - dev_err(dev, "failed to create phy\n"); - return PTR_ERR(phy); - } - - phy_dev->dev = dev; - - phy_set_drvdata(phy, phy_dev); - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - return PTR_ERR_OR_ZERO(phy_provider); -} - -static const struct of_device_id stih41x_usb_phy_of_match[] = { - { .compatible = "st,stih415-usb-phy", .data = &stih415_usb_phy_cfg }, - { .compatible = "st,stih416-usb-phy", .data = &stih416_usb_phy_cfg }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, stih41x_usb_phy_of_match); - -static struct platform_driver stih41x_usb_phy_driver = { - .probe = stih41x_usb_phy_probe, - .driver = { - .name = "stih41x-usb-phy", - .of_match_table = stih41x_usb_phy_of_match, - } -}; -module_platform_driver(stih41x_usb_phy_driver); - -MODULE_AUTHOR("Maxime Coquelin "); -MODULE_DESCRIPTION("STMicroelectronics USB PHY driver for STiH41x series"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index b9342a2af7b3666f86471d649bd134a4baa9ff13..bf28a0fdd569c56a6dc6f395925dfbf6e89903b2 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -264,7 +264,7 @@ static int sun4i_usb_phy_init(struct phy *_phy) return ret; } - if (data->cfg->enable_pmu_unk1) { + if (phy->pmu && data->cfg->enable_pmu_unk1) { val = readl(phy->pmu + REG_PMU_UNK1); writel(val & ~2, phy->pmu + REG_PMU_UNK1); } @@ -436,25 +436,31 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode) { struct sun4i_usb_phy *phy = phy_get_drvdata(_phy); struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); + int new_mode; if (phy->index != 0) return -EINVAL; switch (mode) { case PHY_MODE_USB_HOST: - data->dr_mode = USB_DR_MODE_HOST; + new_mode = USB_DR_MODE_HOST; break; case PHY_MODE_USB_DEVICE: - data->dr_mode = USB_DR_MODE_PERIPHERAL; + new_mode = USB_DR_MODE_PERIPHERAL; break; case PHY_MODE_USB_OTG: - data->dr_mode = USB_DR_MODE_OTG; + new_mode = USB_DR_MODE_OTG; break; default: return -EINVAL; } - dev_info(&_phy->dev, "Changing dr_mode to %d\n", (int)data->dr_mode); + if (new_mode != data->dr_mode) { + 
dev_info(&_phy->dev, "Changing dr_mode to %d\n", new_mode); + data->dr_mode = new_mode; + } + + data->id_det = -1; /* Force reprocessing of id */ data->force_session_end = true; queue_delayed_work(system_wq, &data->detect, 0); diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index bf46844dc387c69f9c52d9cec3da2f5f0c7f16fe..9c84d32c6f60f26bb508eea22458e6fa6b4d3599 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -537,10 +537,7 @@ static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll_ctrl"); phy->pll_ctrl_base = devm_ioremap_resource(dev, res); - if (IS_ERR(phy->pll_ctrl_base)) - return PTR_ERR(phy->pll_ctrl_base); - - return 0; + return PTR_ERR_OR_ZERO(phy->pll_ctrl_base); } static int ti_pipe3_probe(struct platform_device *pdev) @@ -592,10 +589,7 @@ static int ti_pipe3_probe(struct platform_device *pdev) ti_pipe3_power_off(generic_phy); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) - return PTR_ERR(phy_provider); - - return 0; + return PTR_ERR_OR_ZERO(phy_provider); } static int ti_pipe3_remove(struct platform_device *pdev) diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 87e6334eab9309f85e8cb5308f96dec54de7f495..2990b3965460e837c4218c9e3ca22ab6198cd6d1 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c @@ -317,6 +317,9 @@ static enum musb_vbus_id_status linkstat = MUSB_VBUS_OFF; } + kobject_uevent(&twl->dev->kobj, linkstat == MUSB_VBUS_VALID + ? KOBJ_ONLINE : KOBJ_OFFLINE); + dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n", status, status, linkstat); @@ -459,8 +462,6 @@ static int twl4030_phy_power_off(struct phy *phy) struct twl4030_usb *twl = phy_get_drvdata(phy); dev_dbg(twl->dev, "%s\n", __func__); - pm_runtime_mark_last_busy(twl->dev); - pm_runtime_put_autosuspend(twl->dev); return 0; } @@ -472,6 +473,8 @@ static int twl4030_phy_power_on(struct phy *phy) dev_dbg(twl->dev, "%s\n", __func__); pm_runtime_get_sync(twl->dev); schedule_delayed_work(&twl->id_workaround_work, HZ); + pm_runtime_mark_last_busy(twl->dev); + pm_runtime_put_autosuspend(twl->dev); return 0; } diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c index 119957249a51e54ef2a4b78bab2428ebb0171267..c45cbedc6634d1da2fe00ed9592b6de5e9316f45 100644 --- a/drivers/phy/tegra/xusb-tegra124.c +++ b/drivers/phy/tegra/xusb-tegra124.c @@ -1483,7 +1483,6 @@ static int tegra124_usb3_port_enable(struct tegra_xusb_port *port) struct tegra_xusb_padctl *padctl = port->padctl; struct tegra_xusb_lane *lane = usb3->base.lane; unsigned int index = port->index, offset; - int ret = 0; u32 value; value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP); @@ -1612,7 +1611,7 @@ static int tegra124_usb3_port_enable(struct tegra_xusb_port *port) value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(index); padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM); - return ret; + return 0; } static void tegra124_usb3_port_disable(struct tegra_xusb_port *port) diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 873424ab0e3284cd3eaaeb01560e6451e199ff67..3cbcb2537657623444c65ab59aa6300b7b66526f 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -561,10 +561,7 @@ static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2) usb2->internal = of_property_read_bool(np, "nvidia,internal"); usb2->supply = devm_regulator_get(&port->dev, "vbus"); - if 
(IS_ERR(usb2->supply)) - return PTR_ERR(usb2->supply); - - return 0; + return PTR_ERR_OR_ZERO(usb2->supply); } static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl, @@ -731,10 +728,7 @@ static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3) usb3->internal = of_property_read_bool(np, "nvidia,internal"); usb3->supply = devm_regulator_get(&port->dev, "vbus"); - if (IS_ERR(usb3->supply)) - return PTR_ERR(usb3->supply); - - return 0; + return PTR_ERR_OR_ZERO(usb3->supply); } static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl, diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 0e75d94972bae3865b3d8b64bca28e987b24d1da..54044a8ecbd7f7e923cb44634bf97f4bf97afa38 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -93,6 +93,15 @@ config PINCTRL_AMD Requires ACPI/FDT device enumeration code to set up a platform device. +config PINCTRL_DA850_PUPD + tristate "TI DA850/OMAP-L138/AM18XX pullup/pulldown groups" + depends on OF && (ARCH_DAVINCI_DA850 || COMPILE_TEST) + select PINCONF + select GENERIC_PINCONF + help + Driver for TI DA850/OMAP-L138/AM18XX pinconf. Used to control + pullup/pulldown pin groups. + config PINCTRL_DIGICOLOR bool depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST) @@ -164,6 +173,21 @@ config PINCTRL_SIRF select GENERIC_PINCONF select GPIOLIB_IRQCHIP +config PINCTRL_SX150X + bool "Semtech SX150x I2C GPIO expander pinctrl driver" + depends on GPIOLIB && I2C=y + select PINMUX + select PINCONF + select GENERIC_PINCONF + select GPIOLIB_IRQCHIP + select REGMAP + help + Say yes here to provide support for Semtech SX150x-series I2C + GPIO expanders as pinctrl module. + Compatible models include: + - 8 bits: sx1508q, sx1502q + - 16 bits: sx1509q, sx1506q + config PINCTRL_PISTACHIO def_bool y if MACH_PISTACHIO depends on GPIOLIB @@ -209,7 +233,7 @@ config PINCTRL_COH901 config PINCTRL_MAX77620 tristate "MAX77620/MAX20024 Pincontrol support" - depends on MFD_MAX77620 + depends on MFD_MAX77620 && OF select PINMUX select GENERIC_PINCONF help diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 11bad373dfe0e77ab4eadec4ff5bdb10f65b758e..25d50a86981d360ad805a3c13a1453e87d7d25cd 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o +obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o @@ -25,6 +26,7 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o obj-$(CONFIG_PINCTRL_SIRF) += sirf/ +obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index c8c72e8259d38bc47c83a4222c7311f6e97e7036..87b46390b69597a3299143f3ec4e0ab282475718 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c @@ -26,7 +26,7 @@ #define ASPEED_G5_NR_PINS 228 -#define COND1 SIG_DESC_BIT(SCU90, 6, 0) +#define COND1 { SCU90, BIT(6), 0, 0 } #define COND2 { 
SCU94, GENMASK(1, 0), 0, 0 } #define B14 0 diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig index 63246770bd74b9dcd3804cee66a960f2404b7092..8968dd7aebed2133b0103b39d7b752916dbfaab0 100644 --- a/drivers/pinctrl/bcm/Kconfig +++ b/drivers/pinctrl/bcm/Kconfig @@ -20,6 +20,7 @@ config PINCTRL_BCM2835 bool select PINMUX select PINCONF + select GPIOLIB_IRQCHIP config PINCTRL_IPROC_GPIO bool "Broadcom iProc GPIO (with PINCONF) driver" diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index fa77165fab2c1348163979da507df17e7168c49b..85d0091128644c446aed878e87769e82c77c3ebf 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -24,11 +24,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -47,6 +45,7 @@ #define MODULE_NAME "pinctrl-bcm2835" #define BCM2835_NUM_GPIOS 54 #define BCM2835_NUM_BANKS 2 +#define BCM2835_NUM_IRQS 3 #define BCM2835_PIN_BITMAP_SZ \ DIV_ROUND_UP(BCM2835_NUM_GPIOS, sizeof(unsigned long) * 8) @@ -76,41 +75,27 @@ enum bcm2835_pinconf_param { BCM2835_PINCONF_PARAM_PULL, }; -enum bcm2835_pinconf_pull { - BCM2835_PINCONFIG_PULL_NONE, - BCM2835_PINCONFIG_PULL_DOWN, - BCM2835_PINCONFIG_PULL_UP, -}; - #define BCM2835_PINCONF_PACK(_param_, _arg_) ((_param_) << 16 | (_arg_)) #define BCM2835_PINCONF_UNPACK_PARAM(_conf_) ((_conf_) >> 16) #define BCM2835_PINCONF_UNPACK_ARG(_conf_) ((_conf_) & 0xffff) -struct bcm2835_gpio_irqdata { - struct bcm2835_pinctrl *pc; - int bank; -}; - struct bcm2835_pinctrl { struct device *dev; void __iomem *base; - int irq[BCM2835_NUM_BANKS]; + int irq[BCM2835_NUM_IRQS]; /* note: locking assumes each bank will have its own unsigned long */ unsigned long enabled_irq_map[BCM2835_NUM_BANKS]; unsigned int irq_type[BCM2835_NUM_GPIOS]; struct pinctrl_dev *pctl_dev; - struct irq_domain *irq_domain; struct gpio_chip gpio_chip; struct pinctrl_gpio_range gpio_range; - struct bcm2835_gpio_irqdata irq_data[BCM2835_NUM_BANKS]; + int irq_group[BCM2835_NUM_IRQS]; spinlock_t irq_lock[BCM2835_NUM_BANKS]; }; -static struct lock_class_key gpio_lock_class; - /* pins are just named GPIO0..GPIO53 */ #define BCM2835_GPIO_PIN(a) PINCTRL_PIN(a, "gpio" #a) static struct pinctrl_pin_desc bcm2835_gpio_pins[] = { @@ -368,13 +353,6 @@ static int bcm2835_gpio_direction_output(struct gpio_chip *chip, return pinctrl_gpio_direction_output(chip->base + offset); } -static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct bcm2835_pinctrl *pc = gpiochip_get_data(chip); - - return irq_linear_revmap(pc->irq_domain, offset); -} - static struct gpio_chip bcm2835_gpio_chip = { .label = MODULE_NAME, .owner = THIS_MODULE, @@ -385,31 +363,67 @@ static struct gpio_chip bcm2835_gpio_chip = { .get_direction = bcm2835_gpio_get_direction, .get = bcm2835_gpio_get, .set = bcm2835_gpio_set, - .to_irq = bcm2835_gpio_to_irq, .base = -1, .ngpio = BCM2835_NUM_GPIOS, .can_sleep = false, }; -static irqreturn_t bcm2835_gpio_irq_handler(int irq, void *dev_id) +static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc, + unsigned int bank, u32 mask) { - struct bcm2835_gpio_irqdata *irqdata = dev_id; - struct bcm2835_pinctrl *pc = irqdata->pc; - int bank = irqdata->bank; unsigned long events; unsigned offset; unsigned gpio; unsigned int type; events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); + events &= mask; events &= pc->enabled_irq_map[bank]; for_each_set_bit(offset, &events, 32) { gpio = (32 * bank) + offset; + /* FIXME: no 
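The BCM2835_PINCONF_PACK/UNPACK macros retained in the hunk above store a pinconf parameter in the upper 16 bits of the config word and its argument in the lower 16 bits. A small stand-alone round-trip check of that packing follows; the macro bodies are copied from the context lines above, while the numeric parameter and argument values are arbitrary examples rather than the driver's real enum values.

#include <assert.h>
#include <stdio.h>

#define BCM2835_PINCONF_PACK(_param_, _arg_)	((_param_) << 16 | (_arg_))
#define BCM2835_PINCONF_UNPACK_PARAM(_conf_)	((_conf_) >> 16)
#define BCM2835_PINCONF_UNPACK_ARG(_conf_)	((_conf_) & 0xffff)

int main(void)
{
	/* example values only: parameter 1 with argument 2 */
	unsigned long conf = BCM2835_PINCONF_PACK(1, 2);

	assert(BCM2835_PINCONF_UNPACK_PARAM(conf) == 1);
	assert(BCM2835_PINCONF_UNPACK_ARG(conf) == 2);
	printf("conf = 0x%08lx\n", conf);	/* 0x00010002 */
	return 0;
}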
clue why the code looks up the type here */ type = pc->irq_type[gpio]; - generic_handle_irq(irq_linear_revmap(pc->irq_domain, gpio)); + generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, + gpio)); + } +} + +static void bcm2835_gpio_irq_handler(struct irq_desc *desc) +{ + struct gpio_chip *chip = irq_desc_get_handler_data(desc); + struct bcm2835_pinctrl *pc = gpiochip_get_data(chip); + struct irq_chip *host_chip = irq_desc_get_chip(desc); + int irq = irq_desc_get_irq(desc); + int group; + int i; + + for (i = 0; i < ARRAY_SIZE(pc->irq); i++) { + if (pc->irq[i] == irq) { + group = pc->irq_group[i]; + break; + } + } + /* This should not happen, every IRQ has a bank */ + if (i == ARRAY_SIZE(pc->irq)) + BUG(); + + chained_irq_enter(host_chip, desc); + + switch (group) { + case 0: /* IRQ0 covers GPIOs 0-27 */ + bcm2835_gpio_irq_handle_bank(pc, 0, 0x0fffffff); + break; + case 1: /* IRQ1 covers GPIOs 28-45 */ + bcm2835_gpio_irq_handle_bank(pc, 0, 0xf0000000); + bcm2835_gpio_irq_handle_bank(pc, 1, 0x00003fff); + break; + case 2: /* IRQ2 covers GPIOs 46-53 */ + bcm2835_gpio_irq_handle_bank(pc, 1, 0x003fc000); + break; } - return events ? IRQ_HANDLED : IRQ_NONE; + + chained_irq_exit(host_chip, desc); } static inline void __bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc, @@ -455,7 +469,8 @@ static void bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc, static void bcm2835_gpio_irq_enable(struct irq_data *data) { - struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data); + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct bcm2835_pinctrl *pc = gpiochip_get_data(chip); unsigned gpio = irqd_to_hwirq(data); unsigned offset = GPIO_REG_SHIFT(gpio); unsigned bank = GPIO_REG_OFFSET(gpio); @@ -469,7 +484,8 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data) static void bcm2835_gpio_irq_disable(struct irq_data *data) { - struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data); + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct bcm2835_pinctrl *pc = gpiochip_get_data(chip); unsigned gpio = irqd_to_hwirq(data); unsigned offset = GPIO_REG_SHIFT(gpio); unsigned bank = GPIO_REG_OFFSET(gpio); @@ -575,7 +591,8 @@ static int __bcm2835_gpio_irq_set_type_enabled(struct bcm2835_pinctrl *pc, static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type) { - struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data); + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct bcm2835_pinctrl *pc = gpiochip_get_data(chip); unsigned gpio = irqd_to_hwirq(data); unsigned offset = GPIO_REG_SHIFT(gpio); unsigned bank = GPIO_REG_OFFSET(gpio); @@ -601,7 +618,8 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type) static void bcm2835_gpio_irq_ack(struct irq_data *data) { - struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data); + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct bcm2835_pinctrl *pc = gpiochip_get_data(chip); unsigned gpio = irqd_to_hwirq(data); bcm2835_gpio_set_bit(pc, GPEDS0, gpio); @@ -644,10 +662,11 @@ static void bcm2835_pctl_pin_dbg_show(struct pinctrl_dev *pctldev, unsigned offset) { struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev); + struct gpio_chip *chip = &pc->gpio_chip; enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset); const char *fname = bcm2835_functions[fsel]; int value = bcm2835_gpio_get_bit(pc, GPLEV0, offset); - int irq = irq_find_mapping(pc->irq_domain, offset); + int irq = irq_find_mapping(chip->irqdomain, offset); 
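The rewritten chained handler above fans the three parent interrupts out over fixed GPIO ranges: IRQ0 covers GPIOs 0-27, IRQ1 covers 28-45 and IRQ2 covers 46-53, with the 54 GPIOs spread over two 32-bit event banks (bank = gpio / 32, bit = gpio % 32, as the driver's GPIO_REG_OFFSET()/GPIO_REG_SHIFT() helpers compute). The stand-alone check below verifies that the hard-coded masks in the switch statement cover exactly those ranges; the mask values are copied from the handler, the table name is invented for the sketch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* per-group bank masks copied from bcm2835_gpio_irq_handler() */
static const uint32_t group_mask[3][2] = {
	{ 0x0fffffff, 0x00000000 },	/* IRQ0: GPIOs 0-27, all in bank 0 */
	{ 0xf0000000, 0x00003fff },	/* IRQ1: GPIOs 28-45, split over banks 0 and 1 */
	{ 0x00000000, 0x003fc000 },	/* IRQ2: GPIOs 46-53, all in bank 1 */
};

int main(void)
{
	int gpio;

	for (gpio = 0; gpio < 54; gpio++) {
		int bank = gpio / 32;	/* which 32-bit event register */
		int bit = gpio % 32;	/* bit within that register */
		int group = (gpio <= 27) ? 0 : (gpio <= 45) ? 1 : 2;

		/* each GPIO must be covered by its group's mask for its bank */
		assert(group_mask[group][bank] & (1u << bit));
	}
	printf("all 54 GPIOs are covered by the per-group masks\n");
	return 0;
}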
seq_printf(s, "function %s in %s; irq %d (%s)", fname, value ? "hi" : "lo", @@ -821,6 +840,16 @@ static const struct pinctrl_ops bcm2835_pctl_ops = { .dt_free_map = bcm2835_pctl_dt_free_map, }; +static int bcm2835_pmx_free(struct pinctrl_dev *pctldev, + unsigned offset) +{ + struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev); + + /* disable by setting to GPIO_IN */ + bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_IN); + return 0; +} + static int bcm2835_pmx_get_functions_count(struct pinctrl_dev *pctldev) { return BCM2835_FSEL_COUNT; @@ -880,6 +909,7 @@ static int bcm2835_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, } static const struct pinmux_ops bcm2835_pmx_ops = { + .free = bcm2835_pmx_free, .get_functions_count = bcm2835_pmx_get_functions_count, .get_function_name = bcm2835_pmx_get_function_name, .get_function_groups = bcm2835_pmx_get_function_groups, @@ -917,12 +947,14 @@ static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev, bcm2835_gpio_wr(pc, GPPUD, arg & 3); /* - * Docs say to wait 150 cycles, but not of what. We assume a - * 1 MHz clock here, which is pretty slow... + * BCM2835 datasheet say to wait 150 cycles, but not of what. + * But the VideoCore firmware delay for this operation + * based nearly on the same amount of VPU cycles and this clock + * runs at 250 MHz. */ - udelay(150); + udelay(1); bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit)); - udelay(150); + udelay(1); bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0); } /* for each config */ @@ -980,26 +1012,9 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) pc->gpio_chip.parent = dev; pc->gpio_chip.of_node = np; - pc->irq_domain = irq_domain_add_linear(np, BCM2835_NUM_GPIOS, - &irq_domain_simple_ops, NULL); - if (!pc->irq_domain) { - dev_err(dev, "could not create IRQ domain\n"); - return -ENOMEM; - } - - for (i = 0; i < BCM2835_NUM_GPIOS; i++) { - int irq = irq_create_mapping(pc->irq_domain, i); - irq_set_lockdep_class(irq, &gpio_lock_class); - irq_set_chip_and_handler(irq, &bcm2835_gpio_irq_chip, - handle_level_irq); - irq_set_chip_data(irq, pc); - } - for (i = 0; i < BCM2835_NUM_BANKS; i++) { unsigned long events; unsigned offset; - int len; - char *name; /* clear event detection flags */ bcm2835_gpio_wr(pc, GPREN0 + i * 4, 0); @@ -1014,24 +1029,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) for_each_set_bit(offset, &events, 32) bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset)); - pc->irq[i] = irq_of_parse_and_map(np, i); - pc->irq_data[i].pc = pc; - pc->irq_data[i].bank = i; spin_lock_init(&pc->irq_lock[i]); - - len = strlen(dev_name(pc->dev)) + 16; - name = devm_kzalloc(pc->dev, len, GFP_KERNEL); - if (!name) - return -ENOMEM; - snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i); - - err = devm_request_irq(dev, pc->irq[i], - bcm2835_gpio_irq_handler, IRQF_SHARED, - name, &pc->irq_data[i]); - if (err) { - dev_err(dev, "unable to request IRQ %d\n", pc->irq[i]); - return err; - } } err = gpiochip_add_data(&pc->gpio_chip, pc); @@ -1040,6 +1038,29 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) return err; } + err = gpiochip_irqchip_add(&pc->gpio_chip, &bcm2835_gpio_irq_chip, + 0, handle_level_irq, IRQ_TYPE_NONE); + if (err) { + dev_info(dev, "could not add irqchip\n"); + return err; + } + + for (i = 0; i < BCM2835_NUM_IRQS; i++) { + pc->irq[i] = irq_of_parse_and_map(np, i); + pc->irq_group[i] = i; + /* + * Use the same handler for all groups: this is necessary + * since we use one gpiochip to cover all lines - the + * irq handler 
then needs to figure out which group and + * bank that was firing the IRQ and look up the per-group + * and bank data. + */ + gpiochip_set_chained_irqchip(&pc->gpio_chip, + &bcm2835_gpio_irq_chip, + pc->irq[i], + bcm2835_gpio_irq_handler); + } + pc->pctl_dev = devm_pinctrl_register(dev, &bcm2835_pinctrl_desc, pc); if (IS_ERR(pc->pctl_dev)) { gpiochip_remove(&pc->gpio_chip); diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c index 7f77007163985762abc6110f282eec762bdc8a92..5d1e505c3c63d76a85af0dcb7051b9a3da15c2e5 100644 --- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c @@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = { static int __init iproc_gpio_init(void) { - return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe); + return platform_driver_register(&iproc_gpio_driver); } arch_initcall_sync(iproc_gpio_init); diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c index 35783db1c10bad5f50bb8ac41a59307dc97b852e..c8deb8be1da785fd15d8f5182b3b19143ca3c33f 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c @@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = { static int __init nsp_gpio_init(void) { - return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe); + return platform_driver_register(&nsp_gpio_driver); } arch_initcall_sync(nsp_gpio_init); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 54dad89fc9bfe60b23938cc21f1f198311b7f28a..260908480075a246bf6d311ab83566a9c706486c 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -253,3 +253,147 @@ int pinctrl_dt_to_map(struct pinctrl *p) pinctrl_dt_free_maps(p); return ret; } + +/* + * For pinctrl binding, typically #pinctrl-cells is for the pin controller + * device, so either parent or grandparent. See pinctrl-bindings.txt. + */ +static int pinctrl_find_cells_size(const struct device_node *np) +{ + const char *cells_name = "#pinctrl-cells"; + int cells_size, error; + + error = of_property_read_u32(np->parent, cells_name, &cells_size); + if (error) { + error = of_property_read_u32(np->parent->parent, + cells_name, &cells_size); + if (error) + return -ENOENT; + } + + return cells_size; +} + +/** + * pinctrl_get_list_and_count - Gets the list and it's cell size and number + * @np: pointer to device node with the property + * @list_name: property that contains the list + * @list: pointer for the list found + * @cells_size: pointer for the cell size found + * @nr_elements: pointer for the number of elements found + * + * Typically np is a single pinctrl entry containing the list. 
+ */ +static int pinctrl_get_list_and_count(const struct device_node *np, + const char *list_name, + const __be32 **list, + int *cells_size, + int *nr_elements) +{ + int size; + + *cells_size = 0; + *nr_elements = 0; + + *list = of_get_property(np, list_name, &size); + if (!*list) + return -ENOENT; + + *cells_size = pinctrl_find_cells_size(np); + if (*cells_size < 0) + return -ENOENT; + + /* First element is always the index within the pinctrl device */ + *nr_elements = (size / sizeof(**list)) / (*cells_size + 1); + + return 0; +} + +/** + * pinctrl_count_index_with_args - Count number of elements in a pinctrl entry + * @np: pointer to device node with the property + * @list_name: property that contains the list + * + * Counts the number of elements in a pinctrl array consisting of an index + * within the controller and a number of u32 entries specified for each + * entry. Note that device_node is always for the parent pin controller device. + */ +int pinctrl_count_index_with_args(const struct device_node *np, + const char *list_name) +{ + const __be32 *list; + int size, nr_cells, error; + + error = pinctrl_get_list_and_count(np, list_name, &list, + &nr_cells, &size); + if (error) + return error; + + return size; +} +EXPORT_SYMBOL_GPL(pinctrl_count_index_with_args); + +/** + * pinctrl_copy_args - Populates of_phandle_args based on index + * @np: pointer to device node with the property + * @list: pointer to a list with the elements + * @index: entry within the list of elements + * @nr_cells: number of cells per entry in the list + * @nr_elem: number of entries in the list + * @out_args: returned values + * + * Populates the of_phandle_args based on the index in the list. + */ +static int pinctrl_copy_args(const struct device_node *np, + const __be32 *list, + int index, int nr_cells, int nr_elem, + struct of_phandle_args *out_args) +{ + int i; + + memset(out_args, 0, sizeof(*out_args)); + out_args->np = (struct device_node *)np; + out_args->args_count = nr_cells + 1; + + if (index >= nr_elem) + return -EINVAL; + + list += index * (nr_cells + 1); + + for (i = 0; i < nr_cells + 1; i++) + out_args->args[i] = be32_to_cpup(list++); + + return 0; +} + +/** + * pinctrl_parse_index_with_args - Find a node pointed to by index in a list + * @np: pointer to device node with the property + * @list_name: property that contains the list + * @index: index within the list + * @out_args: entries in the list pointed to by index + * + * Finds the selected element in a pinctrl array consisting of an index + * within the controller and a number of u32 entries specified for each + * entry. Note that device_node is always for the parent pin controller device. + */ +int pinctrl_parse_index_with_args(const struct device_node *np, + const char *list_name, int index, + struct of_phandle_args *out_args) +{ + const __be32 *list; + int nr_elem, nr_cells, error; + + error = pinctrl_get_list_and_count(np, list_name, &list, + &nr_cells, &nr_elem); + if (error || !nr_cells) + return error; + + error = pinctrl_copy_args(np, list, index, nr_cells, nr_elem, + out_args); + if (error) + return error; + + return 0; +} +EXPORT_SYMBOL_GPL(pinctrl_parse_index_with_args); diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h index 760bc4960f586a549eeeb2a1e99a8168864e49dd..c2d1a550585070524a50465b388af4393c7e8d15 100644 --- a/drivers/pinctrl/devicetree.h +++ b/drivers/pinctrl/devicetree.h @@ -16,11 +16,20 @@ * along with this program. If not, see .
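The parsing helpers added to devicetree.c above derive everything from the property length: each entry in the list is one index cell plus #pinctrl-cells argument cells, so nr_elements = (size / sizeof(u32)) / (cells_size + 1), and pinctrl_copy_args() then copies entry number "index" into of_phandle_args. A user-space sketch of that arithmetic follows, using plain uint32_t values in place of the __be32 property data (so the be32_to_cpup() conversion is omitted) and a bare array instead of struct of_phandle_args; the sample property contents are invented.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pretend property with #pinctrl-cells = <2>: entries are <index arg0 arg1> */
	const uint32_t list[] = { 0, 0x18, 0x4,   1, 0x1c, 0x0,   2, 0x20, 0x10 };
	const int size = (int)sizeof(list);	/* byte length, as of_get_property() reports */
	const int cells_size = 2;
	uint32_t args[8];
	int nr_elements, args_count, index, i;

	/* pinctrl_get_list_and_count(): one index cell plus cells_size argument cells */
	nr_elements = (size / (int)sizeof(uint32_t)) / (cells_size + 1);
	assert(nr_elements == 3);

	/* pinctrl_copy_args(): entry "index" starts at index * (cells_size + 1) */
	index = 1;
	args_count = cells_size + 1;
	for (i = 0; i < args_count; i++)
		args[i] = list[index * (cells_size + 1) + i];

	printf("entry %d: index=%u args=<0x%x 0x%x>\n", index, args[0], args[1], args[2]);
	return 0;
}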
*/ +struct of_phandle_args; + #ifdef CONFIG_OF void pinctrl_dt_free_maps(struct pinctrl *p); int pinctrl_dt_to_map(struct pinctrl *p); +int pinctrl_count_index_with_args(const struct device_node *np, + const char *list_name); + +int pinctrl_parse_index_with_args(const struct device_node *np, + const char *list_name, int index, + struct of_phandle_args *out_args); + #else static inline int pinctrl_dt_to_map(struct pinctrl *p) @@ -32,4 +41,18 @@ static inline void pinctrl_dt_free_maps(struct pinctrl *p) { } +static inline int pinctrl_count_index_with_args(const struct device_node *np, + const char *list_name) +{ + return -ENODEV; +} + +static inline int +pinctrl_parse_index_with_args(const struct device_node *np, + const char *list_name, int index, + struct of_phandle_args *out_args) +{ + return -ENODEV; +} + #endif diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 47613201269af42dd3fff670c92008333bf0adf0..5ef7e875b50e89a9d39dcfbd67e927b42ff9d087 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -687,6 +687,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev, if (!info->functions) return -ENOMEM; + info->group_index = 0; if (flat_funcs) { info->ngroups = of_get_child_count(np); } else { @@ -777,10 +778,10 @@ int imx_pinctrl_probe(struct platform_device *pdev, imx_pinctrl_desc->name = dev_name(&pdev->dev); imx_pinctrl_desc->pins = info->pins; imx_pinctrl_desc->npins = info->npins; - imx_pinctrl_desc->pctlops = &imx_pctrl_ops, - imx_pinctrl_desc->pmxops = &imx_pmx_ops, - imx_pinctrl_desc->confops = &imx_pinconf_ops, - imx_pinctrl_desc->owner = THIS_MODULE, + imx_pinctrl_desc->pctlops = &imx_pctrl_ops; + imx_pinctrl_desc->pmxops = &imx_pmx_ops; + imx_pinctrl_desc->confops = &imx_pinconf_ops; + imx_pinctrl_desc->owner = THIS_MODULE; ret = imx_pinctrl_probe_dt(pdev, info); if (ret) { diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 71bbeb9321bad587504bf5ca4f4c889019b4b9f4..37300634b7d2c853a05f5dcecce6b9d46d187b59 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1703,7 +1703,7 @@ static int byt_gpio_probe(struct byt_gpio *vg) if (irq_rc && irq_rc->start) { byt_gpio_irq_init_hw(vg); ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0, - handle_simple_irq, IRQ_TYPE_NONE); + handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(&vg->pdev->dev, "failed to add irqchip\n"); goto fail; diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 30389f4ccab4935c20ddcbed2c5a543e77d33fc5..5e66860a5e677290a0c649728cc48f852cc408b9 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -762,7 +762,7 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, seq_printf(s, "mode %d ", mode); } - seq_printf(s, "ctrl0 0x%08x ctrl1 0x%08x", ctrl0, ctrl1); + seq_printf(s, "0x%08x 0x%08x", ctrl0, ctrl1); if (locked) seq_puts(s, " [LOCKED]"); @@ -1652,12 +1652,15 @@ static int chv_pinctrl_probe(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int chv_pinctrl_suspend(struct device *dev) +static int chv_pinctrl_suspend_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&chv_lock, flags); + pctrl->saved_intmask = readl(pctrl->regs + 
CHV_INTMASK); for (i = 0; i < pctrl->community->npins; i++) { @@ -1678,15 +1681,20 @@ static int chv_pinctrl_suspend(struct device *dev) ctx->padctrl1 = readl(reg); } + raw_spin_unlock_irqrestore(&chv_lock, flags); + return 0; } -static int chv_pinctrl_resume(struct device *dev) +static int chv_pinctrl_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&chv_lock, flags); + /* * Mask all interrupts before restoring per-pin configuration * registers because we don't know in which state BIOS left them @@ -1731,12 +1739,15 @@ static int chv_pinctrl_resume(struct device *dev) chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); + raw_spin_unlock_irqrestore(&chv_lock, flags); + return 0; } #endif static const struct dev_pm_ops chv_pinctrl_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq, + chv_pinctrl_resume_noirq) }; static const struct acpi_device_id chv_pinctrl_acpi_match[] = { diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 01443762e57055b88ea9f1accb0cf6e10eda2978..1e139672f1af9da0fa7ff4af1a919395e2ea6957 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -911,7 +911,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) } ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0, - handle_simple_irq, IRQ_TYPE_NONE); + handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(pctrl->dev, "failed to add irqchip\n"); goto fail; diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index 7826c7f0cb7cac1959e0c8efb27a0c546f09b060..b21896126f760a5cbae044ab13cba527e9dbfeff 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -814,10 +814,51 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin, return 0; } +static int mrfld_config_group_get(struct pinctrl_dev *pctldev, + unsigned int group, unsigned long *config) +{ + const unsigned int *pins; + unsigned int npins; + int ret; + + ret = mrfld_get_group_pins(pctldev, group, &pins, &npins); + if (ret) + return ret; + + ret = mrfld_config_get(pctldev, pins[0], config); + if (ret) + return ret; + + return 0; +} + +static int mrfld_config_group_set(struct pinctrl_dev *pctldev, + unsigned int group, unsigned long *configs, + unsigned int num_configs) +{ + const unsigned int *pins; + unsigned int npins; + int i, ret; + + ret = mrfld_get_group_pins(pctldev, group, &pins, &npins); + if (ret) + return ret; + + for (i = 0; i < npins; i++) { + ret = mrfld_config_set(pctldev, pins[i], configs, num_configs); + if (ret) + return ret; + } + + return 0; +} + static const struct pinconf_ops mrfld_pinconf_ops = { .is_generic = true, .pin_config_get = mrfld_config_get, .pin_config_set = mrfld_config_set, + .pin_config_group_get = mrfld_config_group_get, + .pin_config_group_set = mrfld_config_group_set, }; static const struct pinctrl_desc mrfld_pinctrl_desc = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6397.c b/drivers/pinctrl/mediatek/pinctrl-mt6397.c index 6eccb85c02cd2f764ba43843649c5e6629426225..afcede7e2222161a7f748964f6bd0e6e9ae9572b 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt6397.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt6397.c 
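The Merrifield pin_config_group_get()/pin_config_group_set() callbacks added above are thin fan-out wrappers: a group get reports the configuration of the group's first pin, and a group set applies the same configuration to every pin in the group through the existing per-pin path. Below is a stripped-down model of that dispatch; struct group, the pin table and the pin_config[] store are invented stand-ins for the driver's group data and hardware registers, and a single config value replaces the configs array for brevity.

#include <stdio.h>

struct group {
	const char *name;
	const unsigned int *pins;
	unsigned int npins;
};

static unsigned long pin_config[16];	/* stand-in for per-pin hardware state */

static int pin_config_get(unsigned int pin, unsigned long *config)
{
	*config = pin_config[pin];
	return 0;
}

static int pin_config_set(unsigned int pin, unsigned long config)
{
	pin_config[pin] = config;
	return 0;
}

/* group get: report the first pin; group set: apply to every pin of the group */
static int group_config_get(const struct group *grp, unsigned long *config)
{
	return pin_config_get(grp->pins[0], config);
}

static int group_config_set(const struct group *grp, unsigned long config)
{
	unsigned int i;
	int ret;

	for (i = 0; i < grp->npins; i++) {
		ret = pin_config_set(grp->pins[i], config);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	const unsigned int uart_pins[] = { 4, 5, 6, 7 };
	const struct group uart = { "uart", uart_pins, 4 };
	unsigned long readback;

	group_config_set(&uart, 0x42);
	group_config_get(&uart, &readback);
	printf("group %s config: 0x%lx\n", uart.name, readback);	/* 0x42 */
	return 0;
}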
@@ -64,8 +64,4 @@ static struct platform_driver mtk_pinctrl_driver = { }, }; -static int __init mtk_pinctrl_init(void) -{ - return platform_driver_register(&mtk_pinctrl_driver); -} -device_initcall(mtk_pinctrl_init); +builtin_platform_driver(mtk_pinctrl_driver); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h index 13e5b68bfe1b49716bddaa174467ea8bb686da32..9b018fdbeb51f4c4be4dfef9f06f56c4ef08338f 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h @@ -201,7 +201,7 @@ static const struct mtk_desc_pin mtk_pins_mt8173[] = { MTK_PIN( PINCTRL_PIN(16, "IDDIG"), NULL, "mt8173", - MTK_EINT_FUNCTION(0, 16), + MTK_EINT_FUNCTION(1, 16), MTK_FUNCTION(0, "GPIO16"), MTK_FUNCTION(1, "IDDIG"), MTK_FUNCTION(2, "CMFLASH"), diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile index 24434f139947c045800ea3fd10e3e75869d81692..27c5b5126008b4336175417e1e075f63a6beb957 100644 --- a/drivers/pinctrl/meson/Makefile +++ b/drivers/pinctrl/meson/Makefile @@ -1,2 +1,3 @@ -obj-y += pinctrl-meson8.o pinctrl-meson8b.o pinctrl-meson-gxbb.o +obj-y += pinctrl-meson8.o pinctrl-meson8b.o +obj-y += pinctrl-meson-gxbb.o pinctrl-meson-gxl.o obj-y += pinctrl-meson.o diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c new file mode 100644 index 0000000000000000000000000000000000000000..25694f7094c714bbf35eee2ae7b51e2b4ce5b1e9 --- /dev/null +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -0,0 +1,589 @@ +/* + * Pin controller and GPIO driver for Amlogic Meson GXL. + * + * Copyright (C) 2016 Endless Mobile, Inc. + * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include "pinctrl-meson.h" + +#define EE_OFF 10 + +static const struct pinctrl_pin_desc meson_gxl_periphs_pins[] = { + MESON_PIN(GPIOZ_0, EE_OFF), + MESON_PIN(GPIOZ_1, EE_OFF), + MESON_PIN(GPIOZ_2, EE_OFF), + MESON_PIN(GPIOZ_3, EE_OFF), + MESON_PIN(GPIOZ_4, EE_OFF), + MESON_PIN(GPIOZ_5, EE_OFF), + MESON_PIN(GPIOZ_6, EE_OFF), + MESON_PIN(GPIOZ_7, EE_OFF), + MESON_PIN(GPIOZ_8, EE_OFF), + MESON_PIN(GPIOZ_9, EE_OFF), + MESON_PIN(GPIOZ_10, EE_OFF), + MESON_PIN(GPIOZ_11, EE_OFF), + MESON_PIN(GPIOZ_12, EE_OFF), + MESON_PIN(GPIOZ_13, EE_OFF), + MESON_PIN(GPIOZ_14, EE_OFF), + MESON_PIN(GPIOZ_15, EE_OFF), + + MESON_PIN(GPIOH_0, EE_OFF), + MESON_PIN(GPIOH_1, EE_OFF), + MESON_PIN(GPIOH_2, EE_OFF), + MESON_PIN(GPIOH_3, EE_OFF), + MESON_PIN(GPIOH_4, EE_OFF), + MESON_PIN(GPIOH_5, EE_OFF), + MESON_PIN(GPIOH_6, EE_OFF), + MESON_PIN(GPIOH_7, EE_OFF), + MESON_PIN(GPIOH_8, EE_OFF), + MESON_PIN(GPIOH_9, EE_OFF), + + MESON_PIN(BOOT_0, EE_OFF), + MESON_PIN(BOOT_1, EE_OFF), + MESON_PIN(BOOT_2, EE_OFF), + MESON_PIN(BOOT_3, EE_OFF), + MESON_PIN(BOOT_4, EE_OFF), + MESON_PIN(BOOT_5, EE_OFF), + MESON_PIN(BOOT_6, EE_OFF), + MESON_PIN(BOOT_7, EE_OFF), + MESON_PIN(BOOT_8, EE_OFF), + MESON_PIN(BOOT_9, EE_OFF), + MESON_PIN(BOOT_10, EE_OFF), + MESON_PIN(BOOT_11, EE_OFF), + MESON_PIN(BOOT_12, EE_OFF), + MESON_PIN(BOOT_13, EE_OFF), + MESON_PIN(BOOT_14, EE_OFF), + MESON_PIN(BOOT_15, EE_OFF), + + MESON_PIN(CARD_0, EE_OFF), + MESON_PIN(CARD_1, EE_OFF), + MESON_PIN(CARD_2, EE_OFF), + MESON_PIN(CARD_3, EE_OFF), + MESON_PIN(CARD_4, EE_OFF), + MESON_PIN(CARD_5, EE_OFF), + MESON_PIN(CARD_6, EE_OFF), + + MESON_PIN(GPIODV_0, EE_OFF), + MESON_PIN(GPIODV_1, EE_OFF), + MESON_PIN(GPIODV_2, EE_OFF), + MESON_PIN(GPIODV_3, EE_OFF), + MESON_PIN(GPIODV_4, EE_OFF), + MESON_PIN(GPIODV_5, EE_OFF), + MESON_PIN(GPIODV_6, EE_OFF), + MESON_PIN(GPIODV_7, EE_OFF), + MESON_PIN(GPIODV_8, EE_OFF), + MESON_PIN(GPIODV_9, EE_OFF), + MESON_PIN(GPIODV_10, EE_OFF), + MESON_PIN(GPIODV_11, EE_OFF), + MESON_PIN(GPIODV_12, EE_OFF), + MESON_PIN(GPIODV_13, EE_OFF), + MESON_PIN(GPIODV_14, EE_OFF), + MESON_PIN(GPIODV_15, EE_OFF), + MESON_PIN(GPIODV_16, EE_OFF), + MESON_PIN(GPIODV_17, EE_OFF), + MESON_PIN(GPIODV_19, EE_OFF), + MESON_PIN(GPIODV_20, EE_OFF), + MESON_PIN(GPIODV_21, EE_OFF), + MESON_PIN(GPIODV_22, EE_OFF), + MESON_PIN(GPIODV_23, EE_OFF), + MESON_PIN(GPIODV_24, EE_OFF), + MESON_PIN(GPIODV_25, EE_OFF), + MESON_PIN(GPIODV_26, EE_OFF), + MESON_PIN(GPIODV_27, EE_OFF), + MESON_PIN(GPIODV_28, EE_OFF), + MESON_PIN(GPIODV_29, EE_OFF), + + MESON_PIN(GPIOX_0, EE_OFF), + MESON_PIN(GPIOX_1, EE_OFF), + MESON_PIN(GPIOX_2, EE_OFF), + MESON_PIN(GPIOX_3, EE_OFF), + MESON_PIN(GPIOX_4, EE_OFF), + MESON_PIN(GPIOX_5, EE_OFF), + MESON_PIN(GPIOX_6, EE_OFF), + MESON_PIN(GPIOX_7, EE_OFF), + MESON_PIN(GPIOX_8, EE_OFF), + MESON_PIN(GPIOX_9, EE_OFF), + MESON_PIN(GPIOX_10, EE_OFF), + MESON_PIN(GPIOX_11, EE_OFF), + MESON_PIN(GPIOX_12, EE_OFF), + MESON_PIN(GPIOX_13, EE_OFF), + MESON_PIN(GPIOX_14, EE_OFF), + MESON_PIN(GPIOX_15, EE_OFF), + MESON_PIN(GPIOX_16, EE_OFF), + MESON_PIN(GPIOX_17, EE_OFF), + MESON_PIN(GPIOX_18, EE_OFF), + + MESON_PIN(GPIOCLK_0, EE_OFF), + MESON_PIN(GPIOCLK_1, EE_OFF), + + MESON_PIN(GPIO_TEST_N, EE_OFF), +}; + +static const unsigned int emmc_nand_d07_pins[] = { + PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF), + PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF), + PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF), +}; +static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) }; +static const unsigned int emmc_cmd_pins[] = 
{ PIN(BOOT_10, EE_OFF) }; +static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) }; + +static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) }; +static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) }; +static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) }; +static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) }; +static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) }; +static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) }; + +static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) }; +static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) }; +static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) }; +static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) }; +static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) }; +static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) }; +static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) }; + +static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) }; +static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) }; +static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) }; +static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) }; +static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) }; +static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) }; +static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) }; +static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) }; + +static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) }; +static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) }; +static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) }; +static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) }; + +static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) }; +static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) }; + +static const unsigned int uart_tx_c_pins[] = { PIN(GPIOX_8, EE_OFF) }; +static const unsigned int uart_rx_c_pins[] = { PIN(GPIOX_9, EE_OFF) }; + +static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) }; +static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) }; + +static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) }; +static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) }; + +static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) }; +static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) }; + +static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) }; +static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) }; +static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) }; +static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) }; +static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) }; +static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) }; +static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) }; +static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) }; +static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) }; +static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) }; +static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) }; +static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) }; +static const unsigned int eth_txd2_pins[] = 
{ PIN(GPIOZ_12, EE_OFF) }; +static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) }; + +static const unsigned int pwm_e_pins[] = { PIN(GPIOX_16, EE_OFF) }; + +static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = { + MESON_PIN(GPIOAO_0, 0), + MESON_PIN(GPIOAO_1, 0), + MESON_PIN(GPIOAO_2, 0), + MESON_PIN(GPIOAO_3, 0), + MESON_PIN(GPIOAO_4, 0), + MESON_PIN(GPIOAO_5, 0), + MESON_PIN(GPIOAO_6, 0), + MESON_PIN(GPIOAO_7, 0), + MESON_PIN(GPIOAO_8, 0), + MESON_PIN(GPIOAO_9, 0), +}; + +static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) }; +static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; +static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; +static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; +static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; +static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), + PIN(GPIOAO_5, 0) }; +static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; +static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; + +static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) }; + +static struct meson_pmx_group meson_gxl_periphs_groups[] = { + GPIO_GROUP(GPIOZ_0, EE_OFF), + GPIO_GROUP(GPIOZ_1, EE_OFF), + GPIO_GROUP(GPIOZ_2, EE_OFF), + GPIO_GROUP(GPIOZ_3, EE_OFF), + GPIO_GROUP(GPIOZ_4, EE_OFF), + GPIO_GROUP(GPIOZ_5, EE_OFF), + GPIO_GROUP(GPIOZ_6, EE_OFF), + GPIO_GROUP(GPIOZ_7, EE_OFF), + GPIO_GROUP(GPIOZ_8, EE_OFF), + GPIO_GROUP(GPIOZ_9, EE_OFF), + GPIO_GROUP(GPIOZ_10, EE_OFF), + GPIO_GROUP(GPIOZ_11, EE_OFF), + GPIO_GROUP(GPIOZ_12, EE_OFF), + GPIO_GROUP(GPIOZ_13, EE_OFF), + GPIO_GROUP(GPIOZ_14, EE_OFF), + GPIO_GROUP(GPIOZ_15, EE_OFF), + + GPIO_GROUP(GPIOH_0, EE_OFF), + GPIO_GROUP(GPIOH_1, EE_OFF), + GPIO_GROUP(GPIOH_2, EE_OFF), + GPIO_GROUP(GPIOH_3, EE_OFF), + GPIO_GROUP(GPIOH_4, EE_OFF), + GPIO_GROUP(GPIOH_5, EE_OFF), + GPIO_GROUP(GPIOH_6, EE_OFF), + GPIO_GROUP(GPIOH_7, EE_OFF), + GPIO_GROUP(GPIOH_8, EE_OFF), + GPIO_GROUP(GPIOH_9, EE_OFF), + + GPIO_GROUP(BOOT_0, EE_OFF), + GPIO_GROUP(BOOT_1, EE_OFF), + GPIO_GROUP(BOOT_2, EE_OFF), + GPIO_GROUP(BOOT_3, EE_OFF), + GPIO_GROUP(BOOT_4, EE_OFF), + GPIO_GROUP(BOOT_5, EE_OFF), + GPIO_GROUP(BOOT_6, EE_OFF), + GPIO_GROUP(BOOT_7, EE_OFF), + GPIO_GROUP(BOOT_8, EE_OFF), + GPIO_GROUP(BOOT_9, EE_OFF), + GPIO_GROUP(BOOT_10, EE_OFF), + GPIO_GROUP(BOOT_11, EE_OFF), + GPIO_GROUP(BOOT_12, EE_OFF), + GPIO_GROUP(BOOT_13, EE_OFF), + GPIO_GROUP(BOOT_14, EE_OFF), + GPIO_GROUP(BOOT_15, EE_OFF), + + GPIO_GROUP(CARD_0, EE_OFF), + GPIO_GROUP(CARD_1, EE_OFF), + GPIO_GROUP(CARD_2, EE_OFF), + GPIO_GROUP(CARD_3, EE_OFF), + GPIO_GROUP(CARD_4, EE_OFF), + GPIO_GROUP(CARD_5, EE_OFF), + GPIO_GROUP(CARD_6, EE_OFF), + + GPIO_GROUP(GPIODV_0, EE_OFF), + GPIO_GROUP(GPIODV_1, EE_OFF), + GPIO_GROUP(GPIODV_2, EE_OFF), + GPIO_GROUP(GPIODV_3, EE_OFF), + GPIO_GROUP(GPIODV_4, EE_OFF), + GPIO_GROUP(GPIODV_5, EE_OFF), + GPIO_GROUP(GPIODV_6, EE_OFF), + GPIO_GROUP(GPIODV_7, EE_OFF), + GPIO_GROUP(GPIODV_8, EE_OFF), + GPIO_GROUP(GPIODV_9, EE_OFF), + GPIO_GROUP(GPIODV_10, EE_OFF), + GPIO_GROUP(GPIODV_11, EE_OFF), + GPIO_GROUP(GPIODV_12, EE_OFF), + GPIO_GROUP(GPIODV_13, EE_OFF), + GPIO_GROUP(GPIODV_14, EE_OFF), + GPIO_GROUP(GPIODV_15, EE_OFF), + GPIO_GROUP(GPIODV_16, EE_OFF), + GPIO_GROUP(GPIODV_17, EE_OFF), + GPIO_GROUP(GPIODV_19, EE_OFF), + GPIO_GROUP(GPIODV_20, EE_OFF), + GPIO_GROUP(GPIODV_21, EE_OFF), + GPIO_GROUP(GPIODV_22, EE_OFF), + GPIO_GROUP(GPIODV_23, EE_OFF), + GPIO_GROUP(GPIODV_24, EE_OFF), + 
GPIO_GROUP(GPIODV_25, EE_OFF), + GPIO_GROUP(GPIODV_26, EE_OFF), + GPIO_GROUP(GPIODV_27, EE_OFF), + GPIO_GROUP(GPIODV_28, EE_OFF), + GPIO_GROUP(GPIODV_29, EE_OFF), + + GPIO_GROUP(GPIOX_0, EE_OFF), + GPIO_GROUP(GPIOX_1, EE_OFF), + GPIO_GROUP(GPIOX_2, EE_OFF), + GPIO_GROUP(GPIOX_3, EE_OFF), + GPIO_GROUP(GPIOX_4, EE_OFF), + GPIO_GROUP(GPIOX_5, EE_OFF), + GPIO_GROUP(GPIOX_6, EE_OFF), + GPIO_GROUP(GPIOX_7, EE_OFF), + GPIO_GROUP(GPIOX_8, EE_OFF), + GPIO_GROUP(GPIOX_9, EE_OFF), + GPIO_GROUP(GPIOX_10, EE_OFF), + GPIO_GROUP(GPIOX_11, EE_OFF), + GPIO_GROUP(GPIOX_12, EE_OFF), + GPIO_GROUP(GPIOX_13, EE_OFF), + GPIO_GROUP(GPIOX_14, EE_OFF), + GPIO_GROUP(GPIOX_15, EE_OFF), + GPIO_GROUP(GPIOX_16, EE_OFF), + GPIO_GROUP(GPIOX_17, EE_OFF), + GPIO_GROUP(GPIOX_18, EE_OFF), + + GPIO_GROUP(GPIOCLK_0, EE_OFF), + GPIO_GROUP(GPIOCLK_1, EE_OFF), + + GPIO_GROUP(GPIO_TEST_N, EE_OFF), + + /* Bank X */ + GROUP(sdio_d0, 5, 31), + GROUP(sdio_d1, 5, 30), + GROUP(sdio_d2, 5, 29), + GROUP(sdio_d3, 5, 28), + GROUP(sdio_cmd, 5, 27), + GROUP(sdio_clk, 5, 26), + GROUP(sdio_irq, 5, 24), + GROUP(uart_tx_a, 5, 19), + GROUP(uart_rx_a, 5, 18), + GROUP(uart_cts_a, 5, 17), + GROUP(uart_rts_a, 5, 16), + GROUP(uart_tx_c, 5, 13), + GROUP(uart_rx_c, 5, 12), + GROUP(pwm_e, 5, 15), + + /* Bank Z */ + GROUP(eth_mdio, 4, 22), + GROUP(eth_mdc, 4, 23), + GROUP(eth_clk_rx_clk, 4, 21), + GROUP(eth_rx_dv, 4, 20), + GROUP(eth_rxd0, 4, 19), + GROUP(eth_rxd1, 4, 18), + GROUP(eth_rxd2, 4, 17), + GROUP(eth_rxd3, 4, 16), + GROUP(eth_rgmii_tx_clk, 4, 15), + GROUP(eth_tx_en, 4, 14), + GROUP(eth_txd0, 4, 13), + GROUP(eth_txd1, 4, 12), + GROUP(eth_txd2, 4, 11), + GROUP(eth_txd3, 4, 10), + + /* Bank DV */ + GROUP(uart_tx_b, 2, 16), + GROUP(uart_rx_b, 2, 15), + GROUP(i2c_sck_a, 1, 15), + GROUP(i2c_sda_a, 1, 14), + GROUP(i2c_sck_b, 1, 13), + GROUP(i2c_sda_b, 1, 12), + GROUP(i2c_sck_c, 1, 11), + GROUP(i2c_sda_c, 1, 10), + + /* Bank BOOT */ + GROUP(emmc_nand_d07, 7, 31), + GROUP(emmc_clk, 7, 30), + GROUP(emmc_cmd, 7, 29), + GROUP(emmc_ds, 7, 28), + GROUP(nand_ce0, 7, 7), + GROUP(nand_ce1, 7, 6), + GROUP(nand_rb0, 7, 5), + GROUP(nand_ale, 7, 4), + GROUP(nand_cle, 7, 3), + GROUP(nand_wen_clk, 7, 2), + GROUP(nand_ren_wr, 7, 1), + GROUP(nand_dqs, 7, 0), + + /* Bank CARD */ + GROUP(sdcard_d1, 6, 5), + GROUP(sdcard_d0, 6, 4), + GROUP(sdcard_d3, 6, 1), + GROUP(sdcard_d2, 6, 0), + GROUP(sdcard_cmd, 6, 2), + GROUP(sdcard_clk, 6, 3), +}; + +static struct meson_pmx_group meson_gxl_aobus_groups[] = { + GPIO_GROUP(GPIOAO_0, 0), + GPIO_GROUP(GPIOAO_1, 0), + GPIO_GROUP(GPIOAO_2, 0), + GPIO_GROUP(GPIOAO_3, 0), + GPIO_GROUP(GPIOAO_4, 0), + GPIO_GROUP(GPIOAO_5, 0), + GPIO_GROUP(GPIOAO_6, 0), + GPIO_GROUP(GPIOAO_7, 0), + GPIO_GROUP(GPIOAO_8, 0), + GPIO_GROUP(GPIOAO_9, 0), + + /* bank AO */ + GROUP(uart_tx_ao_b, 0, 26), + GROUP(uart_rx_ao_b, 0, 25), + GROUP(uart_tx_ao_a, 0, 12), + GROUP(uart_rx_ao_a, 0, 11), + GROUP(uart_cts_ao_a, 0, 10), + GROUP(uart_rts_ao_a, 0, 9), + GROUP(uart_cts_ao_b, 0, 8), + GROUP(uart_rts_ao_b, 0, 7), + GROUP(remote_input_ao, 0, 0), +}; + +static const char * const gpio_periphs_groups[] = { + "GPIOZ_0", "GPIOZ_1", "GPIOZ_2", "GPIOZ_3", "GPIOZ_4", + "GPIOZ_5", "GPIOZ_6", "GPIOZ_7", "GPIOZ_8", "GPIOZ_9", + "GPIOZ_10", "GPIOZ_11", "GPIOZ_12", "GPIOZ_13", "GPIOZ_14", + "GPIOZ_15", + + "GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3", "GPIOH_4", + "GPIOH_5", "GPIOH_6", "GPIOH_7", "GPIOH_8", "GPIOH_9", + + "BOOT_0", "BOOT_1", "BOOT_2", "BOOT_3", "BOOT_4", + "BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9", + "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14", + 
"BOOT_15", + + "CARD_0", "CARD_1", "CARD_2", "CARD_3", "CARD_4", + "CARD_5", "CARD_6", + + "GPIODV_0", "GPIODV_1", "GPIODV_2", "GPIODV_3", "GPIODV_4", + "GPIODV_5", "GPIODV_6", "GPIODV_7", "GPIODV_8", "GPIODV_9", + "GPIODV_10", "GPIODV_11", "GPIODV_12", "GPIODV_13", "GPIODV_14", + "GPIODV_15", "GPIODV_16", "GPIODV_17", "GPIODV_18", "GPIODV_19", + "GPIODV_20", "GPIODV_21", "GPIODV_22", "GPIODV_23", "GPIODV_24", + "GPIODV_25", "GPIODV_26", "GPIODV_27", "GPIODV_28", "GPIODV_29", + + "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4", + "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9", + "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14", + "GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18", + + "GPIO_TEST_N", +}; + +static const char * const emmc_groups[] = { + "emmc_nand_d07", "emmc_clk", "emmc_cmd", "emmc_ds", +}; + +static const char * const sdcard_groups[] = { + "sdcard_d0", "sdcard_d1", "sdcard_d2", "sdcard_d3", + "sdcard_cmd", "sdcard_clk", +}; + +static const char * const sdio_groups[] = { + "sdio_d0", "sdio_d1", "sdio_d2", "sdio_d3", + "sdio_cmd", "sdio_clk", "sdio_irq", +}; + +static const char * const nand_groups[] = { + "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale", "nand_cle", + "nand_wen_clk", "nand_ren_wr", "nand_dqs", +}; + +static const char * const uart_a_groups[] = { + "uart_tx_a", "uart_rx_a", "uart_cts_a", "uart_rts_a", +}; + +static const char * const uart_b_groups[] = { + "uart_tx_b", "uart_rx_b", +}; + +static const char * const uart_c_groups[] = { + "uart_tx_c", "uart_rx_c", +}; + +static const char * const i2c_a_groups[] = { + "i2c_sck_a", "i2c_sda_a", +}; + +static const char * const i2c_b_groups[] = { + "i2c_sck_b", "i2c_sda_b", +}; + +static const char * const i2c_c_groups[] = { + "i2c_sck_c", "i2c_sda_c", +}; + +static const char * const eth_groups[] = { + "eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv", + "eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3", + "eth_rgmii_tx_clk", "eth_tx_en", + "eth_txd0", "eth_txd1", "eth_txd2", "eth_txd3", +}; + +static const char * const pwm_e_groups[] = { + "pwm_e", +}; + +static const char * const gpio_aobus_groups[] = { + "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4", + "GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9", +}; + +static const char * const uart_ao_groups[] = { + "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a", +}; + +static const char * const uart_ao_b_groups[] = { + "uart_tx_ao_b", "uart_rx_ao_b", "uart_cts_ao_b", "uart_rts_ao_b", +}; + +static const char * const remote_input_ao_groups[] = { + "remote_input_ao", +}; + +static struct meson_pmx_func meson_gxl_periphs_functions[] = { + FUNCTION(gpio_periphs), + FUNCTION(emmc), + FUNCTION(sdcard), + FUNCTION(sdio), + FUNCTION(nand), + FUNCTION(uart_a), + FUNCTION(uart_b), + FUNCTION(uart_c), + FUNCTION(i2c_a), + FUNCTION(i2c_b), + FUNCTION(i2c_c), + FUNCTION(eth), + FUNCTION(pwm_e), +}; + +static struct meson_pmx_func meson_gxl_aobus_functions[] = { + FUNCTION(gpio_aobus), + FUNCTION(uart_ao), + FUNCTION(uart_ao_b), + FUNCTION(remote_input_ao), +}; + +static struct meson_bank meson_gxl_periphs_banks[] = { + /* name first last pullen pull dir out in */ + BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_18, EE_OFF), 4, 0, 4, 0, 12, 0, 13, 0, 14, 0), + BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 0, 0, 0, 0, 0, 0, 1, 0, 2, 0), + BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_9, EE_OFF), 1, 20, 1, 20, 3, 20, 4, 20, 5, 20), + BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 3, 0, 3, 0, 9, 0, 10, 0, 11, 0), + 
BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 2, 20, 2, 20, 6, 20, 7, 20, 8, 20), + BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_15, EE_OFF), 2, 0, 2, 0, 6, 0, 7, 0, 8, 0), + BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_1, EE_OFF), 3, 28, 3, 28, 9, 28, 10, 28, 11, 28), +}; + +static struct meson_bank meson_gxl_aobus_banks[] = { + /* name first last pullen pull dir out in */ + BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_9, 0), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), +}; + +struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = { + .name = "periphs-banks", + .pin_base = 10, + .pins = meson_gxl_periphs_pins, + .groups = meson_gxl_periphs_groups, + .funcs = meson_gxl_periphs_functions, + .banks = meson_gxl_periphs_banks, + .num_pins = ARRAY_SIZE(meson_gxl_periphs_pins), + .num_groups = ARRAY_SIZE(meson_gxl_periphs_groups), + .num_funcs = ARRAY_SIZE(meson_gxl_periphs_functions), + .num_banks = ARRAY_SIZE(meson_gxl_periphs_banks), +}; + +struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = { + .name = "aobus-banks", + .pin_base = 0, + .pins = meson_gxl_aobus_pins, + .groups = meson_gxl_aobus_groups, + .funcs = meson_gxl_aobus_functions, + .banks = meson_gxl_aobus_banks, + .num_pins = ARRAY_SIZE(meson_gxl_aobus_pins), + .num_groups = ARRAY_SIZE(meson_gxl_aobus_groups), + .num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions), + .num_banks = ARRAY_SIZE(meson_gxl_aobus_banks), +}; diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 57122eda155afdeba61202506e779ab320468017..a579126832afde2d758d629f58338af2d9e9378f 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -524,6 +524,14 @@ static const struct of_device_id meson_pinctrl_dt_match[] = { .compatible = "amlogic,meson-gxbb-aobus-pinctrl", .data = &meson_gxbb_aobus_pinctrl_data, }, + { + .compatible = "amlogic,meson-gxl-periphs-pinctrl", + .data = &meson_gxl_periphs_pinctrl_data, + }, + { + .compatible = "amlogic,meson-gxl-aobus-pinctrl", + .data = &meson_gxl_aobus_pinctrl_data, + }, { }, }; diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h index 98b5080650c1ac3c6ad1d16ea7298d5a0b2feb07..1aa871d5431ee8b9b1a3c2106909b7571aebb52a 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.h +++ b/drivers/pinctrl/meson/pinctrl-meson.h @@ -169,3 +169,5 @@ extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data; extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data; extern struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data; extern struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data; +extern struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data; +extern struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data; diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c index 8392083514fb63242a65927a4e51db0c27e030e3..af4814479eb02deaf703c90e6b317a378d55563a 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c @@ -379,13 +379,24 @@ static const unsigned msp0txrx_a_1_pins[] = { DB8500_PIN_AC4, DB8500_PIN_AC3 }; static const unsigned msp0tfstck_a_1_pins[] = { DB8500_PIN_AF3, DB8500_PIN_AE3 }; static const unsigned msp0rfsrck_a_1_pins[] = { DB8500_PIN_AD3, DB8500_PIN_AD4 }; /* Basic pins of the MMC/SD card 0 interface */ -static const unsigned mc0_a_1_pins[] = { DB8500_PIN_AC2, DB8500_PIN_AC1, - DB8500_PIN_AB4, DB8500_PIN_AA3, DB8500_PIN_AA4, DB8500_PIN_AB2, - DB8500_PIN_Y4, DB8500_PIN_Y2, 
DB8500_PIN_AA2, DB8500_PIN_AA1 }; +static const unsigned mc0_a_1_pins[] = { DB8500_PIN_AC2, /* MC0_CMDDIR */ + DB8500_PIN_AC1, /* MC0_DAT0DIR */ + DB8500_PIN_AB4, /* MC0_DAT2DIR */ + DB8500_PIN_AA3, /* MC0_FBCLK */ + DB8500_PIN_AA4, /* MC0_CLK */ + DB8500_PIN_AB2, /* MC0_CMD */ + DB8500_PIN_Y4, /* MC0_DAT0 */ + DB8500_PIN_Y2, /* MC0_DAT1 */ + DB8500_PIN_AA2, /* MC0_DAT2 */ + DB8500_PIN_AA1 /* MC0_DAT3 */ +}; /* Often only 4 bits are used, then these are not needed (only used for MMC) */ -static const unsigned mc0_dat47_a_1_pins[] = { DB8500_PIN_W2, DB8500_PIN_W3, - DB8500_PIN_V3, DB8500_PIN_V2}; -static const unsigned mc0dat31dir_a_1_pins[] = { DB8500_PIN_AB3 }; +static const unsigned mc0_dat47_a_1_pins[] = { DB8500_PIN_W2, /* MC0_DAT4 */ + DB8500_PIN_W3, /* MC0_DAT5 */ + DB8500_PIN_V3, /* MC0_DAT6 */ + DB8500_PIN_V2 /* MC0_DAT7 */ +}; +static const unsigned mc0dat31dir_a_1_pins[] = { DB8500_PIN_AB3 }; /* MC0_DAT31DIR */ /* MSP1 can only be on these pins, but TXD and RXD can be flipped */ static const unsigned msp1txrx_a_1_pins[] = { DB8500_PIN_AF2, DB8500_PIN_AG2 }; static const unsigned msp1_a_1_pins[] = { DB8500_PIN_AE1, DB8500_PIN_AE2 }; diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 5020ae5344794da4a64c571bc72e46243c853749..ce3335accb5b1ccc2abb8549049944c413fefa04 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -381,7 +381,7 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, if (ret < 0) goto exit; - for_each_child_of_node(np_config, np) { + for_each_available_child_of_node(np_config, np) { ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps, type); if (ret < 0) diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 9f09041859099134220b54c7aee691f8810ee09d..569bc28cb90972943bb0929127688b2a1e59ff91 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -56,6 +56,9 @@ static int gpio_banks; #define DRIVE_STRENGTH_SHIFT 5 #define DRIVE_STRENGTH_MASK 0x3 #define DRIVE_STRENGTH (DRIVE_STRENGTH_MASK << DRIVE_STRENGTH_SHIFT) +#define OUTPUT (1 << 7) +#define OUTPUT_VAL_SHIFT 8 +#define OUTPUT_VAL (0x1 << OUTPUT_VAL_SHIFT) #define DEBOUNCE (1 << 16) #define DEBOUNCE_VAL_SHIFT 17 #define DEBOUNCE_VAL (0x3fff << DEBOUNCE_VAL_SHIFT) @@ -375,6 +378,19 @@ static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on) writel_relaxed(mask, pio + (on ? PIO_PUER : PIO_PUDR)); } +static bool at91_mux_get_output(void __iomem *pio, unsigned int pin, bool *val) +{ + *val = (readl_relaxed(pio + PIO_ODSR) >> pin) & 0x1; + return (readl_relaxed(pio + PIO_OSR) >> pin) & 0x1; +} + +static void at91_mux_set_output(void __iomem *pio, unsigned int mask, + bool is_on, bool val) +{ + writel_relaxed(mask, pio + (val ? PIO_SODR : PIO_CODR)); + writel_relaxed(mask, pio + (is_on ? 
PIO_OER : PIO_ODR)); +} + static unsigned at91_mux_get_multidrive(void __iomem *pio, unsigned pin) { return (readl_relaxed(pio + PIO_MDSR) >> pin) & 0x1; @@ -848,6 +864,7 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev, void __iomem *pio; unsigned pin; int div; + bool out; *config = 0; dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id); @@ -875,6 +892,8 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev, if (info->ops->get_drivestrength) *config |= (info->ops->get_drivestrength(pio, pin) << DRIVE_STRENGTH_SHIFT); + if (at91_mux_get_output(pio, pin, &out)) + *config |= OUTPUT | (out << OUTPUT_VAL_SHIFT); return 0; } @@ -907,6 +926,8 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, if (config & PULL_UP && config & PULL_DOWN) return -EINVAL; + at91_mux_set_output(pio, mask, config & OUTPUT, + (config & OUTPUT_VAL) >> OUTPUT_VAL_SHIFT); at91_mux_set_pullup(pio, mask, config & PULL_UP); at91_mux_set_multidrive(pio, mask, config & MULTI_DRIVE); if (info->ops->set_deglitch) diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c new file mode 100644 index 0000000000000000000000000000000000000000..b36a90a3f3e492493b41c5c61c61582e7d35a091 --- /dev/null +++ b/drivers/pinctrl/pinctrl-da850-pupd.c @@ -0,0 +1,210 @@ +/* + * Pinconf driver for TI DA850/OMAP-L138/AM18XX pullup/pulldown groups + * + * Copyright (C) 2016 David Lechner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
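The at91 change above packs an output request into the driver's private config word alongside the existing pull-up/multi-drive/debounce bits: bit 7 (OUTPUT) marks the pin as driven and bit 8 (OUTPUT_VAL) carries the level, which at91_pinconf_set() then translates into PIO_SODR/PIO_CODR and PIO_OER/PIO_ODR writes. A tiny pack/unpack check using the same shifts follows; the defines are copied from the hunk above, the rest is a stand-alone sketch.

#include <assert.h>
#include <stdio.h>

#define OUTPUT			(1 << 7)
#define OUTPUT_VAL_SHIFT	8
#define OUTPUT_VAL		(0x1 << OUTPUT_VAL_SHIFT)

int main(void)
{
	int drive_high = 1;

	/* what at91_pinconf_get() builds for a pin configured as output-high */
	unsigned long config = OUTPUT | (drive_high << OUTPUT_VAL_SHIFT);

	/* what at91_pinconf_set() extracts again before touching the PIO registers */
	int is_output = !!(config & OUTPUT);
	int value = (config & OUTPUT_VAL) >> OUTPUT_VAL_SHIFT;

	assert(is_output == 1 && value == 1);
	printf("config=0x%03lx output=%d value=%d\n", config, is_output, value);
	return 0;
}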
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DA850_PUPD_ENA 0x00 +#define DA850_PUPD_SEL 0x04 + +struct da850_pupd_data { + void __iomem *base; + struct pinctrl_desc desc; + struct pinctrl_dev *pinctrl; +}; + +static const char * const da850_pupd_group_names[] = { + "cp0", "cp1", "cp2", "cp3", "cp4", "cp5", "cp6", "cp7", + "cp8", "cp9", "cp10", "cp11", "cp12", "cp13", "cp14", "cp15", + "cp16", "cp17", "cp18", "cp19", "cp20", "cp21", "cp22", "cp23", + "cp24", "cp25", "cp26", "cp27", "cp28", "cp29", "cp30", "cp31", +}; + +static int da850_pupd_get_groups_count(struct pinctrl_dev *pctldev) +{ + return ARRAY_SIZE(da850_pupd_group_names); +} + +static const char *da850_pupd_get_group_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + return da850_pupd_group_names[selector]; +} + +static int da850_pupd_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int selector, + const unsigned int **pins, + unsigned int *num_pins) +{ + *num_pins = 0; + + return 0; +} + +static const struct pinctrl_ops da850_pupd_pctlops = { + .get_groups_count = da850_pupd_get_groups_count, + .get_group_name = da850_pupd_get_group_name, + .get_group_pins = da850_pupd_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_group, + .dt_free_map = pinconf_generic_dt_free_map, +}; + +static int da850_pupd_pin_config_group_get(struct pinctrl_dev *pctldev, + unsigned int selector, + unsigned long *config) +{ + struct da850_pupd_data *data = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param = pinconf_to_config_param(*config); + u32 val; + u16 arg; + + val = readl(data->base + DA850_PUPD_ENA); + arg = !!(~val & BIT(selector)); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + break; + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + if (arg) { + /* bias is disabled */ + arg = 0; + break; + } + val = readl(data->base + DA850_PUPD_SEL); + if (param == PIN_CONFIG_BIAS_PULL_DOWN) + val = ~val; + arg = !!(val & BIT(selector)); + break; + default: + return -EINVAL; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int da850_pupd_pin_config_group_set(struct pinctrl_dev *pctldev, + unsigned int selector, + unsigned long *configs, + unsigned int num_configs) +{ + struct da850_pupd_data *data = pinctrl_dev_get_drvdata(pctldev); + u32 ena, sel; + enum pin_config_param param; + u16 arg; + int i; + + ena = readl(data->base + DA850_PUPD_ENA); + sel = readl(data->base + DA850_PUPD_SEL); + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + ena &= ~BIT(selector); + break; + case PIN_CONFIG_BIAS_PULL_UP: + ena |= BIT(selector); + sel |= BIT(selector); + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + ena |= BIT(selector); + sel &= ~BIT(selector); + break; + default: + return -EINVAL; + } + } + + writel(sel, data->base + DA850_PUPD_SEL); + writel(ena, data->base + DA850_PUPD_ENA); + + return 0; +} + +static const struct pinconf_ops da850_pupd_confops = { + .is_generic = true, + .pin_config_group_get = da850_pupd_pin_config_group_get, + .pin_config_group_set = da850_pupd_pin_config_group_set, +}; + +static int da850_pupd_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct da850_pupd_data *data; + struct resource *res; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = 
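The da850-pupd callbacks above drive two registers per group bit: PUPD_ENA enables the bias when the group's bit is set, and PUPD_SEL then selects pull-up (bit set) or pull-down (bit clear); da850_pupd_pin_config_group_set() updates both words and writes them back. A stand-alone model of that bit logic for a single selector follows; the enum and function names are invented for the sketch, while the bit manipulation mirrors the switch in the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum bias { BIAS_DISABLE, BIAS_PULL_UP, BIAS_PULL_DOWN };

static void apply_bias(uint32_t *ena, uint32_t *sel, unsigned int selector, enum bias b)
{
	uint32_t bit = 1u << selector;

	switch (b) {
	case BIAS_DISABLE:	/* leave SEL alone, just turn the bias off */
		*ena &= ~bit;
		break;
	case BIAS_PULL_UP:	/* bias on, SEL=1 selects pull-up */
		*ena |= bit;
		*sel |= bit;
		break;
	case BIAS_PULL_DOWN:	/* bias on, SEL=0 selects pull-down */
		*ena |= bit;
		*sel &= ~bit;
		break;
	}
}

int main(void)
{
	uint32_t ena = 0, sel = 0;

	apply_bias(&ena, &sel, 5, BIAS_PULL_UP);
	assert((ena & (1u << 5)) && (sel & (1u << 5)));

	apply_bias(&ena, &sel, 5, BIAS_PULL_DOWN);
	assert((ena & (1u << 5)) && !(sel & (1u << 5)));

	apply_bias(&ena, &sel, 5, BIAS_DISABLE);
	assert(!(ena & (1u << 5)));

	printf("ENA=0x%08x SEL=0x%08x\n", ena, sel);
	return 0;
}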
platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->base = devm_ioremap_resource(dev, res); + if (IS_ERR(data->base)) { + dev_err(dev, "Could not map resource\n"); + return PTR_ERR(data->base); + } + + data->desc.name = dev_name(dev); + data->desc.pctlops = &da850_pupd_pctlops; + data->desc.confops = &da850_pupd_confops; + data->desc.owner = THIS_MODULE; + + data->pinctrl = devm_pinctrl_register(dev, &data->desc, data); + if (IS_ERR(data->pinctrl)) { + dev_err(dev, "Failed to register pinctrl\n"); + return PTR_ERR(data->pinctrl); + } + + platform_set_drvdata(pdev, data); + + return 0; +} + +static int da850_pupd_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id da850_pupd_of_match[] = { + { .compatible = "ti,da850-pupd" }, + { } +}; + +static struct platform_driver da850_pupd_driver = { + .driver = { + .name = "ti-da850-pupd", + .of_match_table = da850_pupd_of_match, + }, + .probe = da850_pupd_probe, + .remove = da850_pupd_remove, +}; +module_platform_driver(da850_pupd_driver); + +MODULE_AUTHOR("David Lechner "); +MODULE_DESCRIPTION("TI DA850/OMAP-L138/AM18XX pullup/pulldown configuration"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c index 917a7d2535d71c0e061848dc5627ef8c15d6bf22..494ec9a7573a5c2026386d7753e503d53fada9ab 100644 --- a/drivers/pinctrl/pinctrl-oxnas.c +++ b/drivers/pinctrl/pinctrl-oxnas.c @@ -37,15 +37,24 @@ #define GPIO_BANK_START(bank) ((bank) * PINS_PER_BANK) -/* Regmap Offsets */ -#define PINMUX_PRIMARY_SEL0 0x0c -#define PINMUX_SECONDARY_SEL0 0x14 -#define PINMUX_TERTIARY_SEL0 0x8c -#define PINMUX_PRIMARY_SEL1 0x10 -#define PINMUX_SECONDARY_SEL1 0x18 -#define PINMUX_TERTIARY_SEL1 0x90 -#define PINMUX_PULLUP_CTRL0 0xac -#define PINMUX_PULLUP_CTRL1 0xb0 +/* OX810 Regmap Offsets */ +#define PINMUX_810_PRIMARY_SEL0 0x0c +#define PINMUX_810_SECONDARY_SEL0 0x14 +#define PINMUX_810_TERTIARY_SEL0 0x8c +#define PINMUX_810_PRIMARY_SEL1 0x10 +#define PINMUX_810_SECONDARY_SEL1 0x18 +#define PINMUX_810_TERTIARY_SEL1 0x90 +#define PINMUX_810_PULLUP_CTRL0 0xac +#define PINMUX_810_PULLUP_CTRL1 0xb0 + +/* OX820 Regmap Offsets */ +#define PINMUX_820_BANK_OFFSET 0x100000 +#define PINMUX_820_SECONDARY_SEL 0x14 +#define PINMUX_820_TERTIARY_SEL 0x8c +#define PINMUX_820_QUATERNARY_SEL 0x94 +#define PINMUX_820_DEBUG_SEL 0x9c +#define PINMUX_820_ALTERNATIVE_SEL 0xa4 +#define PINMUX_820_PULLUP_CTRL 0xac /* GPIO Registers */ #define INPUT_VALUE 0x00 @@ -87,8 +96,6 @@ struct oxnas_pinctrl { struct regmap *regmap; struct device *dev; struct pinctrl_dev *pctldev; - const struct pinctrl_pin_desc *pins; - unsigned int npins; const struct oxnas_function *functions; unsigned int nfunctions; const struct oxnas_pin_group *groups; @@ -97,7 +104,50 @@ struct oxnas_pinctrl { unsigned int nbanks; }; -static const struct pinctrl_pin_desc oxnas_pins[] = { +struct oxnas_pinctrl_data { + struct pinctrl_desc *desc; + struct oxnas_pinctrl *pctl; +}; + +static const struct pinctrl_pin_desc oxnas_ox810se_pins[] = { + PINCTRL_PIN(0, "gpio0"), + PINCTRL_PIN(1, "gpio1"), + PINCTRL_PIN(2, "gpio2"), + PINCTRL_PIN(3, "gpio3"), + PINCTRL_PIN(4, "gpio4"), + PINCTRL_PIN(5, "gpio5"), + PINCTRL_PIN(6, "gpio6"), + PINCTRL_PIN(7, "gpio7"), + PINCTRL_PIN(8, "gpio8"), + PINCTRL_PIN(9, "gpio9"), + PINCTRL_PIN(10, "gpio10"), + PINCTRL_PIN(11, "gpio11"), + PINCTRL_PIN(12, "gpio12"), + PINCTRL_PIN(13, "gpio13"), + PINCTRL_PIN(14, "gpio14"), + PINCTRL_PIN(15, "gpio15"), + PINCTRL_PIN(16, "gpio16"), + PINCTRL_PIN(17, "gpio17"), + 
PINCTRL_PIN(18, "gpio18"), + PINCTRL_PIN(19, "gpio19"), + PINCTRL_PIN(20, "gpio20"), + PINCTRL_PIN(21, "gpio21"), + PINCTRL_PIN(22, "gpio22"), + PINCTRL_PIN(23, "gpio23"), + PINCTRL_PIN(24, "gpio24"), + PINCTRL_PIN(25, "gpio25"), + PINCTRL_PIN(26, "gpio26"), + PINCTRL_PIN(27, "gpio27"), + PINCTRL_PIN(28, "gpio28"), + PINCTRL_PIN(29, "gpio29"), + PINCTRL_PIN(30, "gpio30"), + PINCTRL_PIN(31, "gpio31"), + PINCTRL_PIN(32, "gpio32"), + PINCTRL_PIN(33, "gpio33"), + PINCTRL_PIN(34, "gpio34"), +}; + +static const struct pinctrl_pin_desc oxnas_ox820_pins[] = { PINCTRL_PIN(0, "gpio0"), PINCTRL_PIN(1, "gpio1"), PINCTRL_PIN(2, "gpio2"), @@ -133,9 +183,24 @@ static const struct pinctrl_pin_desc oxnas_pins[] = { PINCTRL_PIN(32, "gpio32"), PINCTRL_PIN(33, "gpio33"), PINCTRL_PIN(34, "gpio34"), + PINCTRL_PIN(35, "gpio35"), + PINCTRL_PIN(36, "gpio36"), + PINCTRL_PIN(37, "gpio37"), + PINCTRL_PIN(38, "gpio38"), + PINCTRL_PIN(39, "gpio39"), + PINCTRL_PIN(40, "gpio40"), + PINCTRL_PIN(41, "gpio41"), + PINCTRL_PIN(42, "gpio42"), + PINCTRL_PIN(43, "gpio43"), + PINCTRL_PIN(44, "gpio44"), + PINCTRL_PIN(45, "gpio45"), + PINCTRL_PIN(46, "gpio46"), + PINCTRL_PIN(47, "gpio47"), + PINCTRL_PIN(48, "gpio48"), + PINCTRL_PIN(49, "gpio49"), }; -static const char * const oxnas_fct0_group[] = { +static const char * const oxnas_ox810se_fct0_group[] = { "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11", @@ -147,7 +212,7 @@ static const char * const oxnas_fct0_group[] = { "gpio32", "gpio33", "gpio34" }; -static const char * const oxnas_fct3_group[] = { +static const char * const oxnas_ox810se_fct3_group[] = { "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8", "gpio9", @@ -158,6 +223,40 @@ static const char * const oxnas_fct3_group[] = { "gpio34" }; +static const char * const oxnas_ox820_fct0_group[] = { + "gpio0", "gpio1", "gpio2", "gpio3", + "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", + "gpio12", "gpio13", "gpio14", "gpio15", + "gpio16", "gpio17", "gpio18", "gpio19", + "gpio20", "gpio21", "gpio22", "gpio23", + "gpio24", "gpio25", "gpio26", "gpio27", + "gpio28", "gpio29", "gpio30", "gpio31", + "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", + "gpio40", "gpio41", "gpio42", "gpio43", + "gpio44", "gpio45", "gpio46", "gpio47", + "gpio48", "gpio49" +}; + +static const char * const oxnas_ox820_fct1_group[] = { + "gpio3", "gpio4", + "gpio12", "gpio13", "gpio14", "gpio15", + "gpio16", "gpio17", "gpio18", "gpio19", + "gpio20", "gpio21", "gpio22", "gpio23", + "gpio24" +}; + +static const char * const oxnas_ox820_fct4_group[] = { + "gpio5", "gpio6", "gpio7", "gpio8", + "gpio24", "gpio25", "gpio26", "gpio27", + "gpio40", "gpio41", "gpio42", "gpio43" +}; + +static const char * const oxnas_ox820_fct5_group[] = { + "gpio28", "gpio29", "gpio30", "gpio31" +}; + #define FUNCTION(_name, _gr) \ { \ .name = #_name, \ @@ -165,9 +264,16 @@ static const char * const oxnas_fct3_group[] = { .ngroups = ARRAY_SIZE(oxnas_##_gr##_group), \ } -static const struct oxnas_function oxnas_functions[] = { - FUNCTION(gpio, fct0), - FUNCTION(fct3, fct3), +static const struct oxnas_function oxnas_ox810se_functions[] = { + FUNCTION(gpio, ox810se_fct0), + FUNCTION(fct3, ox810se_fct3), +}; + +static const struct oxnas_function oxnas_ox820_functions[] = { + FUNCTION(gpio, ox820_fct0), + FUNCTION(fct1, ox820_fct1), + FUNCTION(fct4, ox820_fct4), + FUNCTION(fct5, ox820_fct5), }; #define OXNAS_PINCTRL_GROUP(_pin, _name, ...) 
\ @@ -185,7 +291,7 @@ static const struct oxnas_function oxnas_functions[] = { .fct = _fct, \ } -static const struct oxnas_pin_group oxnas_groups[] = { +static const struct oxnas_pin_group oxnas_ox810se_groups[] = { OXNAS_PINCTRL_GROUP(0, gpio0, OXNAS_PINCTRL_FUNCTION(gpio, 0), OXNAS_PINCTRL_FUNCTION(fct3, 3)), @@ -282,6 +388,140 @@ static const struct oxnas_pin_group oxnas_groups[] = { OXNAS_PINCTRL_FUNCTION(fct3, 3)), }; +static const struct oxnas_pin_group oxnas_ox820_groups[] = { + OXNAS_PINCTRL_GROUP(0, gpio0, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(1, gpio1, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(2, gpio2, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(3, gpio3, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(4, gpio4, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(5, gpio5, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(6, gpio6, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(7, gpio7, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(8, gpio8, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(9, gpio9, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(10, gpio10, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(11, gpio11, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(12, gpio12, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(13, gpio13, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(14, gpio14, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(15, gpio15, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(16, gpio16, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(17, gpio17, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(18, gpio18, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(19, gpio19, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(20, gpio20, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(21, gpio21, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(22, gpio22, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(23, gpio23, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1)), + OXNAS_PINCTRL_GROUP(24, gpio24, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct1, 1), + OXNAS_PINCTRL_FUNCTION(fct4, 5)), + OXNAS_PINCTRL_GROUP(25, gpio25, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(26, gpio26, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(27, gpio27, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(28, gpio28, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct5, 5)), + OXNAS_PINCTRL_GROUP(29, gpio29, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct5, 5)), + OXNAS_PINCTRL_GROUP(30, gpio30, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct5, 5)), + 
OXNAS_PINCTRL_GROUP(31, gpio31, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct5, 5)), + OXNAS_PINCTRL_GROUP(32, gpio32, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(33, gpio33, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(34, gpio34, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(35, gpio35, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(36, gpio36, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(37, gpio37, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(38, gpio38, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(39, gpio39, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(40, gpio40, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(41, gpio41, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(42, gpio42, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(43, gpio43, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct4, 4)), + OXNAS_PINCTRL_GROUP(44, gpio44, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(45, gpio45, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(46, gpio46, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(47, gpio47, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(48, gpio48, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(49, gpio49, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), +}; + static inline struct oxnas_gpio_bank *pctl_to_bank(struct oxnas_pinctrl *pctl, unsigned int pin) { @@ -352,8 +592,8 @@ static int oxnas_pinmux_get_function_groups(struct pinctrl_dev *pctldev, return 0; } -static int oxnas_pinmux_enable(struct pinctrl_dev *pctldev, - unsigned int func, unsigned int group) +static int oxnas_ox810se_pinmux_enable(struct pinctrl_dev *pctldev, + unsigned int func, unsigned int group) { struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); const struct oxnas_pin_group *pg = &pctl->groups[group]; @@ -371,22 +611,22 @@ static int oxnas_pinmux_enable(struct pinctrl_dev *pctldev, regmap_write_bits(pctl->regmap, (pg->bank ? - PINMUX_PRIMARY_SEL1 : - PINMUX_PRIMARY_SEL0), + PINMUX_810_PRIMARY_SEL1 : + PINMUX_810_PRIMARY_SEL0), mask, (functions->fct == 1 ? mask : 0)); regmap_write_bits(pctl->regmap, (pg->bank ? - PINMUX_SECONDARY_SEL1 : - PINMUX_SECONDARY_SEL0), + PINMUX_810_SECONDARY_SEL1 : + PINMUX_810_SECONDARY_SEL0), mask, (functions->fct == 2 ? mask : 0)); regmap_write_bits(pctl->regmap, (pg->bank ? - PINMUX_TERTIARY_SEL1 : - PINMUX_TERTIARY_SEL0), + PINMUX_810_TERTIARY_SEL1 : + PINMUX_810_TERTIARY_SEL0), mask, (functions->fct == 3 ? mask : 0)); @@ -402,9 +642,64 @@ static int oxnas_pinmux_enable(struct pinctrl_dev *pctldev, return -EINVAL; } -static int oxnas_gpio_request_enable(struct pinctrl_dev *pctldev, - struct pinctrl_gpio_range *range, - unsigned int offset) +static int oxnas_ox820_pinmux_enable(struct pinctrl_dev *pctldev, + unsigned int func, unsigned int group) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + const struct oxnas_pin_group *pg = &pctl->groups[group]; + const struct oxnas_function *pf = &pctl->functions[func]; + const char *fname = pf->name; + struct oxnas_desc_function *functions = pg->functions; + unsigned int offset = (pg->bank ? 
PINMUX_820_BANK_OFFSET : 0); + u32 mask = BIT(pg->pin); + + while (functions->name) { + if (!strcmp(functions->name, fname)) { + dev_dbg(pctl->dev, + "setting function %s bank %d pin %d fct %d mask %x\n", + fname, pg->bank, pg->pin, + functions->fct, mask); + + regmap_write_bits(pctl->regmap, + offset + PINMUX_820_SECONDARY_SEL, + mask, + (functions->fct == 1 ? + mask : 0)); + regmap_write_bits(pctl->regmap, + offset + PINMUX_820_TERTIARY_SEL, + mask, + (functions->fct == 2 ? + mask : 0)); + regmap_write_bits(pctl->regmap, + offset + PINMUX_820_QUATERNARY_SEL, + mask, + (functions->fct == 3 ? + mask : 0)); + regmap_write_bits(pctl->regmap, + offset + PINMUX_820_DEBUG_SEL, + mask, + (functions->fct == 4 ? + mask : 0)); + regmap_write_bits(pctl->regmap, + offset + PINMUX_820_ALTERNATIVE_SEL, + mask, + (functions->fct == 5 ? + mask : 0)); + + return 0; + } + + functions++; + } + + dev_err(pctl->dev, "cannot mux pin %u to function %u\n", group, func); + + return -EINVAL; +} + +static int oxnas_ox810se_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset) { struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct oxnas_gpio_bank *bank = gpiochip_get_data(range->gc); @@ -415,18 +710,49 @@ static int oxnas_gpio_request_enable(struct pinctrl_dev *pctldev, regmap_write_bits(pctl->regmap, (bank->id ? - PINMUX_PRIMARY_SEL1 : - PINMUX_PRIMARY_SEL0), + PINMUX_810_PRIMARY_SEL1 : + PINMUX_810_PRIMARY_SEL0), mask, 0); regmap_write_bits(pctl->regmap, (bank->id ? - PINMUX_SECONDARY_SEL1 : - PINMUX_SECONDARY_SEL0), + PINMUX_810_SECONDARY_SEL1 : + PINMUX_810_SECONDARY_SEL0), mask, 0); regmap_write_bits(pctl->regmap, (bank->id ? - PINMUX_TERTIARY_SEL1 : - PINMUX_TERTIARY_SEL0), + PINMUX_810_TERTIARY_SEL1 : + PINMUX_810_TERTIARY_SEL0), + mask, 0); + + return 0; +} + +static int oxnas_ox820_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct oxnas_gpio_bank *bank = gpiochip_get_data(range->gc); + unsigned int bank_offset = (bank->id ? 
PINMUX_820_BANK_OFFSET : 0); + u32 mask = BIT(offset - bank->gpio_chip.base); + + dev_dbg(pctl->dev, "requesting gpio %d in bank %d (id %d) with mask 0x%x\n", + offset, bank->gpio_chip.base, bank->id, mask); + + regmap_write_bits(pctl->regmap, + bank_offset + PINMUX_820_SECONDARY_SEL, + mask, 0); + regmap_write_bits(pctl->regmap, + bank_offset + PINMUX_820_TERTIARY_SEL, + mask, 0); + regmap_write_bits(pctl->regmap, + bank_offset + PINMUX_820_QUATERNARY_SEL, + mask, 0); + regmap_write_bits(pctl->regmap, + bank_offset + PINMUX_820_DEBUG_SEL, + mask, 0); + regmap_write_bits(pctl->regmap, + bank_offset + PINMUX_820_ALTERNATIVE_SEL, mask, 0); return 0; @@ -498,17 +824,26 @@ static int oxnas_gpio_set_direction(struct pinctrl_dev *pctldev, return 0; } -static const struct pinmux_ops oxnas_pinmux_ops = { +static const struct pinmux_ops oxnas_ox810se_pinmux_ops = { .get_functions_count = oxnas_pinmux_get_functions_count, .get_function_name = oxnas_pinmux_get_function_name, .get_function_groups = oxnas_pinmux_get_function_groups, - .set_mux = oxnas_pinmux_enable, - .gpio_request_enable = oxnas_gpio_request_enable, + .set_mux = oxnas_ox810se_pinmux_enable, + .gpio_request_enable = oxnas_ox810se_gpio_request_enable, .gpio_set_direction = oxnas_gpio_set_direction, }; -static int oxnas_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, - unsigned long *config) +static const struct pinmux_ops oxnas_ox820_pinmux_ops = { + .get_functions_count = oxnas_pinmux_get_functions_count, + .get_function_name = oxnas_pinmux_get_function_name, + .get_function_groups = oxnas_pinmux_get_function_groups, + .set_mux = oxnas_ox820_pinmux_enable, + .gpio_request_enable = oxnas_ox820_gpio_request_enable, + .gpio_set_direction = oxnas_gpio_set_direction, +}; + +static int oxnas_ox810se_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) { struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin); @@ -521,8 +856,38 @@ static int oxnas_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, case PIN_CONFIG_BIAS_PULL_UP: ret = regmap_read(pctl->regmap, (bank->id ? - PINMUX_PULLUP_CTRL1 : - PINMUX_PULLUP_CTRL0), + PINMUX_810_PULLUP_CTRL1 : + PINMUX_810_PULLUP_CTRL0), + &arg); + if (ret) + return ret; + + arg = !!(arg & mask); + break; + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int oxnas_ox820_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin); + unsigned int param = pinconf_to_config_param(*config); + unsigned int bank_offset = (bank->id ? 
PINMUX_820_BANK_OFFSET : 0); + u32 mask = BIT(pin - bank->gpio_chip.base); + int ret; + u32 arg; + + switch (param) { + case PIN_CONFIG_BIAS_PULL_UP: + ret = regmap_read(pctl->regmap, + bank_offset + PINMUX_820_PULLUP_CTRL, &arg); if (ret) return ret; @@ -538,8 +903,9 @@ static int oxnas_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, return 0; } -static int oxnas_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, - unsigned long *configs, unsigned int num_configs) +static int oxnas_ox810se_pinconf_set(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *configs, + unsigned int num_configs) { struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin); @@ -561,8 +927,8 @@ static int oxnas_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, dev_dbg(pctl->dev, " pullup\n"); regmap_write_bits(pctl->regmap, (bank->id ? - PINMUX_PULLUP_CTRL1 : - PINMUX_PULLUP_CTRL0), + PINMUX_810_PULLUP_CTRL1 : + PINMUX_810_PULLUP_CTRL0), mask, mask); break; default: @@ -575,18 +941,53 @@ static int oxnas_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, return 0; } -static const struct pinconf_ops oxnas_pinconf_ops = { - .pin_config_get = oxnas_pinconf_get, - .pin_config_set = oxnas_pinconf_set, +static int oxnas_ox820_pinconf_set(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *configs, + unsigned int num_configs) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin); + unsigned int bank_offset = (bank->id ? PINMUX_820_BANK_OFFSET : 0); + unsigned int param; + u32 arg; + unsigned int i; + u32 offset = pin - bank->gpio_chip.base; + u32 mask = BIT(offset); + + dev_dbg(pctl->dev, "setting pin %d bank %d mask 0x%x\n", + pin, bank->gpio_chip.base, mask); + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_PULL_UP: + dev_dbg(pctl->dev, " pullup\n"); + regmap_write_bits(pctl->regmap, + bank_offset + PINMUX_820_PULLUP_CTRL, + mask, mask); + break; + default: + dev_err(pctl->dev, "Property %u not supported\n", + param); + return -ENOTSUPP; + } + } + + return 0; +} + +static const struct pinconf_ops oxnas_ox810se_pinconf_ops = { + .pin_config_get = oxnas_ox810se_pinconf_get, + .pin_config_set = oxnas_ox810se_pinconf_set, .is_generic = true, }; -static struct pinctrl_desc oxnas_pinctrl_desc = { - .name = "oxnas-pinctrl", - .pctlops = &oxnas_pinctrl_ops, - .pmxops = &oxnas_pinmux_ops, - .confops = &oxnas_pinconf_ops, - .owner = THIS_MODULE, +static const struct pinconf_ops oxnas_ox820_pinconf_ops = { + .pin_config_get = oxnas_ox820_pinconf_get, + .pin_config_set = oxnas_ox820_pinconf_set, + .is_generic = true, }; static void oxnas_gpio_irq_ack(struct irq_data *data) @@ -699,10 +1100,78 @@ static struct oxnas_gpio_bank oxnas_gpio_banks[] = { GPIO_BANK(1), }; +static struct oxnas_pinctrl ox810se_pinctrl = { + .functions = oxnas_ox810se_functions, + .nfunctions = ARRAY_SIZE(oxnas_ox810se_functions), + .groups = oxnas_ox810se_groups, + .ngroups = ARRAY_SIZE(oxnas_ox810se_groups), + .gpio_banks = oxnas_gpio_banks, + .nbanks = ARRAY_SIZE(oxnas_gpio_banks), +}; + +static struct pinctrl_desc oxnas_ox810se_pinctrl_desc = { + .name = "oxnas-pinctrl", + .pins = oxnas_ox810se_pins, + .npins = ARRAY_SIZE(oxnas_ox810se_pins), + .pctlops = &oxnas_pinctrl_ops, + .pmxops = &oxnas_ox810se_pinmux_ops, + .confops = 
&oxnas_ox810se_pinconf_ops, + .owner = THIS_MODULE, +}; + +static struct oxnas_pinctrl ox820_pinctrl = { + .functions = oxnas_ox820_functions, + .nfunctions = ARRAY_SIZE(oxnas_ox820_functions), + .groups = oxnas_ox820_groups, + .ngroups = ARRAY_SIZE(oxnas_ox820_groups), + .gpio_banks = oxnas_gpio_banks, + .nbanks = ARRAY_SIZE(oxnas_gpio_banks), +}; + +static struct pinctrl_desc oxnas_ox820_pinctrl_desc = { + .name = "oxnas-pinctrl", + .pins = oxnas_ox820_pins, + .npins = ARRAY_SIZE(oxnas_ox820_pins), + .pctlops = &oxnas_pinctrl_ops, + .pmxops = &oxnas_ox820_pinmux_ops, + .confops = &oxnas_ox820_pinconf_ops, + .owner = THIS_MODULE, +}; + +static struct oxnas_pinctrl_data oxnas_ox810se_pinctrl_data = { + .desc = &oxnas_ox810se_pinctrl_desc, + .pctl = &ox810se_pinctrl, +}; + +static struct oxnas_pinctrl_data oxnas_ox820_pinctrl_data = { + .desc = &oxnas_ox820_pinctrl_desc, + .pctl = &ox820_pinctrl, +}; + +static const struct of_device_id oxnas_pinctrl_of_match[] = { + { .compatible = "oxsemi,ox810se-pinctrl", + .data = &oxnas_ox810se_pinctrl_data + }, + { .compatible = "oxsemi,ox820-pinctrl", + .data = &oxnas_ox820_pinctrl_data, + }, + { }, +}; + static int oxnas_pinctrl_probe(struct platform_device *pdev) { + const struct of_device_id *id; + const struct oxnas_pinctrl_data *data; struct oxnas_pinctrl *pctl; + id = of_match_node(oxnas_pinctrl_of_match, pdev->dev.of_node); + if (!id) + return -ENODEV; + + data = id->data; + if (!data || !data->pctl || !data->desc) + return -EINVAL; + pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); if (!pctl) return -ENOMEM; @@ -716,20 +1185,14 @@ static int oxnas_pinctrl_probe(struct platform_device *pdev) return -ENODEV; } - pctl->pins = oxnas_pins; - pctl->npins = ARRAY_SIZE(oxnas_pins); - pctl->functions = oxnas_functions; - pctl->nfunctions = ARRAY_SIZE(oxnas_functions); - pctl->groups = oxnas_groups; - pctl->ngroups = ARRAY_SIZE(oxnas_groups); - pctl->gpio_banks = oxnas_gpio_banks; - pctl->nbanks = ARRAY_SIZE(oxnas_gpio_banks); - - oxnas_pinctrl_desc.pins = pctl->pins; - oxnas_pinctrl_desc.npins = pctl->npins; + pctl->functions = data->pctl->functions; + pctl->nfunctions = data->pctl->nfunctions; + pctl->groups = data->pctl->groups; + pctl->ngroups = data->pctl->ngroups; + pctl->gpio_banks = data->pctl->gpio_banks; + pctl->nbanks = data->pctl->nbanks; - pctl->pctldev = pinctrl_register(&oxnas_pinctrl_desc, - &pdev->dev, pctl); + pctl->pctldev = pinctrl_register(data->desc, &pdev->dev, pctl); if (IS_ERR(pctl->pctldev)) { dev_err(&pdev->dev, "Failed to register pinctrl device\n"); return PTR_ERR(pctl->pctldev); @@ -805,11 +1268,6 @@ static int oxnas_gpio_probe(struct platform_device *pdev) return 0; } -static const struct of_device_id oxnas_pinctrl_of_match[] = { - { .compatible = "oxsemi,ox810se-pinctrl", }, - { }, -}; - static struct platform_driver oxnas_pinctrl_driver = { .driver = { .name = "oxnas-pinctrl", @@ -821,6 +1279,7 @@ static struct platform_driver oxnas_pinctrl_driver = { static const struct of_device_id oxnas_gpio_of_match[] = { { .compatible = "oxsemi,ox810se-gpio", }, + { .compatible = "oxsemi,ox820-gpio", }, { }, }; diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 49bf7dcb7ed82c082df8c24011fe69dec1ecada7..08765f58253c206b2c3d6692d19a504a7de4fbfa 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -59,6 +59,7 @@ #define GPIO_LS_SYNC 0x60 enum rockchip_pinctrl_type { + RK1108, RK2928, RK3066B, RK3188, @@ -624,6 +625,65 @@ static int 
rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux) return ret; } +#define RK1108_PULL_PMU_OFFSET 0x10 +#define RK1108_PULL_OFFSET 0x110 +#define RK1108_PULL_PINS_PER_REG 8 +#define RK1108_PULL_BITS_PER_PIN 2 +#define RK1108_PULL_BANK_STRIDE 16 + +static void rk1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + /* The first 24 pins of the first bank are located in PMU */ + if (bank->bank_num == 0) { + *regmap = info->regmap_pmu; + *reg = RK1108_PULL_PMU_OFFSET; + } else { + *reg = RK1108_PULL_OFFSET; + *regmap = info->regmap_base; + /* correct the offset, as we're starting with the 2nd bank */ + *reg -= 0x10; + *reg += bank->bank_num * RK1108_PULL_BANK_STRIDE; + } + + *reg += ((pin_num / RK1108_PULL_PINS_PER_REG) * 4); + *bit = (pin_num % RK1108_PULL_PINS_PER_REG); + *bit *= RK1108_PULL_BITS_PER_PIN; +} + +#define RK1108_DRV_PMU_OFFSET 0x20 +#define RK1108_DRV_GRF_OFFSET 0x210 +#define RK1108_DRV_BITS_PER_PIN 2 +#define RK1108_DRV_PINS_PER_REG 8 +#define RK1108_DRV_BANK_STRIDE 16 + +static void rk1108_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + /* The first 24 pins of the first bank are located in PMU */ + if (bank->bank_num == 0) { + *regmap = info->regmap_pmu; + *reg = RK1108_DRV_PMU_OFFSET; + } else { + *regmap = info->regmap_base; + *reg = RK1108_DRV_GRF_OFFSET; + + /* correct the offset, as we're starting with the 2nd bank */ + *reg -= 0x10; + *reg += bank->bank_num * RK1108_DRV_BANK_STRIDE; + } + + *reg += ((pin_num / RK1108_DRV_PINS_PER_REG) * 4); + *bit = pin_num % RK1108_DRV_PINS_PER_REG; + *bit *= RK1108_DRV_BITS_PER_PIN; +} + #define RK2928_PULL_OFFSET 0x118 #define RK2928_PULL_PINS_PER_REG 16 #define RK2928_PULL_BANK_STRIDE 8 @@ -1123,6 +1183,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) return !(data & BIT(bit)) ? PIN_CONFIG_BIAS_PULL_PIN_DEFAULT : PIN_CONFIG_BIAS_DISABLE; + case RK1108: case RK3188: case RK3288: case RK3368: @@ -1169,6 +1230,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, spin_unlock_irqrestore(&bank->slock, flags); break; + case RK1108: case RK3188: case RK3288: case RK3368: @@ -1358,6 +1420,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl, pull == PIN_CONFIG_BIAS_DISABLE); case RK3066B: return pull ? 
false : true; + case RK1108: case RK3188: case RK3288: case RK3368: @@ -2455,6 +2518,27 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev) return 0; } +static struct rockchip_pin_bank rk1108_pin_banks[] = { + PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU, + IOMUX_SOURCE_PMU, + IOMUX_SOURCE_PMU, + IOMUX_SOURCE_PMU), + PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0), + PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, 0, 0, 0), + PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", 0, 0, 0, 0), +}; + +static struct rockchip_pin_ctrl rk1108_pin_ctrl = { + .pin_banks = rk1108_pin_banks, + .nr_banks = ARRAY_SIZE(rk1108_pin_banks), + .label = "RK1108-GPIO", + .type = RK1108, + .grf_mux_offset = 0x10, + .pmu_mux_offset = 0x0, + .pull_calc_reg = rk1108_calc_pull_reg_and_bit, + .drv_calc_reg = rk1108_calc_drv_reg_and_bit, +}; + static struct rockchip_pin_bank rk2928_pin_banks[] = { PIN_BANK(0, 32, "gpio0"), PIN_BANK(1, 32, "gpio1"), @@ -2684,6 +2768,8 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = { }; static const struct of_device_id rockchip_pinctrl_dt_match[] = { + { .compatible = "rockchip,rk1108-pinctrl", + .data = (void *)&rk1108_pin_ctrl }, { .compatible = "rockchip,rk2928-pinctrl", .data = (void *)&rk2928_pin_ctrl }, { .compatible = "rockchip,rk3036-pinctrl", diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index bfdf720db270d8b2c47007455406d9887ed028c6..a5a0392ab8175da62001816e3f7eb7a2600f1b08 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -31,12 +31,10 @@ #include #include "core.h" +#include "devicetree.h" #include "pinconf.h" #define DRIVER_NAME "pinctrl-single" -#define PCS_MUX_PINS_NAME "pinctrl-single,pins" -#define PCS_MUX_BITS_NAME "pinctrl-single,bits" -#define PCS_REG_NAME_LEN ((sizeof(unsigned long) * 2) + 3) #define PCS_OFF_DISABLED ~0U /** @@ -141,20 +139,6 @@ struct pcs_data { int cur; }; -/** - * struct pcs_name - register name for a pin - * @name: name of the pinctrl register - * - * REVISIT: We may want to make names optional in the pinctrl - * framework as some drivers may not care about pin names to - * avoid kernel bloat. The pin names can be deciphered by user - * space tools using debugfs based on the register address and - * SoC packaging information. 
- */ -struct pcs_name { - char name[PCS_REG_NAME_LEN]; -}; - /** * struct pcs_soc_data - SoC specific settings * @flags: initial SoC specific PCS_FEAT_xxx values @@ -177,8 +161,11 @@ struct pcs_soc_data { * @base: virtual address of the controller * @size: size of the ioremapped area * @dev: device entry + * @np: device tree node * @pctl: pin controller device * @flags: mask of PCS_FEAT_xxx values + * @missing_nr_pinctrl_cells: for legacy binding, may go away + * @socdata: soc specific data * @lock: spinlock for register access * @mutex: mutex protecting the lists * @width: bits per mux register @@ -186,8 +173,8 @@ struct pcs_soc_data { * @fshift: function register shift * @foff: value to turn mux off * @fmax: max number of functions in fmask - * @bits_per_pin:number of bits per pin - * @names: array of register names for pins + * @bits_per_mux: number of bits per mux + * @bits_per_pin: number of bits per pin * @pins: physical pins on the SoC * @pgtree: pingroup index radix tree * @ftree: function index radix tree @@ -208,11 +195,13 @@ struct pcs_device { void __iomem *base; unsigned size; struct device *dev; + struct device_node *np; struct pinctrl_dev *pctl; unsigned flags; #define PCS_QUIRK_SHARED_IRQ (1 << 2) #define PCS_FEAT_IRQ (1 << 1) #define PCS_FEAT_PINCONF (1 << 0) + struct property *missing_nr_pinctrl_cells; struct pcs_soc_data socdata; raw_spinlock_t lock; struct mutex mutex; @@ -223,7 +212,6 @@ struct pcs_device { unsigned fmax; bool bits_per_mux; unsigned bits_per_pin; - struct pcs_name *names; struct pcs_data pins; struct radix_tree_root pgtree; struct radix_tree_root ftree; @@ -354,13 +342,17 @@ static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev, { struct pcs_device *pcs; unsigned val, mux_bytes; + unsigned long offset; + size_t pa; pcs = pinctrl_dev_get_drvdata(pctldev); mux_bytes = pcs->width / BITS_PER_BYTE; - val = pcs->read(pcs->base + pin * mux_bytes); + offset = pin * mux_bytes; + val = pcs->read(pcs->base + offset); + pa = pcs->res->start + offset; - seq_printf(s, "%08x %s " , val, DRIVER_NAME); + seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME); } static void pcs_dt_free_map(struct pinctrl_dev *pctldev, @@ -763,7 +755,6 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, { struct pcs_soc_data *pcs_soc = &pcs->socdata; struct pinctrl_pin_desc *pin; - struct pcs_name *pn; int i; i = pcs->pins.cur; @@ -786,10 +777,6 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, } pin = &pcs->pins.pa[i]; - pn = &pcs->names[i]; - sprintf(pn->name, "%lx.%u", - (unsigned long)pcs->res->start + offset, pin_pos); - pin->name = pn->name; pin->number = i; pcs->pins.cur++; @@ -827,12 +814,6 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs) if (!pcs->pins.pa) return -ENOMEM; - pcs->names = devm_kzalloc(pcs->dev, - sizeof(struct pcs_name) * nr_pins, - GFP_KERNEL); - if (!pcs->names) - return -ENOMEM; - pcs->desc.pins = pcs->pins.pa; pcs->desc.npins = nr_pins; @@ -1146,21 +1127,17 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, unsigned *num_maps, const char **pgnames) { + const char *name = "pinctrl-single,pins"; struct pcs_func_vals *vals; - const __be32 *mux; - int size, rows, *pins, index = 0, found = 0, res = -ENOMEM; + int rows, *pins, found = 0, res = -ENOMEM, i; struct pcs_function *function; - mux = of_get_property(np, PCS_MUX_PINS_NAME, &size); - if ((!mux) || (size < sizeof(*mux) * 2)) { - dev_err(pcs->dev, "bad data for mux %s\n", - np->name); + rows = pinctrl_count_index_with_args(np, name); + if (rows <= 0) { 
+ dev_err(pcs->dev, "Ivalid number of rows: %d\n", rows); return -EINVAL; } - size /= sizeof(*mux); /* Number of elements in array */ - rows = size / 2; - vals = devm_kzalloc(pcs->dev, sizeof(*vals) * rows, GFP_KERNEL); if (!vals) return -ENOMEM; @@ -1169,14 +1146,28 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, if (!pins) goto free_vals; - while (index < size) { - unsigned offset, val; + for (i = 0; i < rows; i++) { + struct of_phandle_args pinctrl_spec; + unsigned int offset; int pin; - offset = be32_to_cpup(mux + index++); - val = be32_to_cpup(mux + index++); + res = pinctrl_parse_index_with_args(np, name, i, &pinctrl_spec); + if (res) + return res; + + if (pinctrl_spec.args_count < 2) { + dev_err(pcs->dev, "invalid args_count for spec: %i\n", + pinctrl_spec.args_count); + break; + } + + /* Index plus one value cell */ + offset = pinctrl_spec.args[0]; vals[found].reg = pcs->base + offset; - vals[found].val = val; + vals[found].val = pinctrl_spec.args[1]; + + dev_dbg(pcs->dev, "%s index: 0x%x value: 0x%x\n", + pinctrl_spec.np->name, offset, pinctrl_spec.args[1]); pin = pcs_get_pin_by_offset(pcs, offset); if (pin < 0) { @@ -1190,8 +1181,10 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, pgnames[0] = np->name; function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1); - if (!function) + if (!function) { + res = -ENOMEM; goto free_pins; + } res = pcs_add_pingroup(pcs, np, np->name, pins, found); if (res < 0) @@ -1226,36 +1219,24 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, return res; } -#define PARAMS_FOR_BITS_PER_MUX 3 - static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs, struct device_node *np, struct pinctrl_map **map, unsigned *num_maps, const char **pgnames) { + const char *name = "pinctrl-single,bits"; struct pcs_func_vals *vals; - const __be32 *mux; - int size, rows, *pins, index = 0, found = 0, res = -ENOMEM; + int rows, *pins, found = 0, res = -ENOMEM, i; int npins_in_row; struct pcs_function *function; - mux = of_get_property(np, PCS_MUX_BITS_NAME, &size); - - if (!mux) { - dev_err(pcs->dev, "no valid property for %s\n", np->name); - return -EINVAL; - } - - if (size < (sizeof(*mux) * PARAMS_FOR_BITS_PER_MUX)) { - dev_err(pcs->dev, "bad data for %s\n", np->name); + rows = pinctrl_count_index_with_args(np, name); + if (rows <= 0) { + dev_err(pcs->dev, "Invalid number of rows: %d\n", rows); return -EINVAL; } - /* Number of elements in array */ - size /= sizeof(*mux); - - rows = size / PARAMS_FOR_BITS_PER_MUX; npins_in_row = pcs->width / pcs->bits_per_pin; vals = devm_kzalloc(pcs->dev, sizeof(*vals) * rows * npins_in_row, @@ -1268,15 +1249,30 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs, if (!pins) goto free_vals; - while (index < size) { + for (i = 0; i < rows; i++) { + struct of_phandle_args pinctrl_spec; unsigned offset, val; unsigned mask, bit_pos, val_pos, mask_pos, submask; unsigned pin_num_from_lsb; int pin; - offset = be32_to_cpup(mux + index++); - val = be32_to_cpup(mux + index++); - mask = be32_to_cpup(mux + index++); + res = pinctrl_parse_index_with_args(np, name, i, &pinctrl_spec); + if (res) + return res; + + if (pinctrl_spec.args_count < 3) { + dev_err(pcs->dev, "invalid args_count for spec: %i\n", + pinctrl_spec.args_count); + break; + } + + /* Index plus two value cells */ + offset = pinctrl_spec.args[0]; + val = pinctrl_spec.args[1]; + mask = pinctrl_spec.args[2]; + + dev_dbg(pcs->dev, "%s index: 0x%x value: 0x%x mask: 0x%x\n", + pinctrl_spec.np->name, 
offset, val, mask); /* Parse pins in each row from LSB */ while (mask) { @@ -1319,8 +1315,10 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs, pgnames[0] = np->name; function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1); - if (!function) + if (!function) { + res = -ENOMEM; goto free_pins; + } res = pcs_add_pingroup(pcs, np, np->name, pins, found); if (res < 0) @@ -1494,17 +1492,12 @@ static void pcs_free_resources(struct pcs_device *pcs) pinctrl_unregister(pcs->pctl); pcs_free_funcs(pcs); pcs_free_pingroups(pcs); +#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE) + if (pcs->missing_nr_pinctrl_cells) + of_remove_property(pcs->np, pcs->missing_nr_pinctrl_cells); +#endif } -#define PCS_GET_PROP_U32(name, reg, err) \ - do { \ - ret = of_property_read_u32(np, name, reg); \ - if (ret) { \ - dev_err(pcs->dev, err); \ - return ret; \ - } \ - } while (0); - static const struct of_device_id pcs_of_match[]; static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs) @@ -1820,6 +1813,55 @@ static int pinctrl_single_resume(struct platform_device *pdev) } #endif +/** + * pcs_quirk_missing_pinctrl_cells - handle legacy binding + * @pcs: pinctrl driver instance + * @np: device tree node + * @cells: number of cells + * + * Handle legacy binding with no #pinctrl-cells. This should be + * always two pinctrl-single,bit-per-mux and one for others. + * At some point we may want to consider removing this. + */ +static int pcs_quirk_missing_pinctrl_cells(struct pcs_device *pcs, + struct device_node *np, + int cells) +{ + struct property *p; + const char *name = "#pinctrl-cells"; + int error; + u32 val; + + error = of_property_read_u32(np, name, &val); + if (!error) + return 0; + + dev_warn(pcs->dev, "please update dts to use %s = <%i>\n", + name, cells); + + p = devm_kzalloc(pcs->dev, sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + p->length = sizeof(__be32); + p->value = devm_kzalloc(pcs->dev, sizeof(__be32), GFP_KERNEL); + if (!p->value) + return -ENOMEM; + *(__be32 *)p->value = cpu_to_be32(cells); + + p->name = devm_kstrdup(pcs->dev, name, GFP_KERNEL); + if (!p->name) + return -ENOMEM; + + pcs->missing_nr_pinctrl_cells = p; + +#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE) + error = of_add_property(np, pcs->missing_nr_pinctrl_cells); +#endif + + return error; +} + static int pcs_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -1840,6 +1882,7 @@ static int pcs_probe(struct platform_device *pdev) return -ENOMEM; } pcs->dev = &pdev->dev; + pcs->np = np; raw_spin_lock_init(&pcs->lock); mutex_init(&pcs->mutex); INIT_LIST_HEAD(&pcs->pingroups); @@ -1849,8 +1892,13 @@ static int pcs_probe(struct platform_device *pdev) pcs->flags = soc->flags; memcpy(&pcs->socdata, soc, sizeof(*soc)); - PCS_GET_PROP_U32("pinctrl-single,register-width", &pcs->width, - "register width not specified\n"); + ret = of_property_read_u32(np, "pinctrl-single,register-width", + &pcs->width); + if (ret) { + dev_err(pcs->dev, "register width not specified\n"); + + return ret; + } ret = of_property_read_u32(np, "pinctrl-single,function-mask", &pcs->fmask); @@ -1871,6 +1919,13 @@ static int pcs_probe(struct platform_device *pdev) pcs->bits_per_mux = of_property_read_bool(np, "pinctrl-single,bit-per-mux"); + ret = pcs_quirk_missing_pinctrl_cells(pcs, np, + pcs->bits_per_mux ? 
2 : 1); + if (ret) { + dev_err(&pdev->dev, "unable to patch #pinctrl-cells\n"); + + return ret; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 99da4cf91031b49757cc24735057b72c3d4f760e..676efcc032d26178718c601116a1a387622760c5 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1006,7 +1006,7 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev, function = st_pctl_get_pin_function(pc, offset); if (function) - snprintf(f, 10, "Alt Fn %d", function); + snprintf(f, 10, "Alt Fn %u", function); else snprintf(f, 5, "GPIO"); @@ -1181,7 +1181,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np, if (!strcmp(pp->name, "name")) continue; - if (pp && (pp->length/sizeof(__be32)) >= OF_GPIO_ARGS_MIN) { + if (pp->length / sizeof(__be32) >= OF_GPIO_ARGS_MIN) { npins++; } else { pr_warn("Invalid st,pins in %s node\n", np->name); @@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info, if (info->irqmux_base || gpio_irq > 0) { err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip, 0, handle_simple_irq, - IRQ_TYPE_LEVEL_LOW); + IRQ_TYPE_NONE); if (err) { gpiochip_remove(&bank->gpio_chip); dev_info(dev, "could not add irqchip\n"); diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c new file mode 100644 index 0000000000000000000000000000000000000000..29fb7403d24e1acd01eb3bb8bb9c8f40c17bf5d1 --- /dev/null +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -0,0 +1,1275 @@ +/* + * Copyright (c) 2016, BayLibre, SAS. All rights reserved. + * Author: Neil Armstrong + * + * Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Driver for Semtech SX150X I2C GPIO Expanders + * The handling of the 4-bit chips (SX1501/SX1504/SX1507) is untested. + * + * Author: Gregory Bean + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "pinconf.h" +#include "pinctrl-utils.h" + +/* The chip models of sx150x */ +enum { + SX150X_123 = 0, + SX150X_456, + SX150X_789, +}; +enum { + SX150X_789_REG_MISC_AUTOCLEAR_OFF = 1 << 0, + SX150X_MAX_REGISTER = 0xad, + SX150X_IRQ_TYPE_EDGE_RISING = 0x1, + SX150X_IRQ_TYPE_EDGE_FALLING = 0x2, + SX150X_789_RESET_KEY1 = 0x12, + SX150X_789_RESET_KEY2 = 0x34, +}; + +struct sx150x_123_pri { + u8 reg_pld_mode; + u8 reg_pld_table0; + u8 reg_pld_table1; + u8 reg_pld_table2; + u8 reg_pld_table3; + u8 reg_pld_table4; + u8 reg_advanced; +}; + +struct sx150x_456_pri { + u8 reg_pld_mode; + u8 reg_pld_table0; + u8 reg_pld_table1; + u8 reg_pld_table2; + u8 reg_pld_table3; + u8 reg_pld_table4; + u8 reg_advanced; +}; + +struct sx150x_789_pri { + u8 reg_drain; + u8 reg_polarity; + u8 reg_clock; + u8 reg_misc; + u8 reg_reset; + u8 ngpios; +}; + +struct sx150x_device_data { + u8 model; + u8 reg_pullup; + u8 reg_pulldn; + u8 reg_dir; + u8 reg_data; + u8 reg_irq_mask; + u8 reg_irq_src; + u8 reg_sense; + u8 ngpios; + union { + struct sx150x_123_pri x123; + struct sx150x_456_pri x456; + struct sx150x_789_pri x789; + } pri; + const struct pinctrl_pin_desc *pins; + unsigned int npins; +}; + +struct sx150x_pinctrl { + struct device *dev; + struct i2c_client *client; + struct pinctrl_dev *pctldev; + struct pinctrl_desc pinctrl_desc; + struct gpio_chip gpio; + struct irq_chip irq_chip; + struct regmap *regmap; + struct { + u32 sense; + u32 masked; + } irq; + struct mutex lock; + const struct sx150x_device_data *data; +}; + +static const struct pinctrl_pin_desc sx150x_4_pins[] = { + PINCTRL_PIN(0, "gpio0"), + PINCTRL_PIN(1, "gpio1"), + PINCTRL_PIN(2, "gpio2"), + PINCTRL_PIN(3, "gpio3"), + PINCTRL_PIN(4, "oscio"), +}; + +static const struct pinctrl_pin_desc sx150x_8_pins[] = { + PINCTRL_PIN(0, "gpio0"), + PINCTRL_PIN(1, "gpio1"), + PINCTRL_PIN(2, "gpio2"), + PINCTRL_PIN(3, "gpio3"), + PINCTRL_PIN(4, "gpio4"), + PINCTRL_PIN(5, "gpio5"), + PINCTRL_PIN(6, "gpio6"), + PINCTRL_PIN(7, "gpio7"), + PINCTRL_PIN(8, "oscio"), +}; + +static const struct pinctrl_pin_desc sx150x_16_pins[] = { + PINCTRL_PIN(0, "gpio0"), + PINCTRL_PIN(1, "gpio1"), + PINCTRL_PIN(2, "gpio2"), + PINCTRL_PIN(3, "gpio3"), + PINCTRL_PIN(4, "gpio4"), + PINCTRL_PIN(5, "gpio5"), + PINCTRL_PIN(6, "gpio6"), + PINCTRL_PIN(7, "gpio7"), + PINCTRL_PIN(8, "gpio8"), + PINCTRL_PIN(9, "gpio9"), + PINCTRL_PIN(10, "gpio10"), + PINCTRL_PIN(11, "gpio11"), + PINCTRL_PIN(12, "gpio12"), + PINCTRL_PIN(13, "gpio13"), + PINCTRL_PIN(14, "gpio14"), + PINCTRL_PIN(15, "gpio15"), + PINCTRL_PIN(16, "oscio"), +}; + +static const struct sx150x_device_data sx1501q_device_data = { + .model = SX150X_123, + .reg_pullup = 0x02, + .reg_pulldn = 0x03, + .reg_dir = 0x01, + .reg_data = 0x00, + .reg_irq_mask = 0x05, + .reg_irq_src = 0x08, + .reg_sense = 0x07, + .pri.x123 = { + .reg_pld_mode = 0x10, + .reg_pld_table0 = 0x11, + .reg_pld_table2 = 0x13, + .reg_advanced = 0xad, + }, + .ngpios = 4, + .pins = sx150x_4_pins, + .npins = 4, /* oscio not available */ +}; + +static const struct sx150x_device_data sx1502q_device_data = { + .model = SX150X_123, + .reg_pullup = 0x02, + .reg_pulldn = 0x03, + .reg_dir = 0x01, + .reg_data = 0x00, + .reg_irq_mask = 0x05, + .reg_irq_src = 0x08, + .reg_sense = 0x06, + .pri.x123 = { + .reg_pld_mode = 0x10, + .reg_pld_table0 = 0x11, + .reg_pld_table1 = 0x12, + .reg_pld_table2 = 0x13, + 
.reg_pld_table3 = 0x14, + .reg_pld_table4 = 0x15, + .reg_advanced = 0xad, + }, + .ngpios = 8, + .pins = sx150x_8_pins, + .npins = 8, /* oscio not available */ +}; + +static const struct sx150x_device_data sx1503q_device_data = { + .model = SX150X_123, + .reg_pullup = 0x04, + .reg_pulldn = 0x06, + .reg_dir = 0x02, + .reg_data = 0x00, + .reg_irq_mask = 0x08, + .reg_irq_src = 0x0e, + .reg_sense = 0x0a, + .pri.x123 = { + .reg_pld_mode = 0x20, + .reg_pld_table0 = 0x22, + .reg_pld_table1 = 0x24, + .reg_pld_table2 = 0x26, + .reg_pld_table3 = 0x28, + .reg_pld_table4 = 0x2a, + .reg_advanced = 0xad, + }, + .ngpios = 16, + .pins = sx150x_16_pins, + .npins = 16, /* oscio not available */ +}; + +static const struct sx150x_device_data sx1504q_device_data = { + .model = SX150X_456, + .reg_pullup = 0x02, + .reg_pulldn = 0x03, + .reg_dir = 0x01, + .reg_data = 0x00, + .reg_irq_mask = 0x05, + .reg_irq_src = 0x08, + .reg_sense = 0x07, + .pri.x456 = { + .reg_pld_mode = 0x10, + .reg_pld_table0 = 0x11, + .reg_pld_table2 = 0x13, + }, + .ngpios = 4, + .pins = sx150x_4_pins, + .npins = 4, /* oscio not available */ +}; + +static const struct sx150x_device_data sx1505q_device_data = { + .model = SX150X_456, + .reg_pullup = 0x02, + .reg_pulldn = 0x03, + .reg_dir = 0x01, + .reg_data = 0x00, + .reg_irq_mask = 0x05, + .reg_irq_src = 0x08, + .reg_sense = 0x06, + .pri.x456 = { + .reg_pld_mode = 0x10, + .reg_pld_table0 = 0x11, + .reg_pld_table1 = 0x12, + .reg_pld_table2 = 0x13, + .reg_pld_table3 = 0x14, + .reg_pld_table4 = 0x15, + }, + .ngpios = 8, + .pins = sx150x_8_pins, + .npins = 8, /* oscio not available */ +}; + +static const struct sx150x_device_data sx1506q_device_data = { + .model = SX150X_456, + .reg_pullup = 0x04, + .reg_pulldn = 0x06, + .reg_dir = 0x02, + .reg_data = 0x00, + .reg_irq_mask = 0x08, + .reg_irq_src = 0x0e, + .reg_sense = 0x0a, + .pri.x456 = { + .reg_pld_mode = 0x20, + .reg_pld_table0 = 0x22, + .reg_pld_table1 = 0x24, + .reg_pld_table2 = 0x26, + .reg_pld_table3 = 0x28, + .reg_pld_table4 = 0x2a, + .reg_advanced = 0xad, + }, + .ngpios = 16, + .pins = sx150x_16_pins, + .npins = 16, /* oscio not available */ +}; + +static const struct sx150x_device_data sx1507q_device_data = { + .model = SX150X_789, + .reg_pullup = 0x03, + .reg_pulldn = 0x04, + .reg_dir = 0x07, + .reg_data = 0x08, + .reg_irq_mask = 0x09, + .reg_irq_src = 0x0b, + .reg_sense = 0x0a, + .pri.x789 = { + .reg_drain = 0x05, + .reg_polarity = 0x06, + .reg_clock = 0x0d, + .reg_misc = 0x0e, + .reg_reset = 0x7d, + }, + .ngpios = 4, + .pins = sx150x_4_pins, + .npins = ARRAY_SIZE(sx150x_4_pins), +}; + +static const struct sx150x_device_data sx1508q_device_data = { + .model = SX150X_789, + .reg_pullup = 0x03, + .reg_pulldn = 0x04, + .reg_dir = 0x07, + .reg_data = 0x08, + .reg_irq_mask = 0x09, + .reg_irq_src = 0x0c, + .reg_sense = 0x0a, + .pri.x789 = { + .reg_drain = 0x05, + .reg_polarity = 0x06, + .reg_clock = 0x0f, + .reg_misc = 0x10, + .reg_reset = 0x7d, + }, + .ngpios = 8, + .pins = sx150x_8_pins, + .npins = ARRAY_SIZE(sx150x_8_pins), +}; + +static const struct sx150x_device_data sx1509q_device_data = { + .model = SX150X_789, + .reg_pullup = 0x06, + .reg_pulldn = 0x08, + .reg_dir = 0x0e, + .reg_data = 0x10, + .reg_irq_mask = 0x12, + .reg_irq_src = 0x18, + .reg_sense = 0x14, + .pri.x789 = { + .reg_drain = 0x0a, + .reg_polarity = 0x0c, + .reg_clock = 0x1e, + .reg_misc = 0x1f, + .reg_reset = 0x7d, + }, + .ngpios = 16, + .pins = sx150x_16_pins, + .npins = ARRAY_SIZE(sx150x_16_pins), +}; + +static int sx150x_pinctrl_get_groups_count(struct pinctrl_dev 
*pctldev) +{ + return 0; +} + +static const char *sx150x_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned int group) +{ + return NULL; +} + +static int sx150x_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int group, + const unsigned int **pins, + unsigned int *num_pins) +{ + return -ENOTSUPP; +} + +static const struct pinctrl_ops sx150x_pinctrl_ops = { + .get_groups_count = sx150x_pinctrl_get_groups_count, + .get_group_name = sx150x_pinctrl_get_group_name, + .get_group_pins = sx150x_pinctrl_get_group_pins, +#ifdef CONFIG_OF + .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, + .dt_free_map = pinctrl_utils_free_map, +#endif +}; + +static bool sx150x_pin_is_oscio(struct sx150x_pinctrl *pctl, unsigned int pin) +{ + if (pin >= pctl->data->npins) + return false; + + /* OSCIO pin is only present in 789 devices */ + if (pctl->data->model != SX150X_789) + return false; + + return !strcmp(pctl->data->pins[pin].name, "oscio"); +} + +static int sx150x_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); + unsigned int value; + int ret; + + if (sx150x_pin_is_oscio(pctl, offset)) + return false; + + ret = regmap_read(pctl->regmap, pctl->data->reg_dir, &value); + if (ret < 0) + return ret; + + return !!(value & BIT(offset)); +} + +static int sx150x_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); + unsigned int value; + int ret; + + if (sx150x_pin_is_oscio(pctl, offset)) + return -EINVAL; + + ret = regmap_read(pctl->regmap, pctl->data->reg_data, &value); + if (ret < 0) + return ret; + + return !!(value & BIT(offset)); +} + +static int sx150x_gpio_set_single_ended(struct gpio_chip *chip, + unsigned int offset, + enum single_ended_mode mode) +{ + struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); + int ret; + + switch (mode) { + case LINE_MODE_PUSH_PULL: + if (pctl->data->model != SX150X_789 || + sx150x_pin_is_oscio(pctl, offset)) + return 0; + + ret = regmap_write_bits(pctl->regmap, + pctl->data->pri.x789.reg_drain, + BIT(offset), 0); + break; + + case LINE_MODE_OPEN_DRAIN: + if (pctl->data->model != SX150X_789 || + sx150x_pin_is_oscio(pctl, offset)) + return -ENOTSUPP; + + ret = regmap_write_bits(pctl->regmap, + pctl->data->pri.x789.reg_drain, + BIT(offset), BIT(offset)); + break; + default: + ret = -ENOTSUPP; + break; + } + + return ret; +} + +static int __sx150x_gpio_set(struct sx150x_pinctrl *pctl, unsigned int offset, + int value) +{ + return regmap_write_bits(pctl->regmap, pctl->data->reg_data, + BIT(offset), value ? BIT(offset) : 0); +} + +static int sx150x_gpio_oscio_set(struct sx150x_pinctrl *pctl, + int value) +{ + return regmap_write(pctl->regmap, + pctl->data->pri.x789.reg_clock, + (value ? 
0x1f : 0x10)); +} + +static void sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); + + if (sx150x_pin_is_oscio(pctl, offset)) + sx150x_gpio_oscio_set(pctl, value); + else + __sx150x_gpio_set(pctl, offset, value); + +} + +static void sx150x_gpio_set_multiple(struct gpio_chip *chip, + unsigned long *mask, + unsigned long *bits) +{ + struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); + + regmap_write_bits(pctl->regmap, pctl->data->reg_data, *mask, *bits); +} + +static int sx150x_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); + + if (sx150x_pin_is_oscio(pctl, offset)) + return -EINVAL; + + return regmap_write_bits(pctl->regmap, + pctl->data->reg_dir, + BIT(offset), BIT(offset)); +} + +static int sx150x_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); + int ret; + + if (sx150x_pin_is_oscio(pctl, offset)) + return sx150x_gpio_oscio_set(pctl, value); + + ret = __sx150x_gpio_set(pctl, offset, value); + if (ret < 0) + return ret; + + return regmap_write_bits(pctl->regmap, + pctl->data->reg_dir, + BIT(offset), 0); +} + +static void sx150x_irq_mask(struct irq_data *d) +{ + struct sx150x_pinctrl *pctl = + gpiochip_get_data(irq_data_get_irq_chip_data(d)); + unsigned int n = d->hwirq; + + pctl->irq.masked |= BIT(n); +} + +static void sx150x_irq_unmask(struct irq_data *d) +{ + struct sx150x_pinctrl *pctl = + gpiochip_get_data(irq_data_get_irq_chip_data(d)); + unsigned int n = d->hwirq; + + pctl->irq.masked &= ~BIT(n); +} + +static void sx150x_irq_set_sense(struct sx150x_pinctrl *pctl, + unsigned int line, unsigned int sense) +{ + /* + * Every interrupt line is represented by two bits shifted + * proportionally to the line number + */ + const unsigned int n = line * 2; + const unsigned int mask = ~((SX150X_IRQ_TYPE_EDGE_RISING | + SX150X_IRQ_TYPE_EDGE_FALLING) << n); + + pctl->irq.sense &= mask; + pctl->irq.sense |= sense << n; +} + +static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct sx150x_pinctrl *pctl = + gpiochip_get_data(irq_data_get_irq_chip_data(d)); + unsigned int n, val = 0; + + if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + return -EINVAL; + + n = d->hwirq; + + if (flow_type & IRQ_TYPE_EDGE_RISING) + val |= SX150X_IRQ_TYPE_EDGE_RISING; + if (flow_type & IRQ_TYPE_EDGE_FALLING) + val |= SX150X_IRQ_TYPE_EDGE_FALLING; + + sx150x_irq_set_sense(pctl, n, val); + return 0; +} + +static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id) +{ + struct sx150x_pinctrl *pctl = (struct sx150x_pinctrl *)dev_id; + unsigned long n, status; + unsigned int val; + int err; + + err = regmap_read(pctl->regmap, pctl->data->reg_irq_src, &val); + if (err < 0) + return IRQ_NONE; + + err = regmap_write(pctl->regmap, pctl->data->reg_irq_src, val); + if (err < 0) + return IRQ_NONE; + + status = val; + for_each_set_bit(n, &status, pctl->data->ngpios) + handle_nested_irq(irq_find_mapping(pctl->gpio.irqdomain, n)); + + return IRQ_HANDLED; +} + +static void sx150x_irq_bus_lock(struct irq_data *d) +{ + struct sx150x_pinctrl *pctl = + gpiochip_get_data(irq_data_get_irq_chip_data(d)); + + mutex_lock(&pctl->lock); +} + +static void sx150x_irq_bus_sync_unlock(struct irq_data *d) +{ + struct sx150x_pinctrl *pctl = + gpiochip_get_data(irq_data_get_irq_chip_data(d)); + + regmap_write(pctl->regmap, pctl->data->reg_irq_mask, 
pctl->irq.masked); + regmap_write(pctl->regmap, pctl->data->reg_sense, pctl->irq.sense); + mutex_unlock(&pctl->lock); +} + +static int sx150x_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *config) +{ + struct sx150x_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + unsigned int param = pinconf_to_config_param(*config); + int ret; + u32 arg; + unsigned int data; + + if (sx150x_pin_is_oscio(pctl, pin)) { + switch (param) { + case PIN_CONFIG_DRIVE_PUSH_PULL: + case PIN_CONFIG_OUTPUT: + ret = regmap_read(pctl->regmap, + pctl->data->pri.x789.reg_clock, + &data); + if (ret < 0) + return ret; + + if (param == PIN_CONFIG_DRIVE_PUSH_PULL) + arg = (data & 0x1f) ? 1 : 0; + else { + if ((data & 0x1f) == 0x1f) + arg = 1; + else if ((data & 0x1f) == 0x10) + arg = 0; + else + return -EINVAL; + } + + break; + default: + return -ENOTSUPP; + } + + goto out; + } + + switch (param) { + case PIN_CONFIG_BIAS_PULL_DOWN: + ret = regmap_read(pctl->regmap, + pctl->data->reg_pulldn, + &data); + data &= BIT(pin); + + if (ret < 0) + return ret; + + if (!ret) + return -EINVAL; + + arg = 1; + break; + + case PIN_CONFIG_BIAS_PULL_UP: + ret = regmap_read(pctl->regmap, + pctl->data->reg_pullup, + &data); + data &= BIT(pin); + + if (ret < 0) + return ret; + + if (!ret) + return -EINVAL; + + arg = 1; + break; + + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + if (pctl->data->model != SX150X_789) + return -ENOTSUPP; + + ret = regmap_read(pctl->regmap, + pctl->data->pri.x789.reg_drain, + &data); + data &= BIT(pin); + + if (ret < 0) + return ret; + + if (!data) + return -EINVAL; + + arg = 1; + break; + + case PIN_CONFIG_DRIVE_PUSH_PULL: + if (pctl->data->model != SX150X_789) + arg = true; + else { + ret = regmap_read(pctl->regmap, + pctl->data->pri.x789.reg_drain, + &data); + data &= BIT(pin); + + if (ret < 0) + return ret; + + if (data) + return -EINVAL; + + arg = 1; + } + break; + + case PIN_CONFIG_OUTPUT: + ret = sx150x_gpio_get_direction(&pctl->gpio, pin); + if (ret < 0) + return ret; + + if (ret) + return -EINVAL; + + ret = sx150x_gpio_get(&pctl->gpio, pin); + if (ret < 0) + return ret; + + arg = ret; + break; + + default: + return -ENOTSUPP; + } + +out: + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int sx150x_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int num_configs) +{ + struct sx150x_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param; + u32 arg; + int i; + int ret; + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + if (sx150x_pin_is_oscio(pctl, pin)) { + if (param == PIN_CONFIG_OUTPUT) { + ret = sx150x_gpio_direction_output(&pctl->gpio, + pin, arg); + if (ret < 0) + return ret; + + continue; + } else + return -ENOTSUPP; + } + + switch (param) { + case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: + case PIN_CONFIG_BIAS_DISABLE: + ret = regmap_write_bits(pctl->regmap, + pctl->data->reg_pulldn, + BIT(pin), 0); + if (ret < 0) + return ret; + + ret = regmap_write_bits(pctl->regmap, + pctl->data->reg_pullup, + BIT(pin), 0); + if (ret < 0) + return ret; + + break; + + case PIN_CONFIG_BIAS_PULL_UP: + ret = regmap_write_bits(pctl->regmap, + pctl->data->reg_pullup, + BIT(pin), BIT(pin)); + if (ret < 0) + return ret; + + break; + + case PIN_CONFIG_BIAS_PULL_DOWN: + ret = regmap_write_bits(pctl->regmap, + pctl->data->reg_pulldn, + BIT(pin), BIT(pin)); + if (ret < 0) + return ret; + + break; + + case 
PIN_CONFIG_DRIVE_OPEN_DRAIN: + ret = sx150x_gpio_set_single_ended(&pctl->gpio, + pin, LINE_MODE_OPEN_DRAIN); + if (ret < 0) + return ret; + + break; + + case PIN_CONFIG_DRIVE_PUSH_PULL: + ret = sx150x_gpio_set_single_ended(&pctl->gpio, + pin, LINE_MODE_PUSH_PULL); + if (ret < 0) + return ret; + + break; + + case PIN_CONFIG_OUTPUT: + ret = sx150x_gpio_direction_output(&pctl->gpio, + pin, arg); + if (ret < 0) + return ret; + + break; + + default: + return -ENOTSUPP; + } + } /* for each config */ + + return 0; +} + +static const struct pinconf_ops sx150x_pinconf_ops = { + .pin_config_get = sx150x_pinconf_get, + .pin_config_set = sx150x_pinconf_set, + .is_generic = true, +}; + +static const struct i2c_device_id sx150x_id[] = { + {"sx1501q", (kernel_ulong_t) &sx1501q_device_data }, + {"sx1502q", (kernel_ulong_t) &sx1502q_device_data }, + {"sx1503q", (kernel_ulong_t) &sx1503q_device_data }, + {"sx1504q", (kernel_ulong_t) &sx1504q_device_data }, + {"sx1505q", (kernel_ulong_t) &sx1505q_device_data }, + {"sx1506q", (kernel_ulong_t) &sx1506q_device_data }, + {"sx1507q", (kernel_ulong_t) &sx1507q_device_data }, + {"sx1508q", (kernel_ulong_t) &sx1508q_device_data }, + {"sx1509q", (kernel_ulong_t) &sx1509q_device_data }, + {} +}; + +static const struct of_device_id sx150x_of_match[] = { + { .compatible = "semtech,sx1501q", .data = &sx1501q_device_data }, + { .compatible = "semtech,sx1502q", .data = &sx1502q_device_data }, + { .compatible = "semtech,sx1503q", .data = &sx1503q_device_data }, + { .compatible = "semtech,sx1504q", .data = &sx1504q_device_data }, + { .compatible = "semtech,sx1505q", .data = &sx1505q_device_data }, + { .compatible = "semtech,sx1506q", .data = &sx1506q_device_data }, + { .compatible = "semtech,sx1507q", .data = &sx1507q_device_data }, + { .compatible = "semtech,sx1508q", .data = &sx1508q_device_data }, + { .compatible = "semtech,sx1509q", .data = &sx1509q_device_data }, + {}, +}; + +static int sx150x_reset(struct sx150x_pinctrl *pctl) +{ + int err; + + err = i2c_smbus_write_byte_data(pctl->client, + pctl->data->pri.x789.reg_reset, + SX150X_789_RESET_KEY1); + if (err < 0) + return err; + + err = i2c_smbus_write_byte_data(pctl->client, + pctl->data->pri.x789.reg_reset, + SX150X_789_RESET_KEY2); + return err; +} + +static int sx150x_init_misc(struct sx150x_pinctrl *pctl) +{ + u8 reg, value; + + switch (pctl->data->model) { + case SX150X_789: + reg = pctl->data->pri.x789.reg_misc; + value = SX150X_789_REG_MISC_AUTOCLEAR_OFF; + break; + case SX150X_456: + reg = pctl->data->pri.x456.reg_advanced; + value = 0x00; + + /* + * Only SX1506 has RegAdvanced, SX1504/5 are expected + * to initialize this offset to zero + */ + if (!reg) + return 0; + break; + case SX150X_123: + reg = pctl->data->pri.x123.reg_advanced; + value = 0x00; + break; + default: + WARN(1, "Unknown chip model %d\n", pctl->data->model); + return -EINVAL; + } + + return regmap_write(pctl->regmap, reg, value); +} + +static int sx150x_init_hw(struct sx150x_pinctrl *pctl) +{ + const u8 reg[] = { + [SX150X_789] = pctl->data->pri.x789.reg_polarity, + [SX150X_456] = pctl->data->pri.x456.reg_pld_mode, + [SX150X_123] = pctl->data->pri.x123.reg_pld_mode, + }; + int err; + + if (pctl->data->model == SX150X_789 && + of_property_read_bool(pctl->dev->of_node, "semtech,probe-reset")) { + err = sx150x_reset(pctl); + if (err < 0) + return err; + } + + err = sx150x_init_misc(pctl); + if (err < 0) + return err; + + /* Set all pins to work in normal mode */ + return regmap_write(pctl->regmap, reg[pctl->data->model], 0); +} + +static int 
sx150x_regmap_reg_width(struct sx150x_pinctrl *pctl,
+                                  unsigned int reg)
+{
+        const struct sx150x_device_data *data = pctl->data;
+
+        if (reg == data->reg_sense) {
+                /*
+                 * RegSense packs two bits of configuration per GPIO,
+                 * so we'd need to read twice as many bits as there
+                 * are GPIOs in our chip
+                 */
+                return 2 * data->ngpios;
+        } else if ((data->model == SX150X_789 &&
+                    (reg == data->pri.x789.reg_misc ||
+                     reg == data->pri.x789.reg_clock ||
+                     reg == data->pri.x789.reg_reset))
+                   ||
+                   (data->model == SX150X_123 &&
+                    reg == data->pri.x123.reg_advanced)
+                   ||
+                   (data->model == SX150X_456 &&
+                    data->pri.x456.reg_advanced &&
+                    reg == data->pri.x456.reg_advanced)) {
+                return 8;
+        } else {
+                return data->ngpios;
+        }
+}
+
+static unsigned int sx150x_maybe_swizzle(struct sx150x_pinctrl *pctl,
+                                         unsigned int reg, unsigned int val)
+{
+        unsigned int a, b;
+        const struct sx150x_device_data *data = pctl->data;
+
+        /*
+         * Whereas SX1509 presents RegSense in a simple layout as such:
+         *      reg     [ f f e e d d c c ]
+         *      reg + 1 [ b b a a 9 9 8 8 ]
+         *      reg + 2 [ 7 7 6 6 5 5 4 4 ]
+         *      reg + 3 [ 3 3 2 2 1 1 0 0 ]
+         *
+         * SX1503 and SX1506 deviate from that data layout, instead storing
+         * their contents as follows:
+         *
+         *      reg     [ f f e e d d c c ]
+         *      reg + 1 [ 7 7 6 6 5 5 4 4 ]
+         *      reg + 2 [ b b a a 9 9 8 8 ]
+         *      reg + 3 [ 3 3 2 2 1 1 0 0 ]
+         *
+         * so, taking that into account, we swap the two
+         * inner bytes of a 4-byte result
+         */
+
+        if (reg == data->reg_sense &&
+            data->ngpios == 16 &&
+            (data->model == SX150X_123 ||
+             data->model == SX150X_456)) {
+                a = val & 0x00ff0000;
+                b = val & 0x0000ff00;
+
+                val &= 0xff0000ff;
+                val |= b << 8;
+                val |= a >> 8;
+        }
+
+        return val;
+}
+
+/*
+ * In order to mask the differences between 16 and 8 bit expander
+ * devices we set up a slightly fictitious regmap that pretends to be
+ * a set of 32-bit (to accommodate RegSenseLow/RegSenseHigh
+ * pair/quartet) registers and transparently reconstructs those
+ * registers via multiple I2C/SMBus reads.
+ *
+ * This way the rest of the driver code, interfacing with the chip via
+ * the regmap API, can work assuming that each GPIO pin is represented by
+ * a group of bits at an offset proportional to the GPIO number within a
+ * given register.
+ */ +static int sx150x_regmap_reg_read(void *context, unsigned int reg, + unsigned int *result) +{ + int ret, n; + struct sx150x_pinctrl *pctl = context; + struct i2c_client *i2c = pctl->client; + const int width = sx150x_regmap_reg_width(pctl, reg); + unsigned int idx, val; + + /* + * There are four potential cases covered by this function: + * + * 1) 8-pin chip, single configuration bit register + * + * This is trivial the code below just needs to read: + * reg [ 7 6 5 4 3 2 1 0 ] + * + * 2) 8-pin chip, double configuration bit register (RegSense) + * + * The read will be done as follows: + * reg [ 7 7 6 6 5 5 4 4 ] + * reg + 1 [ 3 3 2 2 1 1 0 0 ] + * + * 3) 16-pin chip, single configuration bit register + * + * The read will be done as follows: + * reg [ f e d c b a 9 8 ] + * reg + 1 [ 7 6 5 4 3 2 1 0 ] + * + * 4) 16-pin chip, double configuration bit register (RegSense) + * + * The read will be done as follows: + * reg [ f f e e d d c c ] + * reg + 1 [ b b a a 9 9 8 8 ] + * reg + 2 [ 7 7 6 6 5 5 4 4 ] + * reg + 3 [ 3 3 2 2 1 1 0 0 ] + */ + + for (n = width, val = 0, idx = reg; n > 0; n -= 8, idx++) { + val <<= 8; + + ret = i2c_smbus_read_byte_data(i2c, idx); + if (ret < 0) + return ret; + + val |= ret; + } + + *result = sx150x_maybe_swizzle(pctl, reg, val); + + return 0; +} + +static int sx150x_regmap_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + int ret, n; + struct sx150x_pinctrl *pctl = context; + struct i2c_client *i2c = pctl->client; + const int width = sx150x_regmap_reg_width(pctl, reg); + + val = sx150x_maybe_swizzle(pctl, reg, val); + + n = (width - 1) & ~7; + do { + const u8 byte = (val >> n) & 0xff; + + ret = i2c_smbus_write_byte_data(i2c, reg, byte); + if (ret < 0) + return ret; + + reg++; + n -= 8; + } while (n >= 0); + + return 0; +} + +static bool sx150x_reg_volatile(struct device *dev, unsigned int reg) +{ + struct sx150x_pinctrl *pctl = i2c_get_clientdata(to_i2c_client(dev)); + + return reg == pctl->data->reg_irq_src || reg == pctl->data->reg_data; +} + +const struct regmap_config sx150x_regmap_config = { + .reg_bits = 8, + .val_bits = 32, + + .cache_type = REGCACHE_RBTREE, + + .reg_read = sx150x_regmap_reg_read, + .reg_write = sx150x_regmap_reg_write, + + .max_register = SX150X_MAX_REGISTER, + .volatile_reg = sx150x_reg_volatile, +}; + +static int sx150x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WRITE_WORD_DATA; + struct device *dev = &client->dev; + struct sx150x_pinctrl *pctl; + int ret; + + if (!i2c_check_functionality(client->adapter, i2c_funcs)) + return -ENOSYS; + + pctl = devm_kzalloc(dev, sizeof(*pctl), GFP_KERNEL); + if (!pctl) + return -ENOMEM; + + i2c_set_clientdata(client, pctl); + + pctl->dev = dev; + pctl->client = client; + + if (dev->of_node) + pctl->data = of_device_get_match_data(dev); + else + pctl->data = (struct sx150x_device_data *)id->driver_data; + + if (!pctl->data) + return -EINVAL; + + pctl->regmap = devm_regmap_init(dev, NULL, pctl, + &sx150x_regmap_config); + if (IS_ERR(pctl->regmap)) { + ret = PTR_ERR(pctl->regmap); + dev_err(dev, "Failed to allocate register map: %d\n", + ret); + return ret; + } + + mutex_init(&pctl->lock); + + ret = sx150x_init_hw(pctl); + if (ret) + return ret; + + /* Register GPIO controller */ + pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL); + pctl->gpio.base = -1; + pctl->gpio.ngpio = pctl->data->npins; + pctl->gpio.get_direction = sx150x_gpio_get_direction; + 
pctl->gpio.direction_input = sx150x_gpio_direction_input;
+        pctl->gpio.direction_output = sx150x_gpio_direction_output;
+        pctl->gpio.get = sx150x_gpio_get;
+        pctl->gpio.set = sx150x_gpio_set;
+        pctl->gpio.set_single_ended = sx150x_gpio_set_single_ended;
+        pctl->gpio.parent = dev;
+#ifdef CONFIG_OF_GPIO
+        pctl->gpio.of_node = dev->of_node;
+#endif
+        pctl->gpio.can_sleep = true;
+        /*
+         * Setting multiple pins is not safe when all pins are not
+         * handled by the same regmap register. The oscio pin (present
+         * on the SX150X_789 chips) lives in its own register, so
+         * would require locking that is not in place at this time.
+         */
+        if (pctl->data->model != SX150X_789)
+                pctl->gpio.set_multiple = sx150x_gpio_set_multiple;
+
+        ret = devm_gpiochip_add_data(dev, &pctl->gpio, pctl);
+        if (ret)
+                return ret;
+
+        /* Add interrupt support if an irq is specified */
+        if (client->irq > 0) {
+                pctl->irq_chip.name = devm_kstrdup(dev, client->name,
+                                                   GFP_KERNEL);
+                pctl->irq_chip.irq_mask = sx150x_irq_mask;
+                pctl->irq_chip.irq_unmask = sx150x_irq_unmask;
+                pctl->irq_chip.irq_set_type = sx150x_irq_set_type;
+                pctl->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
+                pctl->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+
+                pctl->irq.masked = ~0;
+                pctl->irq.sense = 0;
+
+                /*
+                 * Because sx150x_irq_thread_fn invokes all of the
+                 * nested interrupt handlers via handle_nested_irq,
+                 * any "handler" passed to gpiochip_irqchip_add()
+                 * below is going to be ignored, so the choice of the
+                 * function does not matter that much.
+                 *
+                 * We set it to handle_bad_irq to avoid confusion,
+                 * plus it will be instantly noticeable if it is ever
+                 * called (should not happen).
+                 */
+                ret = gpiochip_irqchip_add_nested(&pctl->gpio,
+                                                  &pctl->irq_chip, 0,
+                                                  handle_bad_irq, IRQ_TYPE_NONE);
+                if (ret) {
+                        dev_err(dev, "could not connect irqchip to gpiochip\n");
+                        return ret;
+                }
+
+                ret = devm_request_threaded_irq(dev, client->irq, NULL,
+                                                sx150x_irq_thread_fn,
+                                                IRQF_ONESHOT | IRQF_SHARED |
+                                                IRQF_TRIGGER_FALLING,
+                                                pctl->irq_chip.name, pctl);
+                if (ret < 0)
+                        return ret;
+
+                gpiochip_set_nested_irqchip(&pctl->gpio,
+                                            &pctl->irq_chip,
+                                            client->irq);
+        }
+
+        /* Pinctrl_desc */
+        pctl->pinctrl_desc.name = "sx150x-pinctrl";
+        pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops;
+        pctl->pinctrl_desc.confops = &sx150x_pinconf_ops;
+        pctl->pinctrl_desc.pins = pctl->data->pins;
+        pctl->pinctrl_desc.npins = pctl->data->npins;
+        pctl->pinctrl_desc.owner = THIS_MODULE;
+
+        pctl->pctldev = pinctrl_register(&pctl->pinctrl_desc, dev, pctl);
+        if (IS_ERR(pctl->pctldev)) {
+                dev_err(dev, "Failed to register pinctrl device\n");
+                return PTR_ERR(pctl->pctldev);
+        }
+
+        return 0;
+}
+
+static struct i2c_driver sx150x_driver = {
+        .driver = {
+                .name = "sx150x-pinctrl",
+                .of_match_table = of_match_ptr(sx150x_of_match),
+        },
+        .probe = sx150x_probe,
+        .id_table = sx150x_id,
+};
+
+static int __init sx150x_init(void)
+{
+        return i2c_add_driver(&sx150x_driver);
+}
+subsys_initcall(sx150x_init);
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index e0ecffcbe11f6ec58026f59e63e66b69fc5be7c4..b51a46dfdcc35503a8168af56dbb28bace6c31e9 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -247,6 +247,8 @@ static const unsigned int smc0_nor_addr25_pins[] = {1};
 static const unsigned int smc0_nand_pins[] = {0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23};
+static const unsigned int smc0_nand8_pins[] = {0, 2, 3, 4, 5, 6, 7,
+                                               8, 9, 10, 11, 12, 13, 14};
 /* Note:
CAN MIO clock inputs are modeled in the clock framework */ static const unsigned int can0_0_pins[] = {10, 11}; static const unsigned int can0_1_pins[] = {14, 15}; @@ -445,6 +447,7 @@ static const struct zynq_pctrl_group zynq_pctrl_groups[] = { DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor_cs1), DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor_addr25), DEFINE_ZYNQ_PINCTRL_GRP(smc0_nand), + DEFINE_ZYNQ_PINCTRL_GRP(smc0_nand8), DEFINE_ZYNQ_PINCTRL_GRP(can0_0), DEFINE_ZYNQ_PINCTRL_GRP(can0_1), DEFINE_ZYNQ_PINCTRL_GRP(can0_2), @@ -709,7 +712,8 @@ static const char * const sdio1_wp_groups[] = {"gpio0_0_grp", static const char * const smc0_nor_groups[] = {"smc0_nor_grp"}; static const char * const smc0_nor_cs1_groups[] = {"smc0_nor_cs1_grp"}; static const char * const smc0_nor_addr25_groups[] = {"smc0_nor_addr25_grp"}; -static const char * const smc0_nand_groups[] = {"smc0_nand_grp"}; +static const char * const smc0_nand_groups[] = {"smc0_nand_grp", + "smc0_nand8_grp"}; static const char * const can0_groups[] = {"can0_0_grp", "can0_1_grp", "can0_2_grp", "can0_3_grp", "can0_4_grp", "can0_5_grp", "can0_6_grp", "can0_7_grp", "can0_8_grp", "can0_9_grp", diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 93ef268d5ccdbcab5a78a055dd9c4f87d133da95..3ebdc01f53c0072671c8f79afc9547d5839e5826 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -79,6 +79,15 @@ config PINCTRL_MSM8916 This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm TLMM block found on the Qualcomm 8916 platform. +config PINCTRL_MSM8994 + tristate "Qualcomm 8994 pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm TLMM block found in the Qualcomm 8994 platform. The + Qualcomm 8992 platform is also supported by this driver. + config PINCTRL_MSM8996 tristate "Qualcomm MSM8996 pin controller driver" depends on GPIOLIB && OF diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 8319e11cecb57c75a84d18c7641fb729e9028cb6..ab47764dbc5c6d018661a3a035bed1082e58c3dc 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o +obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c new file mode 100644 index 0000000000000000000000000000000000000000..8e16d9ae0c39fc20c4c9217543ee6d0151b0f83d --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c @@ -0,0 +1,1379 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [MSM_MUX_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + MSM_MUX_gpio, \ + MSM_MUX_##f1, \ + MSM_MUX_##f2, \ + MSM_MUX_##f3, \ + MSM_MUX_##f4, \ + MSM_MUX_##f5, \ + MSM_MUX_##f6, \ + MSM_MUX_##f7, \ + MSM_MUX_##f8, \ + MSM_MUX_##f9, \ + MSM_MUX_##f10, \ + MSM_MUX_##f11 \ + }, \ + .nfuncs = 12, \ + .ctl_reg = 0x1000 + 0x10 * id, \ + .io_reg = 0x1004 + 0x10 * id, \ + .intr_cfg_reg = 0x1008 + 0x10 * id, \ + .intr_status_reg = 0x100c + 0x10 * id, \ + .intr_target_reg = 0x1008 + 0x10 * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 4, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_target_kpss_val = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } +static const struct pinctrl_pin_desc msm8994_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, 
"GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "SDC1_RCLK"), + PINCTRL_PIN(147, "SDC1_CLK"), + PINCTRL_PIN(148, "SDC1_CMD"), + PINCTRL_PIN(149, "SDC1_DATA"), + PINCTRL_PIN(150, "SDC2_CLK"), + PINCTRL_PIN(151, "SDC2_CMD"), + PINCTRL_PIN(152, "SDC2_DATA"), + PINCTRL_PIN(153, "SDC3_CLK"), + PINCTRL_PIN(154, "SDC3_CMD"), + PINCTRL_PIN(155, "SDC3_DATA"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); 
+DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); +DECLARE_MSM_GPIO_PINS(123); +DECLARE_MSM_GPIO_PINS(124); +DECLARE_MSM_GPIO_PINS(125); +DECLARE_MSM_GPIO_PINS(126); +DECLARE_MSM_GPIO_PINS(127); +DECLARE_MSM_GPIO_PINS(128); +DECLARE_MSM_GPIO_PINS(129); +DECLARE_MSM_GPIO_PINS(130); +DECLARE_MSM_GPIO_PINS(131); +DECLARE_MSM_GPIO_PINS(132); +DECLARE_MSM_GPIO_PINS(133); +DECLARE_MSM_GPIO_PINS(134); +DECLARE_MSM_GPIO_PINS(135); +DECLARE_MSM_GPIO_PINS(136); +DECLARE_MSM_GPIO_PINS(137); 
+DECLARE_MSM_GPIO_PINS(138); +DECLARE_MSM_GPIO_PINS(139); +DECLARE_MSM_GPIO_PINS(140); +DECLARE_MSM_GPIO_PINS(141); +DECLARE_MSM_GPIO_PINS(142); +DECLARE_MSM_GPIO_PINS(143); +DECLARE_MSM_GPIO_PINS(144); +DECLARE_MSM_GPIO_PINS(145); + +static const unsigned int sdc1_rclk_pins[] = { 146 }; +static const unsigned int sdc1_clk_pins[] = { 147 }; +static const unsigned int sdc1_cmd_pins[] = { 148 }; +static const unsigned int sdc1_data_pins[] = { 149 }; +static const unsigned int sdc2_clk_pins[] = { 150 }; +static const unsigned int sdc2_cmd_pins[] = { 151 }; +static const unsigned int sdc2_data_pins[] = { 152 }; +static const unsigned int sdc3_clk_pins[] = { 153 }; +static const unsigned int sdc3_cmd_pins[] = { 154 }; +static const unsigned int sdc3_data_pins[] = { 155 }; + +enum msm8994_functions { + MSM_MUX_audio_ref_clk, + MSM_MUX_blsp_i2c1, + MSM_MUX_blsp_i2c2, + MSM_MUX_blsp_i2c3, + MSM_MUX_blsp_i2c4, + MSM_MUX_blsp_i2c5, + MSM_MUX_blsp_i2c6, + MSM_MUX_blsp_i2c7, + MSM_MUX_blsp_i2c8, + MSM_MUX_blsp_i2c9, + MSM_MUX_blsp_i2c10, + MSM_MUX_blsp_i2c11, + MSM_MUX_blsp_i2c12, + MSM_MUX_blsp_spi1, + MSM_MUX_blsp_spi1_cs1, + MSM_MUX_blsp_spi1_cs2, + MSM_MUX_blsp_spi1_cs3, + MSM_MUX_blsp_spi2, + MSM_MUX_blsp_spi2_cs1, + MSM_MUX_blsp_spi2_cs2, + MSM_MUX_blsp_spi2_cs3, + MSM_MUX_blsp_spi3, + MSM_MUX_blsp_spi4, + MSM_MUX_blsp_spi5, + MSM_MUX_blsp_spi6, + MSM_MUX_blsp_spi7, + MSM_MUX_blsp_spi8, + MSM_MUX_blsp_spi9, + MSM_MUX_blsp_spi10, + MSM_MUX_blsp_spi10_cs1, + MSM_MUX_blsp_spi10_cs2, + MSM_MUX_blsp_spi10_cs3, + MSM_MUX_blsp_spi11, + MSM_MUX_blsp_spi12, + MSM_MUX_blsp_uart1, + MSM_MUX_blsp_uart2, + MSM_MUX_blsp_uart3, + MSM_MUX_blsp_uart4, + MSM_MUX_blsp_uart5, + MSM_MUX_blsp_uart6, + MSM_MUX_blsp_uart7, + MSM_MUX_blsp_uart8, + MSM_MUX_blsp_uart9, + MSM_MUX_blsp_uart10, + MSM_MUX_blsp_uart11, + MSM_MUX_blsp_uart12, + MSM_MUX_blsp_uim1, + MSM_MUX_blsp_uim2, + MSM_MUX_blsp_uim3, + MSM_MUX_blsp_uim4, + MSM_MUX_blsp_uim5, + MSM_MUX_blsp_uim6, + MSM_MUX_blsp_uim7, + MSM_MUX_blsp_uim8, + MSM_MUX_blsp_uim9, + MSM_MUX_blsp_uim10, + MSM_MUX_blsp_uim11, + MSM_MUX_blsp_uim12, + MSM_MUX_blsp11_i2c_scl_b, + MSM_MUX_blsp11_i2c_sda_b, + MSM_MUX_blsp11_uart_rx_b, + MSM_MUX_blsp11_uart_tx_b, + MSM_MUX_cam_mclk0, + MSM_MUX_cam_mclk1, + MSM_MUX_cam_mclk2, + MSM_MUX_cam_mclk3, + MSM_MUX_cci_async_in0, + MSM_MUX_cci_async_in1, + MSM_MUX_cci_async_in2, + MSM_MUX_cci_i2c0, + MSM_MUX_cci_i2c1, + MSM_MUX_cci_timer0, + MSM_MUX_cci_timer1, + MSM_MUX_cci_timer2, + MSM_MUX_cci_timer3, + MSM_MUX_cci_timer4, + MSM_MUX_gcc_gp1_clk_a, + MSM_MUX_gcc_gp1_clk_b, + MSM_MUX_gcc_gp2_clk_a, + MSM_MUX_gcc_gp2_clk_b, + MSM_MUX_gcc_gp3_clk_a, + MSM_MUX_gcc_gp3_clk_b, + MSM_MUX_gp_mn, + MSM_MUX_gp_pdm0, + MSM_MUX_gp_pdm1, + MSM_MUX_gp_pdm2, + MSM_MUX_gp0_clk, + MSM_MUX_gp1_clk, + MSM_MUX_gps_tx, + MSM_MUX_gsm_tx, + MSM_MUX_hdmi_cec, + MSM_MUX_hdmi_ddc, + MSM_MUX_hdmi_hpd, + MSM_MUX_hdmi_rcv, + MSM_MUX_mdp_vsync, + MSM_MUX_mss_lte, + MSM_MUX_nav_pps, + MSM_MUX_nav_tsync, + MSM_MUX_qdss_cti_trig_in_a, + MSM_MUX_qdss_cti_trig_in_b, + MSM_MUX_qdss_cti_trig_in_c, + MSM_MUX_qdss_cti_trig_in_d, + MSM_MUX_qdss_cti_trig_out_a, + MSM_MUX_qdss_cti_trig_out_b, + MSM_MUX_qdss_cti_trig_out_c, + MSM_MUX_qdss_cti_trig_out_d, + MSM_MUX_qdss_traceclk_a, + MSM_MUX_qdss_traceclk_b, + MSM_MUX_qdss_tracectl_a, + MSM_MUX_qdss_tracectl_b, + MSM_MUX_qdss_tracedata_a, + MSM_MUX_qdss_tracedata_b, + MSM_MUX_qua_mi2s, + MSM_MUX_pci_e0, + MSM_MUX_pci_e1, + MSM_MUX_pri_mi2s, + MSM_MUX_sdc4, + MSM_MUX_sec_mi2s, + MSM_MUX_slimbus, + MSM_MUX_spkr_i2s, + MSM_MUX_ter_mi2s, + 
MSM_MUX_tsif1, + MSM_MUX_tsif2, + MSM_MUX_uim1, + MSM_MUX_uim2, + MSM_MUX_uim3, + MSM_MUX_uim4, + MSM_MUX_uim_batt_alarm, + MSM_MUX_gpio, + MSM_MUX_NA, +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", + "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", + "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134", + "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140", + "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", +}; + +static const char * const blsp_spi1_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3" +}; +static const char * const blsp_uart1_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3" +}; +static const char * const blsp_uim1_groups[] = { + "gpio0", "gpio1" +}; +static const char * const hdmi_rcv_groups[] = { + "gpio0" +}; +static const char * const blsp_i2c1_groups[] = { + "gpio2", "gpio3" +}; +static const char * const blsp_spi2_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7" +}; +static const char * const blsp_uart2_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7" +}; +static const char * const blsp_uim2_groups[] = { + "gpio4", "gpio5" +}; +static const char * const qdss_cti_trig_out_b_groups[] = { + "gpio4", +}; +static const char * const qdss_cti_trig_in_b_groups[] = { + "gpio5", +}; +static const char * const blsp_i2c2_groups[] = { + "gpio6", "gpio7" +}; +static const char * const blsp_spi3_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11" +}; +static const char * const blsp_uart3_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11" +}; +static const char * const blsp_uim3_groups[] = { + "gpio8", "gpio9" +}; +static const char * const blsp_spi1_cs1_groups[] = { + "gpio8" +}; +static const char * const blsp_spi1_cs2_groups[] = { + "gpio9", "gpio11" +}; +static const char * const mdp_vsync_groups[] = { + "gpio10", "gpio11", "gpio12" +}; +static const char * const blsp_i2c3_groups[] = { + "gpio10", "gpio11" +}; +static const char * const blsp_spi1_cs3_groups[] = { + "gpio10" +}; +static const char * const qdss_tracedata_b_groups[] = { + "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", + "gpio19", "gpio21", "gpio22", "gpio23", "gpio25", "gpio26", + "gpio57", "gpio58", "gpio92", "gpio93", +}; +static const char * const cam_mclk0_groups[] = { + "gpio13" +}; +static 
const char * const cam_mclk1_groups[] = { + "gpio14" +}; +static const char * const cam_mclk2_groups[] = { + "gpio15" +}; +static const char * const cam_mclk3_groups[] = { + "gpio16" +}; +static const char * const cci_i2c0_groups[] = { + "gpio17", "gpio18" +}; +static const char * const blsp_spi4_groups[] = { + "gpio17", "gpio18", "gpio19", "gpio20" +}; +static const char * const blsp_uart4_groups[] = { + "gpio17", "gpio18", "gpio19", "gpio20" +}; +static const char * const blsp_uim4_groups[] = { + "gpio17", "gpio18" +}; +static const char * const cci_i2c1_groups[] = { + "gpio19", "gpio20" +}; +static const char * const blsp_i2c4_groups[] = { + "gpio19", "gpio20" +}; +static const char * const cci_timer0_groups[] = { + "gpio21" +}; +static const char * const blsp_spi5_groups[] = { + "gpio21", "gpio22", "gpio23", "gpio24" +}; +static const char * const blsp_uart5_groups[] = { + "gpio21", "gpio22", "gpio23", "gpio24" +}; +static const char * const blsp_uim5_groups[] = { + "gpio21", "gpio22" +}; +static const char * const cci_timer1_groups[] = { + "gpio22" +}; +static const char * const cci_timer2_groups[] = { + "gpio23" +}; +static const char * const blsp_i2c5_groups[] = { + "gpio23", "gpio24" +}; +static const char * const cci_timer3_groups[] = { + "gpio24" +}; +static const char * const cci_async_in1_groups[] = { + "gpio24" +}; +static const char * const cci_timer4_groups[] = { + "gpio25" +}; +static const char * const cci_async_in2_groups[] = { + "gpio25" +}; +static const char * const blsp_spi6_groups[] = { + "gpio25", "gpio26", "gpio27", "gpio28" +}; +static const char * const blsp_uart6_groups[] = { + "gpio25", "gpio26", "gpio27", "gpio28" +}; +static const char * const blsp_uim6_groups[] = { + "gpio25", "gpio26" +}; +static const char * const cci_async_in0_groups[] = { + "gpio26" +}; +static const char * const gp0_clk_groups[] = { + "gpio26" +}; +static const char * const gp1_clk_groups[] = { + "gpio27", "gpio57", "gpio78" +}; +static const char * const blsp_i2c6_groups[] = { + "gpio27", "gpio28" +}; +static const char * const qdss_tracectl_a_groups[] = { + "gpio27", +}; +static const char * const qdss_traceclk_a_groups[] = { + "gpio28", +}; +static const char * const gp_mn_groups[] = { + "gpio29" +}; +static const char * const hdmi_cec_groups[] = { + "gpio31" +}; +static const char * const hdmi_ddc_groups[] = { + "gpio32", "gpio33" +}; +static const char * const hdmi_hpd_groups[] = { + "gpio34" +}; +static const char * const uim3_groups[] = { + "gpio35", "gpio36", "gpio37", "gpio38" +}; +static const char * const pci_e1_groups[] = { + "gpio35", "gpio36", +}; +static const char * const blsp_spi7_groups[] = { + "gpio41", "gpio42", "gpio43", "gpio44" +}; +static const char * const blsp_uart7_groups[] = { + "gpio41", "gpio42", "gpio43", "gpio44" +}; +static const char * const blsp_uim7_groups[] = { + "gpio41", "gpio42" +}; +static const char * const qdss_cti_trig_out_c_groups[] = { + "gpio41", +}; +static const char * const qdss_cti_trig_in_c_groups[] = { + "gpio42", +}; +static const char * const blsp_i2c7_groups[] = { + "gpio43", "gpio44" +}; +static const char * const blsp_spi8_groups[] = { + "gpio45", "gpio46", "gpio47", "gpio48" +}; +static const char * const blsp_uart8_groups[] = { + "gpio45", "gpio46", "gpio47", "gpio48" +}; +static const char * const blsp_uim8_groups[] = { + "gpio45", "gpio46" +}; +static const char * const blsp_i2c8_groups[] = { + "gpio47", "gpio48" +}; +static const char * const blsp_spi10_cs1_groups[] = { + "gpio47", "gpio67" +}; +static const char * const 
blsp_spi10_cs2_groups[] = { + "gpio48", "gpio68" +}; +static const char * const uim2_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52" +}; +static const char * const blsp_spi9_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52" +}; +static const char * const blsp_uart9_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52" +}; +static const char * const blsp_uim9_groups[] = { + "gpio49", "gpio50" +}; +static const char * const blsp_i2c9_groups[] = { + "gpio51", "gpio52" +}; +static const char * const pci_e0_groups[] = { + "gpio53", "gpio54", +}; +static const char * const uim4_groups[] = { + "gpio53", "gpio54", "gpio55", "gpio56" +}; +static const char * const blsp_spi10_groups[] = { + "gpio53", "gpio54", "gpio55", "gpio56" +}; +static const char * const blsp_uart10_groups[] = { + "gpio53", "gpio54", "gpio55", "gpio56" +}; +static const char * const blsp_uim10_groups[] = { + "gpio53", "gpio54" +}; +static const char * const qdss_tracedata_a_groups[] = { + "gpio53", "gpio54", "gpio63", "gpio64", "gpio65", + "gpio66", "gpio67", "gpio74", "gpio75", "gpio76", + "gpio77", "gpio85", "gpio86", "gpio87", "gpio89", + "gpio90" +}; +static const char * const gp_pdm0_groups[] = { + "gpio54", "gpio95" +}; +static const char * const blsp_i2c10_groups[] = { + "gpio55", "gpio56" +}; +static const char * const qdss_cti_trig_in_a_groups[] = { + "gpio55", +}; +static const char * const qdss_cti_trig_out_a_groups[] = { + "gpio56", +}; +static const char * const qua_mi2s_groups[] = { + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", +}; +static const char * const gcc_gp1_clk_a_groups[] = { + "gpio57" +}; +static const char * const gcc_gp2_clk_a_groups[] = { + "gpio58" +}; +static const char * const gcc_gp3_clk_a_groups[] = { + "gpio59" +}; +static const char * const blsp_spi2_cs1_groups[] = { + "gpio62" +}; +static const char * const blsp_spi2_cs2_groups[] = { + "gpio63" +}; +static const char * const gp_pdm2_groups[] = { + "gpio63", "gpio79" +}; +static const char * const pri_mi2s_groups[] = { + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68" +}; +static const char * const blsp_spi2_cs3_groups[] = { + "gpio66" +}; +static const char * const spkr_i2s_groups[] = { + "gpio69", "gpio70", "gpio71", "gpio72" +}; +static const char * const audio_ref_clk_groups[] = { + "gpio69" +}; +static const char * const slimbus_groups[] = { + "gpio70", "gpio71" +}; +static const char * const ter_mi2s_groups[] = { + "gpio73", "gpio74", "gpio75", "gpio76", "gpio77" +}; +static const char * const gp_pdm1_groups[] = { + "gpio74", "gpio86" +}; +static const char * const sec_mi2s_groups[] = { + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82" +}; +static const char * const gcc_gp1_clk_b_groups[] = { + "gpio78" +}; +static const char * const blsp_spi11_groups[] = { + "gpio81", "gpio82", "gpio83", "gpio84" +}; +static const char * const blsp_uart11_groups[] = { + "gpio81", "gpio82", "gpio83", "gpio84" +}; +static const char * const blsp_uim11_groups[] = { + "gpio81", "gpio82" +}; +static const char * const gcc_gp2_clk_b_groups[] = { + "gpio81" +}; +static const char * const gcc_gp3_clk_b_groups[] = { + "gpio82" +}; +static const char * const blsp_i2c11_groups[] = { + "gpio83", "gpio84" +}; +static const char * const blsp_uart12_groups[] = { + "gpio85", "gpio86", "gpio87", "gpio88" +}; +static const char * const blsp_uim12_groups[] = { + "gpio85", "gpio86" +}; +static const char * const blsp_i2c12_groups[] = { + "gpio87", "gpio88" +}; +static const char * const blsp_spi12_groups[] = { + "gpio85", "gpio86", 
"gpio87", "gpio88" +}; +static const char * const tsif1_groups[] = { + "gpio89", "gpio90", "gpio91", "gpio110", "gpio111" +}; +static const char * const blsp_spi10_cs3_groups[] = { + "gpio90" +}; +static const char * const sdc4_groups[] = { + "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96" +}; +static const char * const qdss_traceclk_b_groups[] = { + "gpio91", +}; +static const char * const tsif2_groups[] = { + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96" +}; +static const char * const qdss_tracectl_b_groups[] = { + "gpio94", +}; +static const char * const qdss_cti_trig_out_d_groups[] = { + "gpio95", +}; +static const char * const qdss_cti_trig_in_d_groups[] = { + "gpio96", +}; +static const char * const uim1_groups[] = { + "gpio97", "gpio98", "gpio99", "gpio100" +}; +static const char * const uim_batt_alarm_groups[] = { + "gpio101" +}; +static const char * const blsp11_uart_tx_b_groups[] = { + "gpio111" +}; +static const char * const blsp11_uart_rx_b_groups[] = { + "gpio112" +}; +static const char * const blsp11_i2c_sda_b_groups[] = { + "gpio113" +}; +static const char * const blsp11_i2c_scl_b_groups[] = { + "gpio114" +}; +static const char * const gsm_tx_groups[] = { + "gpio126", "gpio131", "gpio132", "gpio133" +}; +static const char * const nav_tsync_groups[] = { + "gpio127" +}; +static const char * const nav_pps_groups[] = { + "gpio127" +}; +static const char * const gps_tx_groups[] = { + "gpio130" +}; +static const char * const mss_lte_groups[] = { + "gpio134", "gpio135" +}; + +static const struct msm_function msm8994_functions[] = { + FUNCTION(audio_ref_clk), + FUNCTION(blsp_i2c1), + FUNCTION(blsp_i2c2), + FUNCTION(blsp_i2c3), + FUNCTION(blsp_i2c4), + FUNCTION(blsp_i2c5), + FUNCTION(blsp_i2c6), + FUNCTION(blsp_i2c7), + FUNCTION(blsp_i2c8), + FUNCTION(blsp_i2c9), + FUNCTION(blsp_i2c10), + FUNCTION(blsp_i2c11), + FUNCTION(blsp_i2c12), + FUNCTION(blsp_spi1), + FUNCTION(blsp_spi1_cs1), + FUNCTION(blsp_spi1_cs2), + FUNCTION(blsp_spi1_cs3), + FUNCTION(blsp_spi2), + FUNCTION(blsp_spi2_cs1), + FUNCTION(blsp_spi2_cs2), + FUNCTION(blsp_spi2_cs3), + FUNCTION(blsp_spi3), + FUNCTION(blsp_spi4), + FUNCTION(blsp_spi5), + FUNCTION(blsp_spi6), + FUNCTION(blsp_spi7), + FUNCTION(blsp_spi8), + FUNCTION(blsp_spi9), + FUNCTION(blsp_spi10), + FUNCTION(blsp_spi10_cs1), + FUNCTION(blsp_spi10_cs2), + FUNCTION(blsp_spi10_cs3), + FUNCTION(blsp_spi11), + FUNCTION(blsp_spi12), + FUNCTION(blsp_uart1), + FUNCTION(blsp_uart2), + FUNCTION(blsp_uart3), + FUNCTION(blsp_uart4), + FUNCTION(blsp_uart5), + FUNCTION(blsp_uart6), + FUNCTION(blsp_uart7), + FUNCTION(blsp_uart8), + FUNCTION(blsp_uart9), + FUNCTION(blsp_uart10), + FUNCTION(blsp_uart11), + FUNCTION(blsp_uart12), + FUNCTION(blsp_uim1), + FUNCTION(blsp_uim2), + FUNCTION(blsp_uim3), + FUNCTION(blsp_uim4), + FUNCTION(blsp_uim5), + FUNCTION(blsp_uim6), + FUNCTION(blsp_uim7), + FUNCTION(blsp_uim8), + FUNCTION(blsp_uim9), + FUNCTION(blsp_uim10), + FUNCTION(blsp_uim11), + FUNCTION(blsp_uim12), + FUNCTION(blsp11_i2c_scl_b), + FUNCTION(blsp11_i2c_sda_b), + FUNCTION(blsp11_uart_rx_b), + FUNCTION(blsp11_uart_tx_b), + FUNCTION(cam_mclk0), + FUNCTION(cam_mclk1), + FUNCTION(cam_mclk2), + FUNCTION(cam_mclk3), + FUNCTION(cci_async_in0), + FUNCTION(cci_async_in1), + FUNCTION(cci_async_in2), + FUNCTION(cci_i2c0), + FUNCTION(cci_i2c1), + FUNCTION(cci_timer0), + FUNCTION(cci_timer1), + FUNCTION(cci_timer2), + FUNCTION(cci_timer3), + FUNCTION(cci_timer4), + FUNCTION(gcc_gp1_clk_a), + FUNCTION(gcc_gp1_clk_b), + FUNCTION(gcc_gp2_clk_a), + FUNCTION(gcc_gp2_clk_b), + 
FUNCTION(gcc_gp3_clk_a), + FUNCTION(gcc_gp3_clk_b), + FUNCTION(gp_mn), + FUNCTION(gp_pdm0), + FUNCTION(gp_pdm1), + FUNCTION(gp_pdm2), + FUNCTION(gp0_clk), + FUNCTION(gp1_clk), + FUNCTION(gps_tx), + FUNCTION(gsm_tx), + FUNCTION(hdmi_cec), + FUNCTION(hdmi_ddc), + FUNCTION(hdmi_hpd), + FUNCTION(hdmi_rcv), + FUNCTION(mdp_vsync), + FUNCTION(mss_lte), + FUNCTION(nav_pps), + FUNCTION(nav_tsync), + FUNCTION(qdss_cti_trig_in_a), + FUNCTION(qdss_cti_trig_in_b), + FUNCTION(qdss_cti_trig_in_c), + FUNCTION(qdss_cti_trig_in_d), + FUNCTION(qdss_cti_trig_out_a), + FUNCTION(qdss_cti_trig_out_b), + FUNCTION(qdss_cti_trig_out_c), + FUNCTION(qdss_cti_trig_out_d), + FUNCTION(qdss_traceclk_a), + FUNCTION(qdss_traceclk_b), + FUNCTION(qdss_tracectl_a), + FUNCTION(qdss_tracectl_b), + FUNCTION(qdss_tracedata_a), + FUNCTION(qdss_tracedata_b), + FUNCTION(qua_mi2s), + FUNCTION(pci_e0), + FUNCTION(pci_e1), + FUNCTION(pri_mi2s), + FUNCTION(sdc4), + FUNCTION(sec_mi2s), + FUNCTION(slimbus), + FUNCTION(spkr_i2s), + FUNCTION(ter_mi2s), + FUNCTION(tsif1), + FUNCTION(tsif2), + FUNCTION(uim_batt_alarm), + FUNCTION(uim1), + FUNCTION(uim2), + FUNCTION(uim3), + FUNCTION(uim4), + FUNCTION(gpio), +}; + +static const struct msm_pingroup msm8994_groups[] = { + PINGROUP(0, blsp_spi1, blsp_uart1, blsp_uim1, hdmi_rcv, NA, NA, NA, + NA, NA, NA, NA), + PINGROUP(1, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(4, blsp_spi2, blsp_uart2, blsp_uim2, NA, qdss_cti_trig_out_b, + NA, NA, NA, NA, NA, NA), + PINGROUP(5, blsp_spi2, blsp_uart2, blsp_uim2, NA, qdss_cti_trig_in_b, + NA, NA, NA, NA, NA, NA), + PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(8, blsp_spi3, blsp_uart3, blsp_uim3, blsp_spi1_cs1, NA, NA, + NA, NA, NA, NA, NA), + PINGROUP(9, blsp_spi3, blsp_uart3, blsp_uim3, blsp_spi1_cs2, NA, NA, + NA, NA, NA, NA, NA), + PINGROUP(10, mdp_vsync, blsp_spi3, blsp_uart3, blsp_i2c3, + blsp_spi1_cs3, NA, NA, NA, NA, NA, NA), + PINGROUP(11, mdp_vsync, blsp_spi3, blsp_uart3, blsp_i2c3, + blsp_spi1_cs2, NA, NA, NA, NA, NA, NA), + PINGROUP(12, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(13, cam_mclk0, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(14, cam_mclk1, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(15, cam_mclk2, NA, qdss_tracedata_b, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(16, cam_mclk3, NA, qdss_tracedata_b, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(17, cci_i2c0, blsp_spi4, blsp_uart4, blsp_uim4, NA, + qdss_tracedata_b, NA, NA, NA, NA, NA), + PINGROUP(18, cci_i2c0, blsp_spi4, blsp_uart4, blsp_uim4, NA, + qdss_tracedata_b, NA, NA, NA, NA, NA), + PINGROUP(19, cci_i2c1, blsp_spi4, blsp_uart4, blsp_i2c4, NA, + qdss_tracedata_b, NA, NA, NA, NA, NA), + PINGROUP(20, cci_i2c1, blsp_spi4, blsp_uart4, blsp_i2c4, NA, NA, NA, + NA, NA, NA, NA), + PINGROUP(21, cci_timer0, blsp_spi5, blsp_uart5, blsp_uim5, NA, + qdss_tracedata_b, NA, NA, NA, NA, NA), + PINGROUP(22, cci_timer1, blsp_spi5, blsp_uart5, blsp_uim5, NA, + qdss_tracedata_b, NA, NA, NA, NA, NA), + PINGROUP(23, cci_timer2, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, + qdss_tracedata_b, NA, NA, NA, NA), + PINGROUP(24, cci_timer3, cci_async_in1, blsp_spi5, blsp_uart5, + blsp_i2c5, NA, NA, NA, NA, NA, NA), + 
PINGROUP(25, cci_timer4, cci_async_in2, blsp_spi6, blsp_uart6, + blsp_uim6, NA, NA, qdss_tracedata_b, NA, NA, NA), + PINGROUP(26, cci_async_in0, blsp_spi6, blsp_uart6, blsp_uim6, gp0_clk, + NA, qdss_tracedata_b, NA, NA, NA, NA), + PINGROUP(27, blsp_spi6, blsp_uart6, blsp_i2c6, gp1_clk, + qdss_tracectl_a, NA, NA, NA, NA, NA, NA), + PINGROUP(28, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_traceclk_a, NA, + NA, NA, NA, NA, NA, NA), + PINGROUP(29, gp_mn, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(31, hdmi_cec, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(32, hdmi_ddc, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(33, hdmi_ddc, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(34, hdmi_hpd, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(35, uim3, pci_e1, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(36, uim3, pci_e1, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(37, uim3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(38, uim3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(41, blsp_spi7, blsp_uart7, blsp_uim7, qdss_cti_trig_out_c, + NA, NA, NA, NA, NA, NA, NA), + PINGROUP(42, blsp_spi7, blsp_uart7, blsp_uim7, qdss_cti_trig_in_c, NA, + NA, NA, NA, NA, NA, NA), + PINGROUP(43, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(44, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(45, blsp_spi8, blsp_uart8, blsp_uim8, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(46, blsp_spi8, blsp_uart8, blsp_uim8, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(47, blsp_spi8, blsp_uart8, blsp_i2c8, blsp_spi10_cs1, NA, NA, + NA, NA, NA, NA, NA), + PINGROUP(48, blsp_spi8, blsp_uart8, blsp_i2c8, blsp_spi10_cs2, NA, NA, + NA, NA, NA, NA, NA), + PINGROUP(49, uim2, blsp_spi9, blsp_uart9, blsp_uim9, NA, NA, NA, NA, + NA, NA, NA), + PINGROUP(50, uim2, blsp_spi9, blsp_uart9, blsp_uim9, NA, NA, NA, NA, + NA, NA, NA), + PINGROUP(51, uim2, blsp_spi9, blsp_uart9, blsp_i2c9, NA, NA, NA, NA, + NA, NA, NA), + PINGROUP(52, uim2, blsp_spi9, blsp_uart9, blsp_i2c9, NA, NA, NA, NA, + NA, NA, NA), + PINGROUP(53, uim4, pci_e0, blsp_spi10, blsp_uart10, blsp_uim10, NA, + NA, qdss_tracedata_a, NA, NA, NA), + PINGROUP(54, uim4, pci_e0, blsp_spi10, blsp_uart10, blsp_uim10, + gp_pdm0, NA, NA, qdss_tracedata_a, NA, NA), + PINGROUP(55, uim4, blsp_spi10, blsp_uart10, blsp_i2c10, NA, NA, NA, + qdss_cti_trig_in_a, NA, NA, NA), + PINGROUP(56, uim4, blsp_spi10, blsp_uart10, blsp_i2c10, NA, NA, + qdss_cti_trig_out_a, NA, NA, NA, NA), + PINGROUP(57, qua_mi2s, gcc_gp1_clk_a, NA, NA, qdss_tracedata_b, NA, NA, + NA, NA, NA, NA), + PINGROUP(58, qua_mi2s, gcc_gp2_clk_a, NA, NA, qdss_tracedata_b, NA, NA, + NA, NA, NA, NA), + PINGROUP(59, qua_mi2s, gcc_gp3_clk_a, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(60, qua_mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(61, qua_mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(62, qua_mi2s, blsp_spi2_cs1, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(63, qua_mi2s, blsp_spi2_cs2, gp_pdm2, NA, NA, NA, NA, NA, + qdss_tracedata_a, NA, NA), + PINGROUP(64, pri_mi2s, NA, NA, NA, qdss_tracedata_a, NA, NA, NA, NA, + NA, NA), + PINGROUP(65, pri_mi2s, NA, NA, NA, qdss_tracedata_a, NA, NA, NA, NA, + NA, NA), + PINGROUP(66, pri_mi2s, blsp_spi2_cs3, NA, NA, NA, qdss_tracedata_a, + NA, NA, NA, NA, NA), + PINGROUP(67, pri_mi2s, 
blsp_spi10_cs1, NA, NA, NA, qdss_tracedata_a, + NA, NA, NA, NA, NA), + PINGROUP(68, pri_mi2s, blsp_spi10_cs2, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(69, spkr_i2s, audio_ref_clk, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(70, slimbus, spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(71, slimbus, spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(72, spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(73, ter_mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(74, ter_mi2s, gp_pdm1, NA, NA, NA, qdss_tracedata_a, NA, NA, + NA, NA, NA), + PINGROUP(75, ter_mi2s, NA, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(76, ter_mi2s, NA, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(77, ter_mi2s, NA, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(78, sec_mi2s, gcc_gp1_clk_b, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(79, sec_mi2s, gp_pdm2, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(80, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(81, sec_mi2s, blsp_spi11, blsp_uart11, blsp_uim11, + gcc_gp2_clk_b, NA, NA, NA, NA, NA, NA), + PINGROUP(82, sec_mi2s, blsp_spi11, blsp_uart11, blsp_uim11, + gcc_gp3_clk_b, NA, NA, NA, NA, NA, NA), + PINGROUP(83, blsp_spi11, blsp_uart11, blsp_i2c11, NA, NA, NA, NA, NA, + NA, NA, NA), + PINGROUP(84, blsp_spi11, blsp_uart11, blsp_i2c11, NA, NA, NA, NA, NA, + NA, NA, NA), + PINGROUP(85, blsp_spi12, blsp_uart12, blsp_uim12, NA, NA, + qdss_tracedata_a, NA, NA, NA, NA, NA), + PINGROUP(86, blsp_spi12, blsp_uart12, blsp_uim12, gp_pdm1, NA, + qdss_tracedata_a, NA, NA, NA, NA, NA), + PINGROUP(87, blsp_spi12, blsp_uart12, blsp_i2c12, NA, + qdss_tracedata_a, NA, NA, NA, NA, NA, NA), + PINGROUP(88, blsp_spi12, blsp_uart12, blsp_i2c12, NA, NA, NA, NA, NA, + NA, NA, NA), + PINGROUP(89, tsif1, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(90, tsif1, blsp_spi10_cs3, qdss_tracedata_a, NA, NA, NA, NA, + NA, NA, NA, NA), + PINGROUP(91, tsif1, sdc4, NA, NA, NA, NA, qdss_traceclk_b, NA, NA, NA, + NA), + PINGROUP(92, tsif2, sdc4, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, + NA, NA), + PINGROUP(93, tsif2, sdc4, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, + NA, NA), + PINGROUP(94, tsif2, sdc4, NA, NA, NA, NA, qdss_tracectl_b, NA, NA, NA, + NA), + PINGROUP(95, tsif2, sdc4, gp_pdm0, NA, NA, NA, qdss_cti_trig_out_d, + NA, NA, NA, NA), + PINGROUP(96, tsif2, sdc4, qdss_cti_trig_in_d, NA, NA, NA, NA, NA, NA, + NA, NA), + PINGROUP(97, uim1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(98, uim1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(99, uim1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(100, uim1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(101, uim_batt_alarm, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(102, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(103, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(105, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(106, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(107, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(108, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(109, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(110, tsif1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(111, tsif1, blsp11_uart_tx_b, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(112, blsp11_uart_rx_b, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(113, 
blsp11_i2c_sda_b, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(114, blsp11_i2c_scl_b, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(117, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(118, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(119, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(120, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(122, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(124, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(126, NA, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(127, NA, nav_tsync, nav_pps, NA, NA, NA, NA, NA, NA, NA, + NA), + PINGROUP(128, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(129, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(130, gps_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(131, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(132, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(133, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(134, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(135, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(137, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(140, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(141, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(145, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + SDC_PINGROUP(sdc1_rclk, 0x2044, 15, 0), + SDC_PINGROUP(sdc1_clk, 0x2044, 13, 6), + SDC_PINGROUP(sdc1_cmd, 0x2044, 11, 3), + SDC_PINGROUP(sdc1_data, 0x2044, 9, 0), + SDC_PINGROUP(sdc2_clk, 0x2048, 14, 6), + SDC_PINGROUP(sdc2_cmd, 0x2048, 11, 3), + SDC_PINGROUP(sdc2_data, 0x2048, 9, 0), + SDC_PINGROUP(sdc3_clk, 0x206c, 14, 6), + SDC_PINGROUP(sdc3_cmd, 0x206c, 11, 3), + SDC_PINGROUP(sdc3_data, 0x206c, 9, 0), +}; + +#define NUM_GPIO_PINGROUPS 146 + +static const struct msm_pinctrl_soc_data msm8994_pinctrl = { + .pins = msm8994_pins, + .npins = ARRAY_SIZE(msm8994_pins), + .functions = msm8994_functions, + .nfunctions = ARRAY_SIZE(msm8994_functions), + .groups = msm8994_groups, + .ngroups = ARRAY_SIZE(msm8994_groups), + .ngpios = NUM_GPIO_PINGROUPS, +}; + +static int msm8994_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &msm8994_pinctrl); +} + +static const struct of_device_id msm8994_pinctrl_of_match[] = { + { .compatible = "qcom,msm8992-pinctrl", }, + { .compatible = "qcom,msm8994-pinctrl", }, + { } +}; + +static struct platform_driver msm8994_pinctrl_driver = { + .driver = { + .name = "msm8994-pinctrl", + .of_match_table = msm8994_pinctrl_of_match, + }, + .probe = msm8994_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init msm8994_pinctrl_init(void) +{ + return platform_driver_register(&msm8994_pinctrl_driver); +} +arch_initcall(msm8994_pinctrl_init); + +static void __exit msm8994_pinctrl_exit(void) +{ + 
platform_driver_unregister(&msm8994_pinctrl_driver); +} +module_exit(msm8994_pinctrl_exit); + +MODULE_DESCRIPTION("Qualcomm MSM8994 pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, msm8994_pinctrl_of_match); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index d32fa2b5ff82ba6de5a586cc9ee6fdc29e7f71cd..12f7d1eb65bc71e891f3eb88a750953dbd1ce68b 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -61,16 +61,15 @@ static void exynos_irq_mask(struct irq_data *irqd) struct irq_chip *chip = irq_data_get_irq_chip(irqd); struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset; unsigned long mask; unsigned long flags; spin_lock_irqsave(&bank->slock, flags); - mask = readl(d->virt_base + reg_mask); + mask = readl(bank->eint_base + reg_mask); mask |= 1 << irqd->hwirq; - writel(mask, d->virt_base + reg_mask); + writel(mask, bank->eint_base + reg_mask); spin_unlock_irqrestore(&bank->slock, flags); } @@ -80,10 +79,9 @@ static void exynos_irq_ack(struct irq_data *irqd) struct irq_chip *chip = irq_data_get_irq_chip(irqd); struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned long reg_pend = our_chip->eint_pend + bank->eint_offset; - writel(1 << irqd->hwirq, d->virt_base + reg_pend); + writel(1 << irqd->hwirq, bank->eint_base + reg_pend); } static void exynos_irq_unmask(struct irq_data *irqd) @@ -91,7 +89,6 @@ static void exynos_irq_unmask(struct irq_data *irqd) struct irq_chip *chip = irq_data_get_irq_chip(irqd); struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset; unsigned long mask; unsigned long flags; @@ -109,9 +106,9 @@ static void exynos_irq_unmask(struct irq_data *irqd) spin_lock_irqsave(&bank->slock, flags); - mask = readl(d->virt_base + reg_mask); + mask = readl(bank->eint_base + reg_mask); mask &= ~(1 << irqd->hwirq); - writel(mask, d->virt_base + reg_mask); + writel(mask, bank->eint_base + reg_mask); spin_unlock_irqrestore(&bank->slock, flags); } @@ -121,7 +118,6 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type) struct irq_chip *chip = irq_data_get_irq_chip(irqd); struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; unsigned int con, trig_type; unsigned long reg_con = our_chip->eint_con + bank->eint_offset; @@ -152,10 +148,10 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type) else irq_set_handler_locked(irqd, handle_level_irq); - con = readl(d->virt_base + reg_con); + con = readl(bank->eint_base + reg_con); con &= ~(EXYNOS_EINT_CON_MASK << shift); con |= trig_type << shift; - writel(con, d->virt_base + reg_con); + writel(con, bank->eint_base + reg_con); return 0; } @@ -166,7 +162,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd) struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank 
= irq_data_get_irq_chip_data(irqd); const struct samsung_pin_bank_type *bank_type = bank->type; - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; unsigned long reg_con = our_chip->eint_con + bank->eint_offset; unsigned long flags; @@ -188,10 +183,10 @@ static int exynos_irq_request_resources(struct irq_data *irqd) spin_lock_irqsave(&bank->slock, flags); - con = readl(d->virt_base + reg_con); + con = readl(bank->eint_base + reg_con); con &= ~(mask << shift); con |= EXYNOS_EINT_FUNC << shift; - writel(con, d->virt_base + reg_con); + writel(con, bank->eint_base + reg_con); spin_unlock_irqrestore(&bank->slock, flags); @@ -206,7 +201,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd) struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); const struct samsung_pin_bank_type *bank_type = bank->type; - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; unsigned long reg_con = our_chip->eint_con + bank->eint_offset; unsigned long flags; @@ -221,10 +215,10 @@ static void exynos_irq_release_resources(struct irq_data *irqd) spin_lock_irqsave(&bank->slock, flags); - con = readl(d->virt_base + reg_con); + con = readl(bank->eint_base + reg_con); con &= ~(mask << shift); con |= FUNC_INPUT << shift; - writel(con, d->virt_base + reg_con); + writel(con, bank->eint_base + reg_con); spin_unlock_irqrestore(&bank->slock, flags); @@ -274,7 +268,7 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data) struct samsung_pin_bank *bank = d->pin_banks; unsigned int svc, group, pin, virq; - svc = readl(d->virt_base + EXYNOS_SVC_OFFSET); + svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET); group = EXYNOS_SVC_GROUP(svc); pin = svc & EXYNOS_SVC_NUM_MASK; @@ -452,7 +446,6 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc); - struct samsung_pinctrl_drv_data *d = eintd->banks[0]->drvdata; unsigned long pend; unsigned long mask; int i; @@ -461,9 +454,9 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc) for (i = 0; i < eintd->nr_banks; ++i) { struct samsung_pin_bank *b = eintd->banks[i]; - pend = readl(d->virt_base + b->irq_chip->eint_pend + pend = readl(b->eint_base + b->irq_chip->eint_pend + b->eint_offset); - mask = readl(d->virt_base + b->irq_chip->eint_mask + mask = readl(b->eint_base + b->irq_chip->eint_mask + b->eint_offset); exynos_irq_demux_eint(pend & ~mask, b->irq_domain); } @@ -581,7 +574,7 @@ static void exynos_pinctrl_suspend_bank( struct samsung_pin_bank *bank) { struct exynos_eint_gpio_save *save = bank->soc_priv; - void __iomem *regs = drvdata->virt_base; + void __iomem *regs = bank->eint_base; save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET + bank->eint_offset); @@ -610,7 +603,7 @@ static void exynos_pinctrl_resume_bank( struct samsung_pin_bank *bank) { struct exynos_eint_gpio_save *save = bank->soc_priv; - void __iomem *regs = drvdata->virt_base; + void __iomem *regs = bank->eint_base; pr_debug("%s: con %#010x => %#010x\n", bank->name, readl(regs + EXYNOS_GPIO_ECON_OFFSET @@ -1346,6 +1339,11 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = { EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), + EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 
0x1004, 1), + EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), + EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), + EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), + EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), }; /* pin banks of exynos5433 pin-controller - AUD */ @@ -1427,6 +1425,7 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] = { .eint_wkup_init = exynos_eint_wkup_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, + .nr_ext_resources = 1, }, { /* pin-controller instance 1 data */ .pin_banks = exynos5433_pin_banks1, diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h index 0f0f7cedb2dcc7c89d94cc481de9d29ab7dd613d..5821525a2c8473fa026b3dc84cd6eb999c243b2a 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.h +++ b/drivers/pinctrl/samsung/pinctrl-exynos.h @@ -79,6 +79,17 @@ .name = id \ } +#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \ + { \ + .type = &bank_type_alive, \ + .pctl_offset = reg, \ + .nr_pins = pins, \ + .eint_type = EINT_TYPE_WKUP, \ + .eint_offset = offs, \ + .name = id, \ + .pctl_res_idx = pctl_idx, \ + } \ + /** * struct exynos_weint_data: irq specific data for all the wakeup interrupts * generated by the external wakeup interrupt controller. diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 3d92f827da7a83fc08a0d24e86347004ff01ba0b..b82a003546ae88f8c2f3e8cebad87b1547ff21d7 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -151,7 +151,7 @@ static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d, u32 val; /* Make sure that pin is configured as interrupt */ - reg = d->virt_base + bank->pctl_offset; + reg = bank->pctl_base + bank->pctl_offset; shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC]; mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; @@ -184,7 +184,7 @@ static int s3c24xx_eint_type(struct irq_data *data, unsigned int type) s3c24xx_eint_set_handler(data, type); /* Set up interrupt trigger */ - reg = d->virt_base + EINT_REG(index); + reg = bank->eint_base + EINT_REG(index); shift = EINT_OFFS(index); val = readl(reg); @@ -259,32 +259,29 @@ static void s3c2410_demux_eint0_3(struct irq_desc *desc) static void s3c2412_eint0_3_ack(struct irq_data *data) { struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned long bitval = 1UL << data->hwirq; - writel(bitval, d->virt_base + EINTPEND_REG); + writel(bitval, bank->eint_base + EINTPEND_REG); } static void s3c2412_eint0_3_mask(struct irq_data *data) { struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned long mask; - mask = readl(d->virt_base + EINTMASK_REG); + mask = readl(bank->eint_base + EINTMASK_REG); mask |= (1UL << data->hwirq); - writel(mask, d->virt_base + EINTMASK_REG); + writel(mask, bank->eint_base + EINTMASK_REG); } static void s3c2412_eint0_3_unmask(struct irq_data *data) { struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned long mask; - mask = readl(d->virt_base + EINTMASK_REG); + mask = readl(bank->eint_base + EINTMASK_REG); mask &= ~(1UL << data->hwirq); - writel(mask, d->virt_base + EINTMASK_REG); + writel(mask, bank->eint_base + EINTMASK_REG); } static struct irq_chip s3c2412_eint0_3_chip = { @@ -319,34 +316,31 @@ static void 
s3c2412_demux_eint0_3(struct irq_desc *desc) static void s3c24xx_eint_ack(struct irq_data *data) { struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned char index = bank->eint_offset + data->hwirq; - writel(1UL << index, d->virt_base + EINTPEND_REG); + writel(1UL << index, bank->eint_base + EINTPEND_REG); } static void s3c24xx_eint_mask(struct irq_data *data) { struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned char index = bank->eint_offset + data->hwirq; unsigned long mask; - mask = readl(d->virt_base + EINTMASK_REG); + mask = readl(bank->eint_base + EINTMASK_REG); mask |= (1UL << index); - writel(mask, d->virt_base + EINTMASK_REG); + writel(mask, bank->eint_base + EINTMASK_REG); } static void s3c24xx_eint_unmask(struct irq_data *data) { struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned char index = bank->eint_offset + data->hwirq; unsigned long mask; - mask = readl(d->virt_base + EINTMASK_REG); + mask = readl(bank->eint_base + EINTMASK_REG); mask &= ~(1UL << index); - writel(mask, d->virt_base + EINTMASK_REG); + writel(mask, bank->eint_base + EINTMASK_REG); } static struct irq_chip s3c24xx_eint_chip = { @@ -362,13 +356,14 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc, { struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - struct samsung_pinctrl_drv_data *d = data->drvdata; + struct irq_data *irqd = irq_desc_get_irq_data(desc); + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); unsigned int pend, mask; chained_irq_enter(chip, desc); - pend = readl(d->virt_base + EINTPEND_REG); - mask = readl(d->virt_base + EINTMASK_REG); + pend = readl(bank->eint_base + EINTPEND_REG); + mask = readl(bank->eint_base + EINTMASK_REG); pend &= ~mask; pend &= range; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index 43407ab248f5121d0ca5625112592a32a9f1a968..4c632812ccff2dbd8086b0640f58774edf23e0fb 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -280,7 +280,7 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d, u32 val; /* Make sure that pin is configured as interrupt */ - reg = d->virt_base + bank->pctl_offset; + reg = bank->pctl_base + bank->pctl_offset; shift = pin; if (bank_type->fld_width[PINCFG_TYPE_FUNC] * shift >= 32) { /* 4-bit bank type with 2 con regs */ @@ -308,9 +308,8 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d, static inline void s3c64xx_gpio_irq_set_mask(struct irq_data *irqd, bool mask) { struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned char index = EINT_OFFS(bank->eint_offset) + irqd->hwirq; - void __iomem *reg = d->virt_base + EINTMASK_REG(bank->eint_offset); + void __iomem *reg = bank->eint_base + EINTMASK_REG(bank->eint_offset); u32 val; val = readl(reg); @@ -334,9 +333,8 @@ static void s3c64xx_gpio_irq_mask(struct irq_data *irqd) static void s3c64xx_gpio_irq_ack(struct irq_data *irqd) { struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = bank->drvdata; unsigned char index = EINT_OFFS(bank->eint_offset) + irqd->hwirq; - void __iomem *reg = d->virt_base + 
EINTPEND_REG(bank->eint_offset); + void __iomem *reg = bank->eint_base + EINTPEND_REG(bank->eint_offset); writel(1 << index, reg); } @@ -359,7 +357,7 @@ static int s3c64xx_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) s3c64xx_irq_set_handler(irqd, type); /* Set up interrupt trigger */ - reg = d->virt_base + EINTCON_REG(bank->eint_offset); + reg = bank->eint_base + EINTCON_REG(bank->eint_offset); shift = EINT_OFFS(bank->eint_offset) + irqd->hwirq; shift = 4 * (shift / 4); /* 4 EINTs per trigger selector */ @@ -411,7 +409,8 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc); - struct samsung_pinctrl_drv_data *drvdata = data->drvdata; + struct irq_data *irqd = irq_desc_get_irq_data(desc); + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); chained_irq_enter(chip, desc); @@ -421,7 +420,7 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc) unsigned int pin; unsigned int virq; - svc = readl(drvdata->virt_base + SERVICE_REG); + svc = readl(bank->eint_base + SERVICE_REG); group = SVC_GROUP(svc); pin = svc & SVC_NUM_MASK; @@ -518,15 +517,15 @@ static inline void s3c64xx_eint0_irq_set_mask(struct irq_data *irqd, bool mask) { struct s3c64xx_eint0_domain_data *ddata = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata; + struct samsung_pin_bank *bank = ddata->bank; u32 val; - val = readl(d->virt_base + EINT0MASK_REG); + val = readl(bank->eint_base + EINT0MASK_REG); if (mask) val |= 1 << ddata->eints[irqd->hwirq]; else val &= ~(1 << ddata->eints[irqd->hwirq]); - writel(val, d->virt_base + EINT0MASK_REG); + writel(val, bank->eint_base + EINT0MASK_REG); } static void s3c64xx_eint0_irq_unmask(struct irq_data *irqd) @@ -543,10 +542,10 @@ static void s3c64xx_eint0_irq_ack(struct irq_data *irqd) { struct s3c64xx_eint0_domain_data *ddata = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata; + struct samsung_pin_bank *bank = ddata->bank; writel(1 << ddata->eints[irqd->hwirq], - d->virt_base + EINT0PEND_REG); + bank->eint_base + EINT0PEND_REG); } static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type) @@ -554,7 +553,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type) struct s3c64xx_eint0_domain_data *ddata = irq_data_get_irq_chip_data(irqd); struct samsung_pin_bank *bank = ddata->bank; - struct samsung_pinctrl_drv_data *d = bank->drvdata; + struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata; void __iomem *reg; int trigger; u8 shift; @@ -569,7 +568,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type) s3c64xx_irq_set_handler(irqd, type); /* Set up interrupt trigger */ - reg = d->virt_base + EINT0CON0_REG; + reg = bank->eint_base + EINT0CON0_REG; shift = ddata->eints[irqd->hwirq]; if (shift >= EINT_MAX_PER_REG) { reg += 4; @@ -601,14 +600,19 @@ static struct irq_chip s3c64xx_eint0_irq_chip = { static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range) { struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_data *irqd = irq_desc_get_irq_data(desc); + struct s3c64xx_eint0_domain_data *ddata = + irq_data_get_irq_chip_data(irqd); + struct samsung_pin_bank *bank = ddata->bank; + struct s3c64xx_eint0_data *data = irq_desc_get_handler_data(desc); - struct samsung_pinctrl_drv_data *drvdata = data->drvdata; + unsigned int pend, mask; 
chained_irq_enter(chip, desc); - pend = readl(drvdata->virt_base + EINT0PEND_REG); - mask = readl(drvdata->virt_base + EINT0MASK_REG); + pend = readl(bank->eint_base + EINT0PEND_REG); + mask = readl(bank->eint_base + EINT0MASK_REG); pend = pend & range & ~mask; pend &= range; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 620727fabe6437feb63f6d4b24f11949616131f4..41e62391c33ca10e34e9322780926fb7637ebe39 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -33,6 +33,9 @@ #include "../core.h" #include "pinctrl-samsung.h" +/* maximum number of the memory resources */ +#define SAMSUNG_PINCTRL_NUM_RESOURCES 2 + /* list of all possible config options supported */ static struct pin_config { const char *property; @@ -345,7 +348,7 @@ static void pin_to_reg_bank(struct samsung_pinctrl_drv_data *drvdata, ((b->pin_base + b->nr_pins - 1) < pin)) b++; - *reg = drvdata->virt_base + b->pctl_offset; + *reg = b->pctl_base + b->pctl_offset; *offset = pin - b->pin_base; if (bank) *bank = b; @@ -526,7 +529,7 @@ static void samsung_gpio_set_value(struct gpio_chip *gc, void __iomem *reg; u32 data; - reg = bank->drvdata->virt_base + bank->pctl_offset; + reg = bank->pctl_base + bank->pctl_offset; data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]); data &= ~(1 << offset); @@ -554,7 +557,7 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset) struct samsung_pin_bank *bank = gpiochip_get_data(gc); const struct samsung_pin_bank_type *type = bank->type; - reg = bank->drvdata->virt_base + bank->pctl_offset; + reg = bank->pctl_base + bank->pctl_offset; data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]); data >>= offset; @@ -581,8 +584,8 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc, type = bank->type; drvdata = bank->drvdata; - reg = drvdata->virt_base + bank->pctl_offset + - type->reg_offset[PINCFG_TYPE_FUNC]; + reg = bank->pctl_base + bank->pctl_offset + + type->reg_offset[PINCFG_TYPE_FUNC]; mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1; shift = offset * type->fld_width[PINCFG_TYPE_FUNC]; @@ -979,6 +982,8 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, const struct samsung_pin_bank_data *bdata; const struct samsung_pin_ctrl *ctrl; struct samsung_pin_bank *bank; + struct resource *res; + void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES]; int i; id = of_alias_get_id(node, "pinctrl"); @@ -997,6 +1002,17 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, if (!d->pin_banks) return ERR_PTR(-ENOMEM); + if (ctrl->nr_ext_resources + 1 > SAMSUNG_PINCTRL_NUM_RESOURCES) + return ERR_PTR(-EINVAL); + + for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + virt_base[i] = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (IS_ERR(virt_base[i])) + return ERR_PTR(-EIO); + } + bank = d->pin_banks; bdata = ctrl->pin_banks; for (i = 0; i < ctrl->nr_banks; ++i, ++bdata, ++bank) { @@ -1013,6 +1029,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, bank->drvdata = d; bank->pin_base = d->nr_pins; d->nr_pins += bank->nr_pins; + + bank->eint_base = virt_base[0]; + bank->pctl_base = virt_base[bdata->pctl_res_idx]; } for_each_child_of_node(node, np) { @@ -1052,11 +1071,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) } drvdata->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - drvdata->virt_base = devm_ioremap_resource(&pdev->dev, 
res); - if (IS_ERR(drvdata->virt_base)) - return PTR_ERR(drvdata->virt_base); - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res) drvdata->irq = res->start; @@ -1094,12 +1108,11 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) static void samsung_pinctrl_suspend_dev( struct samsung_pinctrl_drv_data *drvdata) { - void __iomem *virt_base = drvdata->virt_base; int i; for (i = 0; i < drvdata->nr_banks; i++) { struct samsung_pin_bank *bank = &drvdata->pin_banks[i]; - void __iomem *reg = virt_base + bank->pctl_offset; + void __iomem *reg = bank->pctl_base + bank->pctl_offset; const u8 *offs = bank->type->reg_offset; const u8 *widths = bank->type->fld_width; enum pincfg_type type; @@ -1140,7 +1153,6 @@ static void samsung_pinctrl_suspend_dev( */ static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata) { - void __iomem *virt_base = drvdata->virt_base; int i; if (drvdata->resume) @@ -1148,7 +1160,7 @@ static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata) for (i = 0; i < drvdata->nr_banks; i++) { struct samsung_pin_bank *bank = &drvdata->pin_banks[i]; - void __iomem *reg = virt_base + bank->pctl_offset; + void __iomem *reg = bank->pctl_base + bank->pctl_offset; const u8 *offs = bank->type->reg_offset; const u8 *widths = bank->type->fld_width; enum pincfg_type type; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index cd31bfaf62cb6e33d296e1ba9171933a64434ac4..043cb6c11180e5c8241d47feddbbd0f74fe4d31c 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -116,6 +116,7 @@ struct samsung_pin_bank_type { * struct samsung_pin_bank_data: represent a controller pin-bank (init data). * @type: type of the bank (register offsets and bitfield widths) * @pctl_offset: starting offset of the pin-bank registers. + * @pctl_res_idx: index of base address for pin-bank registers. * @nr_pins: number of pins included in this bank. * @eint_func: function to set in CON register to configure pin as EINT. * @eint_type: type of the external interrupt supported by the bank. @@ -126,6 +127,7 @@ struct samsung_pin_bank_type { struct samsung_pin_bank_data { const struct samsung_pin_bank_type *type; u32 pctl_offset; + u8 pctl_res_idx; u8 nr_pins; u8 eint_func; enum eint_type eint_type; @@ -137,8 +139,10 @@ struct samsung_pin_bank_data { /** * struct samsung_pin_bank: represent a controller pin-bank. * @type: type of the bank (register offsets and bitfield widths) + * @pctl_base: base address of the pin-bank registers * @pctl_offset: starting offset of the pin-bank registers. * @nr_pins: number of pins included in this bank. + * @eint_base: base address of the pin-bank EINT registers. * @eint_func: function to set in CON register to configure pin as EINT. * @eint_type: type of the external interrupt supported by the bank. * @eint_mask: bit mask of pins which support EINT function. @@ -157,8 +161,10 @@ struct samsung_pin_bank_data { */ struct samsung_pin_bank { const struct samsung_pin_bank_type *type; + void __iomem *pctl_base; u32 pctl_offset; u8 nr_pins; + void __iomem *eint_base; u8 eint_func; enum eint_type eint_type; u32 eint_mask; @@ -182,6 +188,7 @@ struct samsung_pin_bank { * struct samsung_pin_ctrl: represent a pin controller. * @pin_banks: list of pin banks included in this controller. * @nr_banks: number of pin banks. + * @nr_ext_resources: number of the extra base address for pin banks. 
* @eint_gpio_init: platform specific callback to setup the external gpio * interrupts for the controller. * @eint_wkup_init: platform specific callback to setup the external wakeup @@ -190,6 +197,7 @@ struct samsung_pin_bank { struct samsung_pin_ctrl { const struct samsung_pin_bank_data *pin_banks; u32 nr_banks; + int nr_ext_resources; int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); @@ -200,7 +208,6 @@ struct samsung_pin_ctrl { /** * struct samsung_pinctrl_drv_data: wrapper for holding driver data together. * @node: global list node - * @virt_base: register base address of the controller. * @dev: device instance representing the controller. * @irq: interrpt number used by the controller to notify gpio interrupts. * @ctrl: pin controller instance managed by the driver. @@ -215,7 +222,6 @@ struct samsung_pin_ctrl { */ struct samsung_pinctrl_drv_data { struct list_head node; - void __iomem *virt_base; struct device *dev; int irq; diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c index f3a8897d4e8f407f04980c8c404b0e5c4643ec2e..cf80ce1dd7cea5a0fdd9c524a6c5a03e1d93e0f2 100644 --- a/drivers/pinctrl/sh-pfc/core.c +++ b/drivers/pinctrl/sh-pfc/core.c @@ -389,6 +389,21 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type) return 0; } +const struct sh_pfc_bias_info * +sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info, + unsigned int num, unsigned int pin) +{ + unsigned int i; + + for (i = 0; i < num; i++) + if (info[i].pin == pin) + return &info[i]; + + WARN_ONCE(1, "Pin %u is not in bias info list\n", pin); + + return NULL; +} + static int sh_pfc_init_ranges(struct sh_pfc *pfc) { struct sh_pfc_pin_range *range; diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h index 0bbdea5849f41713a135b5dd3ae6bc810717e2b3..6d598dd63720856774c0f7b2267f631fc67204a0 100644 --- a/drivers/pinctrl/sh-pfc/core.h +++ b/drivers/pinctrl/sh-pfc/core.h @@ -33,4 +33,8 @@ void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width, int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin); int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type); +const struct sh_pfc_bias_info * +sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info, + unsigned int num, unsigned int pin); + #endif /* __SH_PFC_CORE_H__ */ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c index 18ef7042b3d1b04761b2e194d11601a1f1d20fe6..c3af9ebee4afc7b0cc4d73a7aad0144cbcae8dc9 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c @@ -24,6 +24,7 @@ #include #include +#include "core.h" #include "sh_pfc.h" #define PORT_GP_PUP_1(bank, pin, fn, sfx) \ @@ -2918,183 +2919,182 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define PUPR4 0x110 #define PUPR5 0x114 -static const struct { - u16 reg : 11; - u16 bit : 5; -} pullups[] = { - [RCAR_GP_PIN(0, 6)] = { PUPR0, 0 }, /* A0 */ - [RCAR_GP_PIN(0, 7)] = { PUPR0, 1 }, /* A1 */ - [RCAR_GP_PIN(0, 8)] = { PUPR0, 2 }, /* A2 */ - [RCAR_GP_PIN(0, 9)] = { PUPR0, 3 }, /* A3 */ - [RCAR_GP_PIN(0, 10)] = { PUPR0, 4 }, /* A4 */ - [RCAR_GP_PIN(0, 11)] = { PUPR0, 5 }, /* A5 */ - [RCAR_GP_PIN(0, 12)] = { PUPR0, 6 }, /* A6 */ - [RCAR_GP_PIN(0, 13)] = { PUPR0, 7 }, /* A7 */ - [RCAR_GP_PIN(0, 14)] = { PUPR0, 8 }, /* A8 */ - [RCAR_GP_PIN(0, 15)] = { PUPR0, 9 }, /* A9 */ - [RCAR_GP_PIN(0, 16)] = { PUPR0, 10 }, /* A10 */ - [RCAR_GP_PIN(0, 17)] = { PUPR0, 11 }, /* A11 */ 
- [RCAR_GP_PIN(0, 18)] = { PUPR0, 12 }, /* A12 */ - [RCAR_GP_PIN(0, 19)] = { PUPR0, 13 }, /* A13 */ - [RCAR_GP_PIN(0, 20)] = { PUPR0, 14 }, /* A14 */ - [RCAR_GP_PIN(0, 21)] = { PUPR0, 15 }, /* A15 */ - [RCAR_GP_PIN(0, 22)] = { PUPR0, 16 }, /* A16 */ - [RCAR_GP_PIN(0, 23)] = { PUPR0, 17 }, /* A17 */ - [RCAR_GP_PIN(0, 24)] = { PUPR0, 18 }, /* A18 */ - [RCAR_GP_PIN(0, 25)] = { PUPR0, 19 }, /* A19 */ - [RCAR_GP_PIN(0, 26)] = { PUPR0, 20 }, /* A20 */ - [RCAR_GP_PIN(0, 27)] = { PUPR0, 21 }, /* A21 */ - [RCAR_GP_PIN(0, 28)] = { PUPR0, 22 }, /* A22 */ - [RCAR_GP_PIN(0, 29)] = { PUPR0, 23 }, /* A23 */ - [RCAR_GP_PIN(0, 30)] = { PUPR0, 24 }, /* A24 */ - [RCAR_GP_PIN(0, 31)] = { PUPR0, 25 }, /* A25 */ - [RCAR_GP_PIN(1, 3)] = { PUPR0, 26 }, /* /EX_CS0 */ - [RCAR_GP_PIN(1, 4)] = { PUPR0, 27 }, /* /EX_CS1 */ - [RCAR_GP_PIN(1, 5)] = { PUPR0, 28 }, /* /EX_CS2 */ - [RCAR_GP_PIN(1, 6)] = { PUPR0, 29 }, /* /EX_CS3 */ - [RCAR_GP_PIN(1, 7)] = { PUPR0, 30 }, /* /EX_CS4 */ - [RCAR_GP_PIN(1, 8)] = { PUPR0, 31 }, /* /EX_CS5 */ - - [RCAR_GP_PIN(0, 0)] = { PUPR1, 0 }, /* /PRESETOUT */ - [RCAR_GP_PIN(0, 5)] = { PUPR1, 1 }, /* /BS */ - [RCAR_GP_PIN(1, 0)] = { PUPR1, 2 }, /* RD//WR */ - [RCAR_GP_PIN(1, 1)] = { PUPR1, 3 }, /* /WE0 */ - [RCAR_GP_PIN(1, 2)] = { PUPR1, 4 }, /* /WE1 */ - [RCAR_GP_PIN(1, 11)] = { PUPR1, 5 }, /* EX_WAIT0 */ - [RCAR_GP_PIN(1, 9)] = { PUPR1, 6 }, /* DREQ0 */ - [RCAR_GP_PIN(1, 10)] = { PUPR1, 7 }, /* DACK0 */ - [RCAR_GP_PIN(1, 12)] = { PUPR1, 8 }, /* IRQ0 */ - [RCAR_GP_PIN(1, 13)] = { PUPR1, 9 }, /* IRQ1 */ - - [RCAR_GP_PIN(1, 22)] = { PUPR2, 0 }, /* DU0_DR0 */ - [RCAR_GP_PIN(1, 23)] = { PUPR2, 1 }, /* DU0_DR1 */ - [RCAR_GP_PIN(1, 24)] = { PUPR2, 2 }, /* DU0_DR2 */ - [RCAR_GP_PIN(1, 25)] = { PUPR2, 3 }, /* DU0_DR3 */ - [RCAR_GP_PIN(1, 26)] = { PUPR2, 4 }, /* DU0_DR4 */ - [RCAR_GP_PIN(1, 27)] = { PUPR2, 5 }, /* DU0_DR5 */ - [RCAR_GP_PIN(1, 28)] = { PUPR2, 6 }, /* DU0_DR6 */ - [RCAR_GP_PIN(1, 29)] = { PUPR2, 7 }, /* DU0_DR7 */ - [RCAR_GP_PIN(1, 30)] = { PUPR2, 8 }, /* DU0_DG0 */ - [RCAR_GP_PIN(1, 31)] = { PUPR2, 9 }, /* DU0_DG1 */ - [RCAR_GP_PIN(2, 0)] = { PUPR2, 10 }, /* DU0_DG2 */ - [RCAR_GP_PIN(2, 1)] = { PUPR2, 11 }, /* DU0_DG3 */ - [RCAR_GP_PIN(2, 2)] = { PUPR2, 12 }, /* DU0_DG4 */ - [RCAR_GP_PIN(2, 3)] = { PUPR2, 13 }, /* DU0_DG5 */ - [RCAR_GP_PIN(2, 4)] = { PUPR2, 14 }, /* DU0_DG6 */ - [RCAR_GP_PIN(2, 5)] = { PUPR2, 15 }, /* DU0_DG7 */ - [RCAR_GP_PIN(2, 6)] = { PUPR2, 16 }, /* DU0_DB0 */ - [RCAR_GP_PIN(2, 7)] = { PUPR2, 17 }, /* DU0_DB1 */ - [RCAR_GP_PIN(2, 8)] = { PUPR2, 18 }, /* DU0_DB2 */ - [RCAR_GP_PIN(2, 9)] = { PUPR2, 19 }, /* DU0_DB3 */ - [RCAR_GP_PIN(2, 10)] = { PUPR2, 20 }, /* DU0_DB4 */ - [RCAR_GP_PIN(2, 11)] = { PUPR2, 21 }, /* DU0_DB5 */ - [RCAR_GP_PIN(2, 12)] = { PUPR2, 22 }, /* DU0_DB6 */ - [RCAR_GP_PIN(2, 13)] = { PUPR2, 23 }, /* DU0_DB7 */ - [RCAR_GP_PIN(2, 14)] = { PUPR2, 24 }, /* DU0_DOTCLKIN */ - [RCAR_GP_PIN(2, 15)] = { PUPR2, 25 }, /* DU0_DOTCLKOUT0 */ - [RCAR_GP_PIN(2, 17)] = { PUPR2, 26 }, /* DU0_HSYNC */ - [RCAR_GP_PIN(2, 18)] = { PUPR2, 27 }, /* DU0_VSYNC */ - [RCAR_GP_PIN(2, 19)] = { PUPR2, 28 }, /* DU0_EXODDF */ - [RCAR_GP_PIN(2, 20)] = { PUPR2, 29 }, /* DU0_DISP */ - [RCAR_GP_PIN(2, 21)] = { PUPR2, 30 }, /* DU0_CDE */ - [RCAR_GP_PIN(2, 16)] = { PUPR2, 31 }, /* DU0_DOTCLKOUT1 */ - - [RCAR_GP_PIN(3, 24)] = { PUPR3, 0 }, /* VI0_CLK */ - [RCAR_GP_PIN(3, 25)] = { PUPR3, 1 }, /* VI0_CLKENB */ - [RCAR_GP_PIN(3, 26)] = { PUPR3, 2 }, /* VI0_FIELD */ - [RCAR_GP_PIN(3, 27)] = { PUPR3, 3 }, /* /VI0_HSYNC */ - [RCAR_GP_PIN(3, 28)] = { PUPR3, 4 }, /* /VI0_VSYNC */ - 
[RCAR_GP_PIN(3, 29)] = { PUPR3, 5 }, /* VI0_DATA0 */ - [RCAR_GP_PIN(3, 30)] = { PUPR3, 6 }, /* VI0_DATA1 */ - [RCAR_GP_PIN(3, 31)] = { PUPR3, 7 }, /* VI0_DATA2 */ - [RCAR_GP_PIN(4, 0)] = { PUPR3, 8 }, /* VI0_DATA3 */ - [RCAR_GP_PIN(4, 1)] = { PUPR3, 9 }, /* VI0_DATA4 */ - [RCAR_GP_PIN(4, 2)] = { PUPR3, 10 }, /* VI0_DATA5 */ - [RCAR_GP_PIN(4, 3)] = { PUPR3, 11 }, /* VI0_DATA6 */ - [RCAR_GP_PIN(4, 4)] = { PUPR3, 12 }, /* VI0_DATA7 */ - [RCAR_GP_PIN(4, 5)] = { PUPR3, 13 }, /* VI0_G2 */ - [RCAR_GP_PIN(4, 6)] = { PUPR3, 14 }, /* VI0_G3 */ - [RCAR_GP_PIN(4, 7)] = { PUPR3, 15 }, /* VI0_G4 */ - [RCAR_GP_PIN(4, 8)] = { PUPR3, 16 }, /* VI0_G5 */ - [RCAR_GP_PIN(4, 21)] = { PUPR3, 17 }, /* VI1_DATA12 */ - [RCAR_GP_PIN(4, 22)] = { PUPR3, 18 }, /* VI1_DATA13 */ - [RCAR_GP_PIN(4, 23)] = { PUPR3, 19 }, /* VI1_DATA14 */ - [RCAR_GP_PIN(4, 24)] = { PUPR3, 20 }, /* VI1_DATA15 */ - [RCAR_GP_PIN(4, 9)] = { PUPR3, 21 }, /* ETH_REF_CLK */ - [RCAR_GP_PIN(4, 10)] = { PUPR3, 22 }, /* ETH_TXD0 */ - [RCAR_GP_PIN(4, 11)] = { PUPR3, 23 }, /* ETH_TXD1 */ - [RCAR_GP_PIN(4, 12)] = { PUPR3, 24 }, /* ETH_CRS_DV */ - [RCAR_GP_PIN(4, 13)] = { PUPR3, 25 }, /* ETH_TX_EN */ - [RCAR_GP_PIN(4, 14)] = { PUPR3, 26 }, /* ETH_RX_ER */ - [RCAR_GP_PIN(4, 15)] = { PUPR3, 27 }, /* ETH_RXD0 */ - [RCAR_GP_PIN(4, 16)] = { PUPR3, 28 }, /* ETH_RXD1 */ - [RCAR_GP_PIN(4, 17)] = { PUPR3, 29 }, /* ETH_MDC */ - [RCAR_GP_PIN(4, 18)] = { PUPR3, 30 }, /* ETH_MDIO */ - [RCAR_GP_PIN(4, 19)] = { PUPR3, 31 }, /* ETH_LINK */ - - [RCAR_GP_PIN(3, 6)] = { PUPR4, 0 }, /* SSI_SCK012 */ - [RCAR_GP_PIN(3, 7)] = { PUPR4, 1 }, /* SSI_WS012 */ - [RCAR_GP_PIN(3, 10)] = { PUPR4, 2 }, /* SSI_SDATA0 */ - [RCAR_GP_PIN(3, 9)] = { PUPR4, 3 }, /* SSI_SDATA1 */ - [RCAR_GP_PIN(3, 8)] = { PUPR4, 4 }, /* SSI_SDATA2 */ - [RCAR_GP_PIN(3, 2)] = { PUPR4, 5 }, /* SSI_SCK34 */ - [RCAR_GP_PIN(3, 3)] = { PUPR4, 6 }, /* SSI_WS34 */ - [RCAR_GP_PIN(3, 5)] = { PUPR4, 7 }, /* SSI_SDATA3 */ - [RCAR_GP_PIN(3, 4)] = { PUPR4, 8 }, /* SSI_SDATA4 */ - [RCAR_GP_PIN(2, 31)] = { PUPR4, 9 }, /* SSI_SCK5 */ - [RCAR_GP_PIN(3, 0)] = { PUPR4, 10 }, /* SSI_WS5 */ - [RCAR_GP_PIN(3, 1)] = { PUPR4, 11 }, /* SSI_SDATA5 */ - [RCAR_GP_PIN(2, 28)] = { PUPR4, 12 }, /* SSI_SCK6 */ - [RCAR_GP_PIN(2, 29)] = { PUPR4, 13 }, /* SSI_WS6 */ - [RCAR_GP_PIN(2, 30)] = { PUPR4, 14 }, /* SSI_SDATA6 */ - [RCAR_GP_PIN(2, 24)] = { PUPR4, 15 }, /* SSI_SCK78 */ - [RCAR_GP_PIN(2, 25)] = { PUPR4, 16 }, /* SSI_WS78 */ - [RCAR_GP_PIN(2, 27)] = { PUPR4, 17 }, /* SSI_SDATA7 */ - [RCAR_GP_PIN(2, 26)] = { PUPR4, 18 }, /* SSI_SDATA8 */ - [RCAR_GP_PIN(3, 23)] = { PUPR4, 19 }, /* TCLK0 */ - [RCAR_GP_PIN(3, 11)] = { PUPR4, 20 }, /* SD0_CLK */ - [RCAR_GP_PIN(3, 12)] = { PUPR4, 21 }, /* SD0_CMD */ - [RCAR_GP_PIN(3, 13)] = { PUPR4, 22 }, /* SD0_DAT0 */ - [RCAR_GP_PIN(3, 14)] = { PUPR4, 23 }, /* SD0_DAT1 */ - [RCAR_GP_PIN(3, 15)] = { PUPR4, 24 }, /* SD0_DAT2 */ - [RCAR_GP_PIN(3, 16)] = { PUPR4, 25 }, /* SD0_DAT3 */ - [RCAR_GP_PIN(3, 17)] = { PUPR4, 26 }, /* SD0_CD */ - [RCAR_GP_PIN(3, 18)] = { PUPR4, 27 }, /* SD0_WP */ - [RCAR_GP_PIN(2, 22)] = { PUPR4, 28 }, /* AUDIO_CLKA */ - [RCAR_GP_PIN(2, 23)] = { PUPR4, 29 }, /* AUDIO_CLKB */ - [RCAR_GP_PIN(1, 14)] = { PUPR4, 30 }, /* IRQ2 */ - [RCAR_GP_PIN(1, 15)] = { PUPR4, 31 }, /* IRQ3 */ - - [RCAR_GP_PIN(0, 1)] = { PUPR5, 0 }, /* PENC0 */ - [RCAR_GP_PIN(0, 2)] = { PUPR5, 1 }, /* PENC1 */ - [RCAR_GP_PIN(0, 3)] = { PUPR5, 2 }, /* USB_OVC0 */ - [RCAR_GP_PIN(0, 4)] = { PUPR5, 3 }, /* USB_OVC1 */ - [RCAR_GP_PIN(1, 16)] = { PUPR5, 4 }, /* SCIF_CLK */ - [RCAR_GP_PIN(1, 17)] = { PUPR5, 5 }, /* TX0 */ - 
[RCAR_GP_PIN(1, 18)] = { PUPR5, 6 }, /* RX0 */ - [RCAR_GP_PIN(1, 19)] = { PUPR5, 7 }, /* SCK0 */ - [RCAR_GP_PIN(1, 20)] = { PUPR5, 8 }, /* /CTS0 */ - [RCAR_GP_PIN(1, 21)] = { PUPR5, 9 }, /* /RTS0 */ - [RCAR_GP_PIN(3, 19)] = { PUPR5, 10 }, /* HSPI_CLK0 */ - [RCAR_GP_PIN(3, 20)] = { PUPR5, 11 }, /* /HSPI_CS0 */ - [RCAR_GP_PIN(3, 21)] = { PUPR5, 12 }, /* HSPI_RX0 */ - [RCAR_GP_PIN(3, 22)] = { PUPR5, 13 }, /* HSPI_TX0 */ - [RCAR_GP_PIN(4, 20)] = { PUPR5, 14 }, /* ETH_MAGIC */ - [RCAR_GP_PIN(4, 25)] = { PUPR5, 15 }, /* AVS1 */ - [RCAR_GP_PIN(4, 26)] = { PUPR5, 16 }, /* AVS2 */ +static const struct sh_pfc_bias_info bias_info[] = { + { RCAR_GP_PIN(0, 6), PUPR0, 0 }, /* A0 */ + { RCAR_GP_PIN(0, 7), PUPR0, 1 }, /* A1 */ + { RCAR_GP_PIN(0, 8), PUPR0, 2 }, /* A2 */ + { RCAR_GP_PIN(0, 9), PUPR0, 3 }, /* A3 */ + { RCAR_GP_PIN(0, 10), PUPR0, 4 }, /* A4 */ + { RCAR_GP_PIN(0, 11), PUPR0, 5 }, /* A5 */ + { RCAR_GP_PIN(0, 12), PUPR0, 6 }, /* A6 */ + { RCAR_GP_PIN(0, 13), PUPR0, 7 }, /* A7 */ + { RCAR_GP_PIN(0, 14), PUPR0, 8 }, /* A8 */ + { RCAR_GP_PIN(0, 15), PUPR0, 9 }, /* A9 */ + { RCAR_GP_PIN(0, 16), PUPR0, 10 }, /* A10 */ + { RCAR_GP_PIN(0, 17), PUPR0, 11 }, /* A11 */ + { RCAR_GP_PIN(0, 18), PUPR0, 12 }, /* A12 */ + { RCAR_GP_PIN(0, 19), PUPR0, 13 }, /* A13 */ + { RCAR_GP_PIN(0, 20), PUPR0, 14 }, /* A14 */ + { RCAR_GP_PIN(0, 21), PUPR0, 15 }, /* A15 */ + { RCAR_GP_PIN(0, 22), PUPR0, 16 }, /* A16 */ + { RCAR_GP_PIN(0, 23), PUPR0, 17 }, /* A17 */ + { RCAR_GP_PIN(0, 24), PUPR0, 18 }, /* A18 */ + { RCAR_GP_PIN(0, 25), PUPR0, 19 }, /* A19 */ + { RCAR_GP_PIN(0, 26), PUPR0, 20 }, /* A20 */ + { RCAR_GP_PIN(0, 27), PUPR0, 21 }, /* A21 */ + { RCAR_GP_PIN(0, 28), PUPR0, 22 }, /* A22 */ + { RCAR_GP_PIN(0, 29), PUPR0, 23 }, /* A23 */ + { RCAR_GP_PIN(0, 30), PUPR0, 24 }, /* A24 */ + { RCAR_GP_PIN(0, 31), PUPR0, 25 }, /* A25 */ + { RCAR_GP_PIN(1, 3), PUPR0, 26 }, /* /EX_CS0 */ + { RCAR_GP_PIN(1, 4), PUPR0, 27 }, /* /EX_CS1 */ + { RCAR_GP_PIN(1, 5), PUPR0, 28 }, /* /EX_CS2 */ + { RCAR_GP_PIN(1, 6), PUPR0, 29 }, /* /EX_CS3 */ + { RCAR_GP_PIN(1, 7), PUPR0, 30 }, /* /EX_CS4 */ + { RCAR_GP_PIN(1, 8), PUPR0, 31 }, /* /EX_CS5 */ + + { RCAR_GP_PIN(0, 0), PUPR1, 0 }, /* /PRESETOUT */ + { RCAR_GP_PIN(0, 5), PUPR1, 1 }, /* /BS */ + { RCAR_GP_PIN(1, 0), PUPR1, 2 }, /* RD//WR */ + { RCAR_GP_PIN(1, 1), PUPR1, 3 }, /* /WE0 */ + { RCAR_GP_PIN(1, 2), PUPR1, 4 }, /* /WE1 */ + { RCAR_GP_PIN(1, 11), PUPR1, 5 }, /* EX_WAIT0 */ + { RCAR_GP_PIN(1, 9), PUPR1, 6 }, /* DREQ0 */ + { RCAR_GP_PIN(1, 10), PUPR1, 7 }, /* DACK0 */ + { RCAR_GP_PIN(1, 12), PUPR1, 8 }, /* IRQ0 */ + { RCAR_GP_PIN(1, 13), PUPR1, 9 }, /* IRQ1 */ + + { RCAR_GP_PIN(1, 22), PUPR2, 0 }, /* DU0_DR0 */ + { RCAR_GP_PIN(1, 23), PUPR2, 1 }, /* DU0_DR1 */ + { RCAR_GP_PIN(1, 24), PUPR2, 2 }, /* DU0_DR2 */ + { RCAR_GP_PIN(1, 25), PUPR2, 3 }, /* DU0_DR3 */ + { RCAR_GP_PIN(1, 26), PUPR2, 4 }, /* DU0_DR4 */ + { RCAR_GP_PIN(1, 27), PUPR2, 5 }, /* DU0_DR5 */ + { RCAR_GP_PIN(1, 28), PUPR2, 6 }, /* DU0_DR6 */ + { RCAR_GP_PIN(1, 29), PUPR2, 7 }, /* DU0_DR7 */ + { RCAR_GP_PIN(1, 30), PUPR2, 8 }, /* DU0_DG0 */ + { RCAR_GP_PIN(1, 31), PUPR2, 9 }, /* DU0_DG1 */ + { RCAR_GP_PIN(2, 0), PUPR2, 10 }, /* DU0_DG2 */ + { RCAR_GP_PIN(2, 1), PUPR2, 11 }, /* DU0_DG3 */ + { RCAR_GP_PIN(2, 2), PUPR2, 12 }, /* DU0_DG4 */ + { RCAR_GP_PIN(2, 3), PUPR2, 13 }, /* DU0_DG5 */ + { RCAR_GP_PIN(2, 4), PUPR2, 14 }, /* DU0_DG6 */ + { RCAR_GP_PIN(2, 5), PUPR2, 15 }, /* DU0_DG7 */ + { RCAR_GP_PIN(2, 6), PUPR2, 16 }, /* DU0_DB0 */ + { RCAR_GP_PIN(2, 7), PUPR2, 17 }, /* DU0_DB1 */ + { RCAR_GP_PIN(2, 8), PUPR2, 18 }, /* 
DU0_DB2 */ + { RCAR_GP_PIN(2, 9), PUPR2, 19 }, /* DU0_DB3 */ + { RCAR_GP_PIN(2, 10), PUPR2, 20 }, /* DU0_DB4 */ + { RCAR_GP_PIN(2, 11), PUPR2, 21 }, /* DU0_DB5 */ + { RCAR_GP_PIN(2, 12), PUPR2, 22 }, /* DU0_DB6 */ + { RCAR_GP_PIN(2, 13), PUPR2, 23 }, /* DU0_DB7 */ + { RCAR_GP_PIN(2, 14), PUPR2, 24 }, /* DU0_DOTCLKIN */ + { RCAR_GP_PIN(2, 15), PUPR2, 25 }, /* DU0_DOTCLKOUT0 */ + { RCAR_GP_PIN(2, 17), PUPR2, 26 }, /* DU0_HSYNC */ + { RCAR_GP_PIN(2, 18), PUPR2, 27 }, /* DU0_VSYNC */ + { RCAR_GP_PIN(2, 19), PUPR2, 28 }, /* DU0_EXODDF */ + { RCAR_GP_PIN(2, 20), PUPR2, 29 }, /* DU0_DISP */ + { RCAR_GP_PIN(2, 21), PUPR2, 30 }, /* DU0_CDE */ + { RCAR_GP_PIN(2, 16), PUPR2, 31 }, /* DU0_DOTCLKOUT1 */ + + { RCAR_GP_PIN(3, 24), PUPR3, 0 }, /* VI0_CLK */ + { RCAR_GP_PIN(3, 25), PUPR3, 1 }, /* VI0_CLKENB */ + { RCAR_GP_PIN(3, 26), PUPR3, 2 }, /* VI0_FIELD */ + { RCAR_GP_PIN(3, 27), PUPR3, 3 }, /* /VI0_HSYNC */ + { RCAR_GP_PIN(3, 28), PUPR3, 4 }, /* /VI0_VSYNC */ + { RCAR_GP_PIN(3, 29), PUPR3, 5 }, /* VI0_DATA0 */ + { RCAR_GP_PIN(3, 30), PUPR3, 6 }, /* VI0_DATA1 */ + { RCAR_GP_PIN(3, 31), PUPR3, 7 }, /* VI0_DATA2 */ + { RCAR_GP_PIN(4, 0), PUPR3, 8 }, /* VI0_DATA3 */ + { RCAR_GP_PIN(4, 1), PUPR3, 9 }, /* VI0_DATA4 */ + { RCAR_GP_PIN(4, 2), PUPR3, 10 }, /* VI0_DATA5 */ + { RCAR_GP_PIN(4, 3), PUPR3, 11 }, /* VI0_DATA6 */ + { RCAR_GP_PIN(4, 4), PUPR3, 12 }, /* VI0_DATA7 */ + { RCAR_GP_PIN(4, 5), PUPR3, 13 }, /* VI0_G2 */ + { RCAR_GP_PIN(4, 6), PUPR3, 14 }, /* VI0_G3 */ + { RCAR_GP_PIN(4, 7), PUPR3, 15 }, /* VI0_G4 */ + { RCAR_GP_PIN(4, 8), PUPR3, 16 }, /* VI0_G5 */ + { RCAR_GP_PIN(4, 21), PUPR3, 17 }, /* VI1_DATA12 */ + { RCAR_GP_PIN(4, 22), PUPR3, 18 }, /* VI1_DATA13 */ + { RCAR_GP_PIN(4, 23), PUPR3, 19 }, /* VI1_DATA14 */ + { RCAR_GP_PIN(4, 24), PUPR3, 20 }, /* VI1_DATA15 */ + { RCAR_GP_PIN(4, 9), PUPR3, 21 }, /* ETH_REF_CLK */ + { RCAR_GP_PIN(4, 10), PUPR3, 22 }, /* ETH_TXD0 */ + { RCAR_GP_PIN(4, 11), PUPR3, 23 }, /* ETH_TXD1 */ + { RCAR_GP_PIN(4, 12), PUPR3, 24 }, /* ETH_CRS_DV */ + { RCAR_GP_PIN(4, 13), PUPR3, 25 }, /* ETH_TX_EN */ + { RCAR_GP_PIN(4, 14), PUPR3, 26 }, /* ETH_RX_ER */ + { RCAR_GP_PIN(4, 15), PUPR3, 27 }, /* ETH_RXD0 */ + { RCAR_GP_PIN(4, 16), PUPR3, 28 }, /* ETH_RXD1 */ + { RCAR_GP_PIN(4, 17), PUPR3, 29 }, /* ETH_MDC */ + { RCAR_GP_PIN(4, 18), PUPR3, 30 }, /* ETH_MDIO */ + { RCAR_GP_PIN(4, 19), PUPR3, 31 }, /* ETH_LINK */ + + { RCAR_GP_PIN(3, 6), PUPR4, 0 }, /* SSI_SCK012 */ + { RCAR_GP_PIN(3, 7), PUPR4, 1 }, /* SSI_WS012 */ + { RCAR_GP_PIN(3, 10), PUPR4, 2 }, /* SSI_SDATA0 */ + { RCAR_GP_PIN(3, 9), PUPR4, 3 }, /* SSI_SDATA1 */ + { RCAR_GP_PIN(3, 8), PUPR4, 4 }, /* SSI_SDATA2 */ + { RCAR_GP_PIN(3, 2), PUPR4, 5 }, /* SSI_SCK34 */ + { RCAR_GP_PIN(3, 3), PUPR4, 6 }, /* SSI_WS34 */ + { RCAR_GP_PIN(3, 5), PUPR4, 7 }, /* SSI_SDATA3 */ + { RCAR_GP_PIN(3, 4), PUPR4, 8 }, /* SSI_SDATA4 */ + { RCAR_GP_PIN(2, 31), PUPR4, 9 }, /* SSI_SCK5 */ + { RCAR_GP_PIN(3, 0), PUPR4, 10 }, /* SSI_WS5 */ + { RCAR_GP_PIN(3, 1), PUPR4, 11 }, /* SSI_SDATA5 */ + { RCAR_GP_PIN(2, 28), PUPR4, 12 }, /* SSI_SCK6 */ + { RCAR_GP_PIN(2, 29), PUPR4, 13 }, /* SSI_WS6 */ + { RCAR_GP_PIN(2, 30), PUPR4, 14 }, /* SSI_SDATA6 */ + { RCAR_GP_PIN(2, 24), PUPR4, 15 }, /* SSI_SCK78 */ + { RCAR_GP_PIN(2, 25), PUPR4, 16 }, /* SSI_WS78 */ + { RCAR_GP_PIN(2, 27), PUPR4, 17 }, /* SSI_SDATA7 */ + { RCAR_GP_PIN(2, 26), PUPR4, 18 }, /* SSI_SDATA8 */ + { RCAR_GP_PIN(3, 23), PUPR4, 19 }, /* TCLK0 */ + { RCAR_GP_PIN(3, 11), PUPR4, 20 }, /* SD0_CLK */ + { RCAR_GP_PIN(3, 12), PUPR4, 21 }, /* SD0_CMD */ + { RCAR_GP_PIN(3, 13), PUPR4, 22 }, /* 
SD0_DAT0 */ + { RCAR_GP_PIN(3, 14), PUPR4, 23 }, /* SD0_DAT1 */ + { RCAR_GP_PIN(3, 15), PUPR4, 24 }, /* SD0_DAT2 */ + { RCAR_GP_PIN(3, 16), PUPR4, 25 }, /* SD0_DAT3 */ + { RCAR_GP_PIN(3, 17), PUPR4, 26 }, /* SD0_CD */ + { RCAR_GP_PIN(3, 18), PUPR4, 27 }, /* SD0_WP */ + { RCAR_GP_PIN(2, 22), PUPR4, 28 }, /* AUDIO_CLKA */ + { RCAR_GP_PIN(2, 23), PUPR4, 29 }, /* AUDIO_CLKB */ + { RCAR_GP_PIN(1, 14), PUPR4, 30 }, /* IRQ2 */ + { RCAR_GP_PIN(1, 15), PUPR4, 31 }, /* IRQ3 */ + + { RCAR_GP_PIN(0, 1), PUPR5, 0 }, /* PENC0 */ + { RCAR_GP_PIN(0, 2), PUPR5, 1 }, /* PENC1 */ + { RCAR_GP_PIN(0, 3), PUPR5, 2 }, /* USB_OVC0 */ + { RCAR_GP_PIN(0, 4), PUPR5, 3 }, /* USB_OVC1 */ + { RCAR_GP_PIN(1, 16), PUPR5, 4 }, /* SCIF_CLK */ + { RCAR_GP_PIN(1, 17), PUPR5, 5 }, /* TX0 */ + { RCAR_GP_PIN(1, 18), PUPR5, 6 }, /* RX0 */ + { RCAR_GP_PIN(1, 19), PUPR5, 7 }, /* SCK0 */ + { RCAR_GP_PIN(1, 20), PUPR5, 8 }, /* /CTS0 */ + { RCAR_GP_PIN(1, 21), PUPR5, 9 }, /* /RTS0 */ + { RCAR_GP_PIN(3, 19), PUPR5, 10 }, /* HSPI_CLK0 */ + { RCAR_GP_PIN(3, 20), PUPR5, 11 }, /* /HSPI_CS0 */ + { RCAR_GP_PIN(3, 21), PUPR5, 12 }, /* HSPI_RX0 */ + { RCAR_GP_PIN(3, 22), PUPR5, 13 }, /* HSPI_TX0 */ + { RCAR_GP_PIN(4, 20), PUPR5, 14 }, /* ETH_MAGIC */ + { RCAR_GP_PIN(4, 25), PUPR5, 15 }, /* AVS1 */ + { RCAR_GP_PIN(4, 26), PUPR5, 16 }, /* AVS2 */ }; static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin) { + const struct sh_pfc_bias_info *info; void __iomem *addr; - if (WARN_ON_ONCE(!pullups[pin].reg)) + info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin); + if (!info) return PIN_CONFIG_BIAS_DISABLE; - addr = pfc->windows->virt + pullups[pin].reg; + addr = pfc->windows->virt + info->reg; - if (ioread32(addr) & BIT(pullups[pin].bit)) + if (ioread32(addr) & BIT(info->bit)) return PIN_CONFIG_BIAS_PULL_UP; else return PIN_CONFIG_BIAS_DISABLE; @@ -3103,15 +3103,17 @@ static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc, static void r8a7778_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin, unsigned int bias) { + const struct sh_pfc_bias_info *info; void __iomem *addr; u32 value; u32 bit; - if (WARN_ON_ONCE(!pullups[pin].reg)) + info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin); + if (!info) return; - addr = pfc->windows->virt + pullups[pin].reg; - bit = BIT(pullups[pin].bit); + addr = pfc->windows->virt + info->reg; + bit = BIT(info->bit); value = ioread32(addr) & ~bit; if (bias == PIN_CONFIG_BIAS_PULL_UP) diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c index 2e8cc2adbed7e2514096b9b75c4a9012d90066a4..135ed5cbeb44c5e6aaf76ca30a96e5c52751f7b3 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c @@ -523,6 +523,22 @@ MOD_SEL0_2_1 MOD_SEL1_2 \ MOD_SEL1_1 \ MOD_SEL1_0 MOD_SEL2_0 +/* + * These pins are not able to be muxed but have other properties + * that can be set, such as drive-strength or pull-up/pull-down enable. 
+ */ +#define PINMUX_STATIC \ + FM(QSPI0_SPCLK) FM(QSPI0_SSL) FM(QSPI0_MOSI_IO0) FM(QSPI0_MISO_IO1) \ + FM(QSPI0_IO2) FM(QSPI0_IO3) \ + FM(QSPI1_SPCLK) FM(QSPI1_SSL) FM(QSPI1_MOSI_IO0) FM(QSPI1_MISO_IO1) \ + FM(QSPI1_IO2) FM(QSPI1_IO3) \ + FM(RPC_INT) FM(RPC_WP) FM(RPC_RESET) \ + FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) FM(AVB_TD3) \ + FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \ + FM(AVB_TXCREFCLK) FM(AVB_MDIO) \ + FM(CLKOUT) FM(PRESETOUT) \ + FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) FM(DU_DOTCLKIN3) \ + FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) enum { PINMUX_RESERVED = 0, @@ -548,6 +564,7 @@ enum { PINMUX_GPSR PINMUX_IPSR PINMUX_MOD_SELS + PINMUX_STATIC PINMUX_MARK_END, #undef F_ #undef FM @@ -1412,10 +1429,78 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP17_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP17_7_4, RIF2_D1_B, SEL_DRIF2_1), PINMUX_IPSR_GPSR(IP17_7_4, TPU0TO3), + +/* + * Static pins cannot be muxed between different functions but + * still need a mark entry in the pinmux list. Add each static + * pin to the list without an associated function. The sh-pfc + * core will do the right thing and skip trying to mux the pin + * while still applying configuration to it. + */ +#define FM(x) PINMUX_DATA(x##_MARK, 0), + PINMUX_STATIC +#undef FM }; +/* + * R8A7795 has 8 banks with 32 GPIOs in each => 256 GPIOs. + * Physical layout rows: A - AW, cols: 1 - 39. + */ +#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r)) +#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300) +#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c) + static const struct sh_pfc_pin pinmux_pins[] = { PINMUX_GPIO_GP_ALL(), + + /* + * Pins not associated with a GPIO port. + * + * The pin positions are different between different r8a7795 + * packages; all that is needed for the pfc driver is a unique + * number for each pin. To this end use the pin layout from + * R-Car H3SiP to calculate a unique number for each pin. 
+ */ + SH_PFC_PIN_NAMED_CFG('A', 8, AVB_TX_CTL, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('A', 9, AVB_MDIO, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('A', 12, AVB_TXCREFCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('A', 13, AVB_RD0, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('A', 14, AVB_RD2, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('A', 16, AVB_RX_CTL, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('A', 17, AVB_TD2, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('A', 18, AVB_TD0, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('A', 19, AVB_TXC, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('B', 13, AVB_RD1, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('B', 14, AVB_RD3, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('B', 17, AVB_TD3, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('F', 1, CLKOUT, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('V', 6, RPC_WP#, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('V', 7, RPC_RESET#, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('W', 3, QSPI0_SPCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('Y', 3, QSPI0_SSL, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('Y', 6, QSPI0_IO2, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG('Y', 7, RPC_INT#, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 4, QSPI0_MISO_IO1, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 6, QSPI0_IO3, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 3, QSPI1_IO3, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 5, QSPI0_MOSI_IO0, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 7, QSPI1_MOSI_IO0, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 38, FSCLKST#, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 4, QSPI1_IO2, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 5, QSPI1_MISO_IO1, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 7, DU_DOTCLKIN0, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 8, DU_DOTCLKIN1, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 7, DU_DOTCLKIN2, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN3, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 30, TMS, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 28, TDO, SH_PFC_PIN_CFG_DRIVE_STRENGTH), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, SH_PFC_PIN_CFG_DRIVE_STRENGTH), }; /* - AUDIO CLOCK ------------------------------------------------------------ */ @@ -1563,11 +1648,33 @@ static const unsigned int avb_phy_int_mux[] = { AVB_PHY_INT_MARK, }; static const unsigned int avb_mdc_pins[] = { - /* AVB_MDC */ - RCAR_GP_PIN(2, 9), + /* AVB_MDC, AVB_MDIO */ + RCAR_GP_PIN(2, 9), PIN_NUMBER('A', 9), }; static const unsigned int avb_mdc_mux[] = { - 
AVB_MDC_MARK, + AVB_MDC_MARK, AVB_MDIO_MARK, +}; +static const unsigned int avb_mii_pins[] = { + /* + * AVB_TX_CTL, AVB_TXC, AVB_TD0, + * AVB_TD1, AVB_TD2, AVB_TD3, + * AVB_RX_CTL, AVB_RXC, AVB_RD0, + * AVB_RD1, AVB_RD2, AVB_RD3, + * AVB_TXCREFCLK + */ + PIN_NUMBER('A', 8), PIN_NUMBER('A', 19), PIN_NUMBER('A', 18), + PIN_NUMBER('B', 18), PIN_NUMBER('A', 17), PIN_NUMBER('B', 17), + PIN_NUMBER('A', 16), PIN_NUMBER('B', 19), PIN_NUMBER('A', 13), + PIN_NUMBER('B', 13), PIN_NUMBER('A', 14), PIN_NUMBER('B', 14), + PIN_NUMBER('A', 12), + +}; +static const unsigned int avb_mii_mux[] = { + AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK, + AVB_TD1_MARK, AVB_TD2_MARK, AVB_TD3_MARK, + AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK, + AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK, + AVB_TXCREFCLK_MARK, }; static const unsigned int avb_avtp_pps_pins[] = { /* AVB_AVTP_PPS */ @@ -3613,6 +3720,55 @@ static const unsigned int usb2_mux[] = { USB2_PWEN_MARK, USB2_OVC_MARK, }; +/* - QSPI0 ------------------------------------------------------------------ */ +static const unsigned int qspi0_ctrl_pins[] = { + /* QSPI0_SPCLK, QSPI0_SSL */ + PIN_NUMBER('W', 3), PIN_NUMBER('Y', 3), +}; +static const unsigned int qspi0_ctrl_mux[] = { + QSPI0_SPCLK_MARK, QSPI0_SSL_MARK, +}; +static const unsigned int qspi0_data2_pins[] = { + /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */ + PIN_A_NUMBER('C', 5), PIN_A_NUMBER('B', 4), +}; +static const unsigned int qspi0_data2_mux[] = { + QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK, +}; +static const unsigned int qspi0_data4_pins[] = { + /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1, QSPI0_IO2, QSPI0_IO3 */ + PIN_A_NUMBER('C', 5), PIN_A_NUMBER('B', 4), + PIN_NUMBER('Y', 6), PIN_A_NUMBER('B', 6), +}; +static const unsigned int qspi0_data4_mux[] = { + QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK, + QSPI0_IO2_MARK, QSPI0_IO3_MARK, +}; +/* - QSPI1 ------------------------------------------------------------------ */ +static const unsigned int qspi1_ctrl_pins[] = { + /* QSPI1_SPCLK, QSPI1_SSL */ + PIN_NUMBER('V', 3), PIN_NUMBER('V', 5), +}; +static const unsigned int qspi1_ctrl_mux[] = { + QSPI1_SPCLK_MARK, QSPI1_SSL_MARK, +}; +static const unsigned int qspi1_data2_pins[] = { + /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */ + PIN_A_NUMBER('C', 7), PIN_A_NUMBER('E', 5), +}; +static const unsigned int qspi1_data2_mux[] = { + QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK, +}; +static const unsigned int qspi1_data4_pins[] = { + /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1, QSPI1_IO2, QSPI1_IO3 */ + PIN_A_NUMBER('C', 7), PIN_A_NUMBER('E', 5), + PIN_A_NUMBER('E', 4), PIN_A_NUMBER('C', 3), +}; +static const unsigned int qspi1_data4_mux[] = { + QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK, + QSPI1_IO2_MARK, QSPI1_IO3_MARK, +}; + static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(audio_clk_a_a), SH_PFC_PIN_GROUP(audio_clk_a_b), @@ -3635,6 +3791,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(avb_magic), SH_PFC_PIN_GROUP(avb_phy_int), SH_PFC_PIN_GROUP(avb_mdc), + SH_PFC_PIN_GROUP(avb_mii), SH_PFC_PIN_GROUP(avb_avtp_pps), SH_PFC_PIN_GROUP(avb_avtp_match_a), SH_PFC_PIN_GROUP(avb_avtp_capture_a), @@ -3912,6 +4069,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(usb0), SH_PFC_PIN_GROUP(usb1), SH_PFC_PIN_GROUP(usb2), + SH_PFC_PIN_GROUP(qspi0_ctrl), + SH_PFC_PIN_GROUP(qspi0_data2), + SH_PFC_PIN_GROUP(qspi0_data4), + SH_PFC_PIN_GROUP(qspi1_ctrl), + SH_PFC_PIN_GROUP(qspi1_data2), + SH_PFC_PIN_GROUP(qspi1_data4), }; static const char * const audio_clk_groups[] = { @@ -3939,6 +4102,7 @@ static 
const char * const avb_groups[] = { "avb_magic", "avb_phy_int", "avb_mdc", + "avb_mii", "avb_avtp_pps", "avb_avtp_match_a", "avb_avtp_capture_a", @@ -4356,6 +4520,18 @@ static const char * const usb2_groups[] = { "usb2", }; +static const char * const qspi0_groups[] = { + "qspi0_ctrl", + "qspi0_data2", + "qspi0_data4", +}; + +static const char * const qspi1_groups[] = { + "qspi1_ctrl", + "qspi1_data2", + "qspi1_data4", +}; + static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(audio_clk), SH_PFC_FUNCTION(avb), @@ -4405,6 +4581,8 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(usb0), SH_PFC_FUNCTION(usb1), SH_PFC_FUNCTION(usb2), + SH_PFC_FUNCTION(qspi0), + SH_PFC_FUNCTION(qspi1), }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { @@ -4962,10 +5140,45 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }; static const struct pinmux_drive_reg pinmux_drive_regs[] = { + { PINMUX_DRIVE_REG("DRVCTRL0", 0xe6060300) { + { PIN_NUMBER('W', 3), 28, 2 }, /* QSPI0_SPCLK */ + { PIN_A_NUMBER('C', 5), 24, 2 }, /* QSPI0_MOSI_IO0 */ + { PIN_A_NUMBER('B', 4), 20, 2 }, /* QSPI0_MISO_IO1 */ + { PIN_NUMBER('Y', 6), 16, 2 }, /* QSPI0_IO2 */ + { PIN_A_NUMBER('B', 6), 12, 2 }, /* QSPI0_IO3 */ + { PIN_NUMBER('Y', 3), 8, 2 }, /* QSPI0_SSL */ + { PIN_NUMBER('V', 3), 4, 2 }, /* QSPI1_SPCLK */ + { PIN_A_NUMBER('C', 7), 0, 2 }, /* QSPI1_MOSI_IO0 */ + } }, + { PINMUX_DRIVE_REG("DRVCTRL1", 0xe6060304) { + { PIN_A_NUMBER('E', 5), 28, 2 }, /* QSPI1_MISO_IO1 */ + { PIN_A_NUMBER('E', 4), 24, 2 }, /* QSPI1_IO2 */ + { PIN_A_NUMBER('C', 3), 20, 2 }, /* QSPI1_IO3 */ + { PIN_NUMBER('V', 5), 16, 2 }, /* QSPI1_SSL */ + { PIN_NUMBER('Y', 7), 12, 2 }, /* RPC_INT# */ + { PIN_NUMBER('V', 6), 8, 2 }, /* RPC_WP# */ + { PIN_NUMBER('V', 7), 4, 2 }, /* RPC_RESET# */ + { PIN_NUMBER('A', 16), 0, 3 }, /* AVB_RX_CTL */ + } }, + { PINMUX_DRIVE_REG("DRVCTRL2", 0xe6060308) { + { PIN_NUMBER('B', 19), 28, 3 }, /* AVB_RXC */ + { PIN_NUMBER('A', 13), 24, 3 }, /* AVB_RD0 */ + { PIN_NUMBER('B', 13), 20, 3 }, /* AVB_RD1 */ + { PIN_NUMBER('A', 14), 16, 3 }, /* AVB_RD2 */ + { PIN_NUMBER('B', 14), 12, 3 }, /* AVB_RD3 */ + { PIN_NUMBER('A', 8), 8, 3 }, /* AVB_TX_CTL */ + { PIN_NUMBER('A', 19), 4, 3 }, /* AVB_TXC */ + { PIN_NUMBER('A', 18), 0, 3 }, /* AVB_TD0 */ + } }, { PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) { - { RCAR_GP_PIN(2, 9), 8, 3 }, /* AVB_MDC */ - { RCAR_GP_PIN(2, 10), 4, 3 }, /* AVB_MAGIC */ - { RCAR_GP_PIN(2, 11), 0, 3 }, /* AVB_PHY_INT */ + { PIN_NUMBER('B', 18), 28, 3 }, /* AVB_TD1 */ + { PIN_NUMBER('A', 17), 24, 3 }, /* AVB_TD2 */ + { PIN_NUMBER('B', 17), 20, 3 }, /* AVB_TD3 */ + { PIN_NUMBER('A', 12), 16, 3 }, /* AVB_TXCREFCLK */ + { PIN_NUMBER('A', 9), 12, 3 }, /* AVB_MDIO */ + { RCAR_GP_PIN(2, 9), 8, 3 }, /* AVB_MDC */ + { RCAR_GP_PIN(2, 10), 4, 3 }, /* AVB_MAGIC */ + { RCAR_GP_PIN(2, 11), 0, 3 }, /* AVB_PHY_INT */ } }, { PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) { { RCAR_GP_PIN(2, 12), 28, 3 }, /* AVB_LINK */ @@ -5008,6 +5221,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = { { RCAR_GP_PIN(1, 19), 0, 3 }, /* A19 */ } }, { PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) { + { PIN_NUMBER('F', 1), 28, 3 }, /* CLKOUT */ { RCAR_GP_PIN(1, 20), 24, 3 }, /* CS0 */ { RCAR_GP_PIN(1, 21), 20, 3 }, /* CS1_A26 */ { RCAR_GP_PIN(1, 22), 16, 3 }, /* BS */ @@ -5018,6 +5232,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = { } }, { PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) { { RCAR_GP_PIN(1, 27), 28, 3 }, /* EX_WAIT0 */ + { PIN_NUMBER('C', 1), 24, 3 }, /* PRESETOUT# 
*/ { RCAR_GP_PIN(0, 0), 20, 3 }, /* D0 */ { RCAR_GP_PIN(0, 1), 16, 3 }, /* D1 */ { RCAR_GP_PIN(0, 2), 12, 3 }, /* D2 */ @@ -5036,20 +5251,30 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = { { RCAR_GP_PIN(0, 13), 0, 3 }, /* D13 */ } }, { PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) { - { RCAR_GP_PIN(0, 14), 28, 3 }, /* D14 */ - { RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */ - { RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */ - { RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */ - { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */ - { RCAR_GP_PIN(7, 3), 8, 3 }, /* HDMI1_CEC */ + { RCAR_GP_PIN(0, 14), 28, 3 }, /* D14 */ + { RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */ + { RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */ + { RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */ + { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */ + { RCAR_GP_PIN(7, 3), 8, 3 }, /* HDMI1_CEC */ + { PIN_A_NUMBER('P', 7), 4, 2 }, /* DU_DOTCLKIN0 */ + { PIN_A_NUMBER('P', 8), 0, 2 }, /* DU_DOTCLKIN1 */ + } }, + { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) { + { PIN_A_NUMBER('R', 7), 28, 2 }, /* DU_DOTCLKIN2 */ + { PIN_A_NUMBER('R', 8), 24, 2 }, /* DU_DOTCLKIN3 */ + { PIN_A_NUMBER('D', 38), 20, 2 }, /* FSCLKST# */ + { PIN_A_NUMBER('R', 30), 4, 2 }, /* TMS */ } }, { PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) { - { RCAR_GP_PIN(3, 0), 20, 3 }, /* SD0_CLK */ - { RCAR_GP_PIN(3, 1), 16, 3 }, /* SD0_CMD */ - { RCAR_GP_PIN(3, 2), 12, 3 }, /* SD0_DAT0 */ - { RCAR_GP_PIN(3, 3), 8, 3 }, /* SD0_DAT1 */ - { RCAR_GP_PIN(3, 4), 4, 3 }, /* SD0_DAT2 */ - { RCAR_GP_PIN(3, 5), 0, 3 }, /* SD0_DAT3 */ + { PIN_A_NUMBER('T', 28), 28, 2 }, /* TDO */ + { PIN_A_NUMBER('T', 30), 24, 2 }, /* ASEBRK */ + { RCAR_GP_PIN(3, 0), 20, 3 }, /* SD0_CLK */ + { RCAR_GP_PIN(3, 1), 16, 3 }, /* SD0_CMD */ + { RCAR_GP_PIN(3, 2), 12, 3 }, /* SD0_DAT0 */ + { RCAR_GP_PIN(3, 3), 8, 3 }, /* SD0_DAT1 */ + { RCAR_GP_PIN(3, 4), 4, 3 }, /* SD0_DAT2 */ + { RCAR_GP_PIN(3, 5), 0, 3 }, /* SD0_DAT3 */ } }, { PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) { { RCAR_GP_PIN(3, 6), 28, 3 }, /* SD1_CLK */ @@ -5118,6 +5343,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = { { RCAR_GP_PIN(5, 23), 16, 3 }, /* MLB_CLK */ { RCAR_GP_PIN(5, 24), 12, 3 }, /* MLB_SIG */ { RCAR_GP_PIN(5, 25), 8, 3 }, /* MLB_DAT */ + { PIN_NUMBER('H', 37), 4, 3 }, /* MLB_REF */ { RCAR_GP_PIN(6, 0), 0, 3 }, /* SSI_SCK01239 */ } }, { PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) { @@ -5188,206 +5414,206 @@ static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc #define PU5 0x14 #define PU6 0x18 -static const struct { - u16 reg : 11; - u16 bit : 5; -} pullups[] = { - [RCAR_GP_PIN(2, 11)] = { PU0, 31 }, /* AVB_PHY_INT */ - [RCAR_GP_PIN(2, 10)] = { PU0, 30 }, /* AVB_MAGIC */ - [RCAR_GP_PIN(2, 9)] = { PU0, 29 }, /* AVB_MDC */ - - [RCAR_GP_PIN(1, 19)] = { PU1, 31 }, /* A19 */ - [RCAR_GP_PIN(1, 18)] = { PU1, 30 }, /* A18 */ - [RCAR_GP_PIN(1, 17)] = { PU1, 29 }, /* A17 */ - [RCAR_GP_PIN(1, 16)] = { PU1, 28 }, /* A16 */ - [RCAR_GP_PIN(1, 15)] = { PU1, 27 }, /* A15 */ - [RCAR_GP_PIN(1, 14)] = { PU1, 26 }, /* A14 */ - [RCAR_GP_PIN(1, 13)] = { PU1, 25 }, /* A13 */ - [RCAR_GP_PIN(1, 12)] = { PU1, 24 }, /* A12 */ - [RCAR_GP_PIN(1, 11)] = { PU1, 23 }, /* A11 */ - [RCAR_GP_PIN(1, 10)] = { PU1, 22 }, /* A10 */ - [RCAR_GP_PIN(1, 9)] = { PU1, 21 }, /* A9 */ - [RCAR_GP_PIN(1, 8)] = { PU1, 20 }, /* A8 */ - [RCAR_GP_PIN(1, 7)] = { PU1, 19 }, /* A7 */ - [RCAR_GP_PIN(1, 6)] = { PU1, 18 }, /* A6 */ - [RCAR_GP_PIN(1, 5)] = { PU1, 17 }, /* A5 */ - [RCAR_GP_PIN(1, 4)] = { PU1, 16 }, /* A4 */ - [RCAR_GP_PIN(1, 3)] = { PU1, 15 }, /* A3 */ - [RCAR_GP_PIN(1, 2)] = 
{ PU1, 14 }, /* A2 */ - [RCAR_GP_PIN(1, 1)] = { PU1, 13 }, /* A1 */ - [RCAR_GP_PIN(1, 0)] = { PU1, 12 }, /* A0 */ - [RCAR_GP_PIN(2, 8)] = { PU1, 11 }, /* PWM2_A */ - [RCAR_GP_PIN(2, 7)] = { PU1, 10 }, /* PWM1_A */ - [RCAR_GP_PIN(2, 6)] = { PU1, 9 }, /* PWM0 */ - [RCAR_GP_PIN(2, 5)] = { PU1, 8 }, /* IRQ5 */ - [RCAR_GP_PIN(2, 4)] = { PU1, 7 }, /* IRQ4 */ - [RCAR_GP_PIN(2, 3)] = { PU1, 6 }, /* IRQ3 */ - [RCAR_GP_PIN(2, 2)] = { PU1, 5 }, /* IRQ2 */ - [RCAR_GP_PIN(2, 1)] = { PU1, 4 }, /* IRQ1 */ - [RCAR_GP_PIN(2, 0)] = { PU1, 3 }, /* IRQ0 */ - [RCAR_GP_PIN(2, 14)] = { PU1, 2 }, /* AVB_AVTP_CAPTURE_A */ - [RCAR_GP_PIN(2, 13)] = { PU1, 1 }, /* AVB_AVTP_MATCH_A */ - [RCAR_GP_PIN(2, 12)] = { PU1, 0 }, /* AVB_LINK */ - - [RCAR_GP_PIN(7, 3)] = { PU2, 29 }, /* HDMI1_CEC */ - [RCAR_GP_PIN(7, 2)] = { PU2, 28 }, /* HDMI0_CEC */ - [RCAR_GP_PIN(7, 1)] = { PU2, 27 }, /* AVS2 */ - [RCAR_GP_PIN(7, 0)] = { PU2, 26 }, /* AVS1 */ - [RCAR_GP_PIN(0, 15)] = { PU2, 25 }, /* D15 */ - [RCAR_GP_PIN(0, 14)] = { PU2, 24 }, /* D14 */ - [RCAR_GP_PIN(0, 13)] = { PU2, 23 }, /* D13 */ - [RCAR_GP_PIN(0, 12)] = { PU2, 22 }, /* D12 */ - [RCAR_GP_PIN(0, 11)] = { PU2, 21 }, /* D11 */ - [RCAR_GP_PIN(0, 10)] = { PU2, 20 }, /* D10 */ - [RCAR_GP_PIN(0, 9)] = { PU2, 19 }, /* D9 */ - [RCAR_GP_PIN(0, 8)] = { PU2, 18 }, /* D8 */ - [RCAR_GP_PIN(0, 7)] = { PU2, 17 }, /* D7 */ - [RCAR_GP_PIN(0, 6)] = { PU2, 16 }, /* D6 */ - [RCAR_GP_PIN(0, 5)] = { PU2, 15 }, /* D5 */ - [RCAR_GP_PIN(0, 4)] = { PU2, 14 }, /* D4 */ - [RCAR_GP_PIN(0, 3)] = { PU2, 13 }, /* D3 */ - [RCAR_GP_PIN(0, 2)] = { PU2, 12 }, /* D2 */ - [RCAR_GP_PIN(0, 1)] = { PU2, 11 }, /* D1 */ - [RCAR_GP_PIN(0, 0)] = { PU2, 10 }, /* D0 */ - [RCAR_GP_PIN(1, 27)] = { PU2, 8 }, /* EX_WAIT0_A */ - [RCAR_GP_PIN(1, 26)] = { PU2, 7 }, /* WE1_N */ - [RCAR_GP_PIN(1, 25)] = { PU2, 6 }, /* WE0_N */ - [RCAR_GP_PIN(1, 24)] = { PU2, 5 }, /* RD_WR_N */ - [RCAR_GP_PIN(1, 23)] = { PU2, 4 }, /* RD_N */ - [RCAR_GP_PIN(1, 22)] = { PU2, 3 }, /* BS_N */ - [RCAR_GP_PIN(1, 21)] = { PU2, 2 }, /* CS1_N_A26 */ - [RCAR_GP_PIN(1, 20)] = { PU2, 1 }, /* CS0_N */ - - [RCAR_GP_PIN(4, 9)] = { PU3, 31 }, /* SD3_DAT0 */ - [RCAR_GP_PIN(4, 8)] = { PU3, 30 }, /* SD3_CMD */ - [RCAR_GP_PIN(4, 7)] = { PU3, 29 }, /* SD3_CLK */ - [RCAR_GP_PIN(4, 6)] = { PU3, 28 }, /* SD2_DS */ - [RCAR_GP_PIN(4, 5)] = { PU3, 27 }, /* SD2_DAT3 */ - [RCAR_GP_PIN(4, 4)] = { PU3, 26 }, /* SD2_DAT2 */ - [RCAR_GP_PIN(4, 3)] = { PU3, 25 }, /* SD2_DAT1 */ - [RCAR_GP_PIN(4, 2)] = { PU3, 24 }, /* SD2_DAT0 */ - [RCAR_GP_PIN(4, 1)] = { PU3, 23 }, /* SD2_CMD */ - [RCAR_GP_PIN(4, 0)] = { PU3, 22 }, /* SD2_CLK */ - [RCAR_GP_PIN(3, 11)] = { PU3, 21 }, /* SD1_DAT3 */ - [RCAR_GP_PIN(3, 10)] = { PU3, 20 }, /* SD1_DAT2 */ - [RCAR_GP_PIN(3, 9)] = { PU3, 19 }, /* SD1_DAT1 */ - [RCAR_GP_PIN(3, 8)] = { PU3, 18 }, /* SD1_DAT0 */ - [RCAR_GP_PIN(3, 7)] = { PU3, 17 }, /* SD1_CMD */ - [RCAR_GP_PIN(3, 6)] = { PU3, 16 }, /* SD1_CLK */ - [RCAR_GP_PIN(3, 5)] = { PU3, 15 }, /* SD0_DAT3 */ - [RCAR_GP_PIN(3, 4)] = { PU3, 14 }, /* SD0_DAT2 */ - [RCAR_GP_PIN(3, 3)] = { PU3, 13 }, /* SD0_DAT1 */ - [RCAR_GP_PIN(3, 2)] = { PU3, 12 }, /* SD0_DAT0 */ - [RCAR_GP_PIN(3, 1)] = { PU3, 11 }, /* SD0_CMD */ - [RCAR_GP_PIN(3, 0)] = { PU3, 10 }, /* SD0_CLK */ - - [RCAR_GP_PIN(5, 19)] = { PU4, 31 }, /* MSIOF0_SS1 */ - [RCAR_GP_PIN(5, 18)] = { PU4, 30 }, /* MSIOF0_SYNC */ - [RCAR_GP_PIN(5, 17)] = { PU4, 29 }, /* MSIOF0_SCK */ - [RCAR_GP_PIN(5, 16)] = { PU4, 28 }, /* HRTS0_N */ - [RCAR_GP_PIN(5, 15)] = { PU4, 27 }, /* HCTS0_N */ - [RCAR_GP_PIN(5, 14)] = { PU4, 26 }, /* HTX0 */ - [RCAR_GP_PIN(5, 13)] = 
{ PU4, 25 }, /* HRX0 */ - [RCAR_GP_PIN(5, 12)] = { PU4, 24 }, /* HSCK0 */ - [RCAR_GP_PIN(5, 11)] = { PU4, 23 }, /* RX2_A */ - [RCAR_GP_PIN(5, 10)] = { PU4, 22 }, /* TX2_A */ - [RCAR_GP_PIN(5, 9)] = { PU4, 21 }, /* SCK2 */ - [RCAR_GP_PIN(5, 8)] = { PU4, 20 }, /* RTS1_N_TANS */ - [RCAR_GP_PIN(5, 7)] = { PU4, 19 }, /* CTS1_N */ - [RCAR_GP_PIN(5, 6)] = { PU4, 18 }, /* TX1_A */ - [RCAR_GP_PIN(5, 5)] = { PU4, 17 }, /* RX1_A */ - [RCAR_GP_PIN(5, 4)] = { PU4, 16 }, /* RTS0_N_TANS */ - [RCAR_GP_PIN(5, 3)] = { PU4, 15 }, /* CTS0_N */ - [RCAR_GP_PIN(5, 2)] = { PU4, 14 }, /* TX0 */ - [RCAR_GP_PIN(5, 1)] = { PU4, 13 }, /* RX0 */ - [RCAR_GP_PIN(5, 0)] = { PU4, 12 }, /* SCK0 */ - [RCAR_GP_PIN(3, 15)] = { PU4, 11 }, /* SD1_WP */ - [RCAR_GP_PIN(3, 14)] = { PU4, 10 }, /* SD1_CD */ - [RCAR_GP_PIN(3, 13)] = { PU4, 9 }, /* SD0_WP */ - [RCAR_GP_PIN(3, 12)] = { PU4, 8 }, /* SD0_CD */ - [RCAR_GP_PIN(4, 17)] = { PU4, 7 }, /* SD3_DS */ - [RCAR_GP_PIN(4, 16)] = { PU4, 6 }, /* SD3_DAT7 */ - [RCAR_GP_PIN(4, 15)] = { PU4, 5 }, /* SD3_DAT6 */ - [RCAR_GP_PIN(4, 14)] = { PU4, 4 }, /* SD3_DAT5 */ - [RCAR_GP_PIN(4, 13)] = { PU4, 3 }, /* SD3_DAT4 */ - [RCAR_GP_PIN(4, 12)] = { PU4, 2 }, /* SD3_DAT3 */ - [RCAR_GP_PIN(4, 11)] = { PU4, 1 }, /* SD3_DAT2 */ - [RCAR_GP_PIN(4, 10)] = { PU4, 0 }, /* SD3_DAT1 */ - - [RCAR_GP_PIN(6, 24)] = { PU5, 31 }, /* USB0_PWEN */ - [RCAR_GP_PIN(6, 23)] = { PU5, 30 }, /* AUDIO_CLKB_B */ - [RCAR_GP_PIN(6, 22)] = { PU5, 29 }, /* AUDIO_CLKA_A */ - [RCAR_GP_PIN(6, 21)] = { PU5, 28 }, /* SSI_SDATA9_A */ - [RCAR_GP_PIN(6, 20)] = { PU5, 27 }, /* SSI_SDATA8 */ - [RCAR_GP_PIN(6, 19)] = { PU5, 26 }, /* SSI_SDATA7 */ - [RCAR_GP_PIN(6, 18)] = { PU5, 25 }, /* SSI_WS78 */ - [RCAR_GP_PIN(6, 17)] = { PU5, 24 }, /* SSI_SCK78 */ - [RCAR_GP_PIN(6, 16)] = { PU5, 23 }, /* SSI_SDATA6 */ - [RCAR_GP_PIN(6, 15)] = { PU5, 22 }, /* SSI_WS6 */ - [RCAR_GP_PIN(6, 14)] = { PU5, 21 }, /* SSI_SCK6 */ - [RCAR_GP_PIN(6, 13)] = { PU5, 20 }, /* SSI_SDATA5 */ - [RCAR_GP_PIN(6, 12)] = { PU5, 19 }, /* SSI_WS5 */ - [RCAR_GP_PIN(6, 11)] = { PU5, 18 }, /* SSI_SCK5 */ - [RCAR_GP_PIN(6, 10)] = { PU5, 17 }, /* SSI_SDATA4 */ - [RCAR_GP_PIN(6, 9)] = { PU5, 16 }, /* SSI_WS4 */ - [RCAR_GP_PIN(6, 8)] = { PU5, 15 }, /* SSI_SCK4 */ - [RCAR_GP_PIN(6, 7)] = { PU5, 14 }, /* SSI_SDATA3 */ - [RCAR_GP_PIN(6, 6)] = { PU5, 13 }, /* SSI_WS34 */ - [RCAR_GP_PIN(6, 5)] = { PU5, 12 }, /* SSI_SCK34 */ - [RCAR_GP_PIN(6, 4)] = { PU5, 11 }, /* SSI_SDATA2_A */ - [RCAR_GP_PIN(6, 3)] = { PU5, 10 }, /* SSI_SDATA1_A */ - [RCAR_GP_PIN(6, 2)] = { PU5, 9 }, /* SSI_SDATA0 */ - [RCAR_GP_PIN(6, 1)] = { PU5, 8 }, /* SSI_WS01239 */ - [RCAR_GP_PIN(6, 0)] = { PU5, 7 }, /* SSI_SCK01239 */ - [RCAR_GP_PIN(5, 25)] = { PU5, 5 }, /* MLB_DAT */ - [RCAR_GP_PIN(5, 24)] = { PU5, 4 }, /* MLB_SIG */ - [RCAR_GP_PIN(5, 23)] = { PU5, 3 }, /* MLB_CLK */ - [RCAR_GP_PIN(5, 22)] = { PU5, 2 }, /* MSIOF0_RXD */ - [RCAR_GP_PIN(5, 21)] = { PU5, 1 }, /* MSIOF0_SS2 */ - [RCAR_GP_PIN(5, 20)] = { PU5, 0 }, /* MSIOF0_TXD */ - - [RCAR_GP_PIN(6, 31)] = { PU6, 6 }, /* USB31_OVC */ - [RCAR_GP_PIN(6, 30)] = { PU6, 5 }, /* USB31_PWEN */ - [RCAR_GP_PIN(6, 29)] = { PU6, 4 }, /* USB30_OVC */ - [RCAR_GP_PIN(6, 28)] = { PU6, 3 }, /* USB30_PWEN */ - [RCAR_GP_PIN(6, 27)] = { PU6, 2 }, /* USB1_OVC */ - [RCAR_GP_PIN(6, 26)] = { PU6, 1 }, /* USB1_PWEN */ - [RCAR_GP_PIN(6, 25)] = { PU6, 0 }, /* USB0_OVC */ +static const struct sh_pfc_bias_info bias_info[] = { + { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */ + { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */ + { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */ + + { 
RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */ + { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */ + { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */ + { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */ + { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */ + { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */ + { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */ + { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */ + { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */ + { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */ + { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */ + { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */ + { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */ + { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */ + { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */ + { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */ + { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */ + { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */ + { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */ + { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */ + { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */ + { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */ + { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */ + { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */ + { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */ + { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */ + { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */ + { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */ + { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */ + { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */ + { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */ + { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */ + + { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */ + { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */ + { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */ + { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */ + { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */ + { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */ + { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */ + { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */ + { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */ + { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */ + { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */ + { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */ + { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */ + { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */ + { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */ + { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */ + { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */ + { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */ + { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */ + { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */ + { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */ + { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */ + { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */ + { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */ + { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */ + { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */ + { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */ + { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */ + + { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */ + { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */ + { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */ + { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */ + { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */ + { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */ + { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */ + { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */ + { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */ + { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */ + { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */ + { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */ + { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */ + { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */ + { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */ + { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */ + { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */ + { 
RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */ + { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */ + { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */ + { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */ + { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */ + + { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */ + { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */ + { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */ + { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */ + { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */ + { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */ + { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */ + { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */ + { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */ + { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */ + { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */ + { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */ + { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */ + { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */ + { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */ + { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */ + { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */ + { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */ + { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */ + { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */ + { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */ + { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */ + { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */ + { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */ + { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */ + { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */ + { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */ + { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */ + { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */ + { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */ + { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */ + { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */ + + { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */ + { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */ + { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */ + { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */ + { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */ + { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */ + { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */ + { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */ + { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */ + { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */ + { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */ + { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */ + { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */ + { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */ + { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */ + { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */ + { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */ + { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */ + { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */ + { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */ + { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */ + { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */ + { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */ + { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */ + { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */ + { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */ + { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */ + { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */ + { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */ + { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */ + { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */ + + { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB31_OVC */ + { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB31_PWEN */ + { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */ + { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN 
*/ + { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */ + { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */ + { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */ }; static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin) { + const struct sh_pfc_bias_info *info; u32 reg; u32 bit; - if (WARN_ON_ONCE(!pullups[pin].reg)) + info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin); + if (!info) return PIN_CONFIG_BIAS_DISABLE; - reg = pullups[pin].reg; - bit = BIT(pullups[pin].bit); + reg = info->reg; + bit = BIT(info->bit); - if (sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit) { - if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit) - return PIN_CONFIG_BIAS_PULL_UP; - else - return PIN_CONFIG_BIAS_PULL_DOWN; - } else + if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit)) return PIN_CONFIG_BIAS_DISABLE; + else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit) + return PIN_CONFIG_BIAS_PULL_UP; + else + return PIN_CONFIG_BIAS_PULL_DOWN; } static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin, unsigned int bias) { + const struct sh_pfc_bias_info *info; u32 enable, updown; u32 reg; u32 bit; - if (WARN_ON_ONCE(!pullups[pin].reg)) + info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin); + if (!info) return; - reg = pullups[pin].reg; - bit = BIT(pullups[pin].bit); + reg = info->reg; + bit = BIT(info->bit); enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit; if (bias != PIN_CONFIG_BIAS_DISABLE) diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c index dc9b671ccf2e51261d26becbbe32572de67b6b2e..7e16545a2c3c9db82d4f8777ecca5b733af9a21f 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c @@ -122,22 +122,22 @@ #define GPSR3_0 F_(SD0_CLK, IP7_19_16) /* GPSR4 */ -#define GPSR4_17 F_(SD3_DS, IP11_11_8) -#define GPSR4_16 F_(SD3_DAT7, IP10_7_4) -#define GPSR4_15 F_(SD3_DAT6, IP10_3_0) -#define GPSR4_14 F_(SD3_DAT5, IP9_31_28) -#define GPSR4_13 F_(SD3_DAT4, IP9_27_24) +#define GPSR4_17 F_(SD3_DS, IP11_7_4) +#define GPSR4_16 F_(SD3_DAT7, IP11_3_0) +#define GPSR4_15 F_(SD3_DAT6, IP10_31_28) +#define GPSR4_14 F_(SD3_DAT5, IP10_27_24) +#define GPSR4_13 F_(SD3_DAT4, IP10_23_20) #define GPSR4_12 F_(SD3_DAT3, IP10_19_16) #define GPSR4_11 F_(SD3_DAT2, IP10_15_12) #define GPSR4_10 F_(SD3_DAT1, IP10_11_8) #define GPSR4_9 F_(SD3_DAT0, IP10_7_4) #define GPSR4_8 F_(SD3_CMD, IP10_3_0) #define GPSR4_7 F_(SD3_CLK, IP9_31_28) -#define GPSR4_6 F_(SD2_DS, IP9_23_20) -#define GPSR4_5 F_(SD2_DAT3, IP9_19_16) -#define GPSR4_4 F_(SD2_DAT2, IP9_15_12) -#define GPSR4_3 F_(SD2_DAT1, IP9_11_8) -#define GPSR4_2 F_(SD2_DAT0, IP9_7_4) +#define GPSR4_6 F_(SD2_DS, IP9_27_24) +#define GPSR4_5 F_(SD2_DAT3, IP9_23_20) +#define GPSR4_4 F_(SD2_DAT2, IP9_19_16) +#define GPSR4_3 F_(SD2_DAT1, IP9_15_12) +#define GPSR4_2 F_(SD2_DAT0, IP9_11_8) #define GPSR4_1 F_(SD2_CMD, IP9_7_4) #define GPSR4_0 F_(SD2_CLK, IP9_3_0) @@ -1490,6 +1490,418 @@ static const struct sh_pfc_pin pinmux_pins[] = { PINMUX_GPIO_GP_ALL(), }; +/* - EtherAVB --------------------------------------------------------------- */ +static const unsigned int avb_link_pins[] = { + /* AVB_LINK */ + RCAR_GP_PIN(2, 12), +}; +static const unsigned int avb_link_mux[] = { + AVB_LINK_MARK, +}; +static const unsigned int avb_magic_pins[] = { + /* AVB_MAGIC_ */ + RCAR_GP_PIN(2, 10), +}; +static const unsigned int avb_magic_mux[] = { + AVB_MAGIC_MARK, +}; +static const unsigned int avb_phy_int_pins[] = { + /* AVB_PHY_INT */ + RCAR_GP_PIN(2, 11), +}; +static const unsigned int 
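The r8a7795 bias handlers above stop indexing a sparse, pin-numbered pullups[] array and instead search the new bias_info[] table through sh_pfc_pin_to_bias_info(). That helper lives in the shared sh-pfc core and is not part of this excerpt; the sketch below only illustrates the simple lookup it is assumed to perform, using the struct sh_pfc_bias_info layout added to sh_pfc.h later in this patch.

/*
 * Assumed behaviour of sh_pfc_pin_to_bias_info(): a linear search over
 * the table that returns NULL when the pin has no pull-up/down control.
 */
static const struct sh_pfc_bias_info *
pin_to_bias_info_sketch(const struct sh_pfc_bias_info *info,
			unsigned int num, unsigned int pin)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (info[i].pin == pin)
			return &info[i];

	return NULL;
}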
avb_phy_int_mux[] = { + AVB_PHY_INT_MARK, +}; +static const unsigned int avb_mdc_pins[] = { + /* AVB_MDC */ + RCAR_GP_PIN(2, 9), +}; +static const unsigned int avb_mdc_mux[] = { + AVB_MDC_MARK, +}; +static const unsigned int avb_avtp_pps_pins[] = { + /* AVB_AVTP_PPS */ + RCAR_GP_PIN(2, 6), +}; +static const unsigned int avb_avtp_pps_mux[] = { + AVB_AVTP_PPS_MARK, +}; +static const unsigned int avb_avtp_match_a_pins[] = { + /* AVB_AVTP_MATCH_A */ + RCAR_GP_PIN(2, 13), +}; +static const unsigned int avb_avtp_match_a_mux[] = { + AVB_AVTP_MATCH_A_MARK, +}; +static const unsigned int avb_avtp_capture_a_pins[] = { + /* AVB_AVTP_CAPTURE_A */ + RCAR_GP_PIN(2, 14), +}; +static const unsigned int avb_avtp_capture_a_mux[] = { + AVB_AVTP_CAPTURE_A_MARK, +}; +static const unsigned int avb_avtp_match_b_pins[] = { + /* AVB_AVTP_MATCH_B */ + RCAR_GP_PIN(1, 8), +}; +static const unsigned int avb_avtp_match_b_mux[] = { + AVB_AVTP_MATCH_B_MARK, +}; +static const unsigned int avb_avtp_capture_b_pins[] = { + /* AVB_AVTP_CAPTURE_B */ + RCAR_GP_PIN(1, 11), +}; +static const unsigned int avb_avtp_capture_b_mux[] = { + AVB_AVTP_CAPTURE_B_MARK, +}; + +/* - DRIF0 --------------------------------------------------------------- */ +static const unsigned int drif0_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), +}; +static const unsigned int drif0_ctrl_a_mux[] = { + RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK, +}; +static const unsigned int drif0_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 10), +}; +static const unsigned int drif0_data0_a_mux[] = { + RIF0_D0_A_MARK, +}; +static const unsigned int drif0_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 7), +}; +static const unsigned int drif0_data1_a_mux[] = { + RIF0_D1_A_MARK, +}; +static const unsigned int drif0_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4), +}; +static const unsigned int drif0_ctrl_b_mux[] = { + RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK, +}; +static const unsigned int drif0_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 1), +}; +static const unsigned int drif0_data0_b_mux[] = { + RIF0_D0_B_MARK, +}; +static const unsigned int drif0_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 2), +}; +static const unsigned int drif0_data1_b_mux[] = { + RIF0_D1_B_MARK, +}; +static const unsigned int drif0_ctrl_c_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15), +}; +static const unsigned int drif0_ctrl_c_mux[] = { + RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK, +}; +static const unsigned int drif0_data0_c_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 13), +}; +static const unsigned int drif0_data0_c_mux[] = { + RIF0_D0_C_MARK, +}; +static const unsigned int drif0_data1_c_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 14), +}; +static const unsigned int drif0_data1_c_mux[] = { + RIF0_D1_C_MARK, +}; +/* - DRIF1 --------------------------------------------------------------- */ +static const unsigned int drif1_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18), +}; +static const unsigned int drif1_ctrl_a_mux[] = { + RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK, +}; +static const unsigned int drif1_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 19), +}; +static const unsigned int drif1_data0_a_mux[] = { + RIF1_D0_A_MARK, +}; +static const unsigned int drif1_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 20), +}; +static const unsigned int drif1_data1_a_mux[] = { + RIF1_D1_A_MARK, +}; +static const unsigned int drif1_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3), +}; +static const unsigned int 
drif1_ctrl_b_mux[] = { + RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK, +}; +static const unsigned int drif1_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 7), +}; +static const unsigned int drif1_data0_b_mux[] = { + RIF1_D0_B_MARK, +}; +static const unsigned int drif1_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 8), +}; +static const unsigned int drif1_data1_b_mux[] = { + RIF1_D1_B_MARK, +}; +static const unsigned int drif1_ctrl_c_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11), +}; +static const unsigned int drif1_ctrl_c_mux[] = { + RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK, +}; +static const unsigned int drif1_data0_c_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 6), +}; +static const unsigned int drif1_data0_c_mux[] = { + RIF1_D0_C_MARK, +}; +static const unsigned int drif1_data1_c_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 10), +}; +static const unsigned int drif1_data1_c_mux[] = { + RIF1_D1_C_MARK, +}; +/* - DRIF2 --------------------------------------------------------------- */ +static const unsigned int drif2_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), +}; +static const unsigned int drif2_ctrl_a_mux[] = { + RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK, +}; +static const unsigned int drif2_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 7), +}; +static const unsigned int drif2_data0_a_mux[] = { + RIF2_D0_A_MARK, +}; +static const unsigned int drif2_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 10), +}; +static const unsigned int drif2_data1_a_mux[] = { + RIF2_D1_A_MARK, +}; +static const unsigned int drif2_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27), +}; +static const unsigned int drif2_ctrl_b_mux[] = { + RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK, +}; +static const unsigned int drif2_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 30), +}; +static const unsigned int drif2_data0_b_mux[] = { + RIF2_D0_B_MARK, +}; +static const unsigned int drif2_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 31), +}; +static const unsigned int drif2_data1_b_mux[] = { + RIF2_D1_B_MARK, +}; +/* - DRIF3 --------------------------------------------------------------- */ +static const unsigned int drif3_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18), +}; +static const unsigned int drif3_ctrl_a_mux[] = { + RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK, +}; +static const unsigned int drif3_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 19), +}; +static const unsigned int drif3_data0_a_mux[] = { + RIF3_D0_A_MARK, +}; +static const unsigned int drif3_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 20), +}; +static const unsigned int drif3_data1_a_mux[] = { + RIF3_D1_A_MARK, +}; +static const unsigned int drif3_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25), +}; +static const unsigned int drif3_ctrl_b_mux[] = { + RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK, +}; +static const unsigned int drif3_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 28), +}; +static const unsigned int drif3_data0_b_mux[] = { + RIF3_D0_B_MARK, +}; +static const unsigned int drif3_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 29), +}; +static const unsigned int drif3_data1_b_mux[] = { + RIF3_D1_B_MARK, +}; + +/* - DU --------------------------------------------------------------------- */ +static const unsigned int du_rgb666_pins[] = { + /* R[7:2], G[7:2], B[7:2] */ + RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13), + RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10), + RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13), + RCAR_GP_PIN(1, 
12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18), + RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5), + RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2), +}; +static const unsigned int du_rgb666_mux[] = { + DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK, + DU_DR3_MARK, DU_DR2_MARK, + DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK, + DU_DG3_MARK, DU_DG2_MARK, + DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK, + DU_DB3_MARK, DU_DB2_MARK, +}; +static const unsigned int du_rgb888_pins[] = { + /* R[7:0], G[7:0], B[7:0] */ + RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13), + RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10), + RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 8), + RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13), + RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18), + RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16), + RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5), + RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2), + RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0), +}; +static const unsigned int du_rgb888_mux[] = { + DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK, + DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK, + DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK, + DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK, + DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK, + DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK, +}; +static const unsigned int du_clk_out_0_pins[] = { + /* CLKOUT */ + RCAR_GP_PIN(1, 27), +}; +static const unsigned int du_clk_out_0_mux[] = { + DU_DOTCLKOUT0_MARK +}; +static const unsigned int du_clk_out_1_pins[] = { + /* CLKOUT */ + RCAR_GP_PIN(2, 3), +}; +static const unsigned int du_clk_out_1_mux[] = { + DU_DOTCLKOUT1_MARK +}; +static const unsigned int du_sync_pins[] = { + /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */ + RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4), +}; +static const unsigned int du_sync_mux[] = { + DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK +}; +static const unsigned int du_oddf_pins[] = { + /* EXDISP/EXODDF/EXCDE */ + RCAR_GP_PIN(2, 2), +}; +static const unsigned int du_oddf_mux[] = { + DU_EXODDF_DU_ODDF_DISP_CDE_MARK, +}; +static const unsigned int du_cde_pins[] = { + /* CDE */ + RCAR_GP_PIN(2, 0), +}; +static const unsigned int du_cde_mux[] = { + DU_CDE_MARK, +}; +static const unsigned int du_disp_pins[] = { + /* DISP */ + RCAR_GP_PIN(2, 1), +}; +static const unsigned int du_disp_mux[] = { + DU_DISP_MARK, +}; + +/* - I2C -------------------------------------------------------------------- */ +static const unsigned int i2c1_a_pins[] = { + /* SDA, SCL */ + RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10), +}; +static const unsigned int i2c1_a_mux[] = { + SDA1_A_MARK, SCL1_A_MARK, +}; +static const unsigned int i2c1_b_pins[] = { + /* SDA, SCL */ + RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23), +}; +static const unsigned int i2c1_b_mux[] = { + SDA1_B_MARK, SCL1_B_MARK, +}; +static const unsigned int i2c2_a_pins[] = { + /* SDA, SCL */ + RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4), +}; +static const unsigned int i2c2_a_mux[] = { + SDA2_A_MARK, SCL2_A_MARK, +}; +static const unsigned int i2c2_b_pins[] = { + /* SDA, SCL */ + RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12), +}; +static const unsigned int i2c2_b_mux[] = { + SDA2_B_MARK, SCL2_B_MARK, +}; +static const unsigned int i2c6_a_pins[] = { + /* SDA, SCL */ + RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11), +}; +static const unsigned int i2c6_a_mux[] = { + SDA6_A_MARK, SCL6_A_MARK, +}; +static const unsigned int i2c6_b_pins[] = { + /* SDA, SCL */ + RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25), 
+}; +static const unsigned int i2c6_b_mux[] = { + SDA6_B_MARK, SCL6_B_MARK, +}; +static const unsigned int i2c6_c_pins[] = { + /* SDA, SCL */ + RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), +}; +static const unsigned int i2c6_c_mux[] = { + SDA6_C_MARK, SCL6_C_MARK, +}; + /* - SCIF0 ------------------------------------------------------------------ */ static const unsigned int scif0_data_pins[] = { /* RX, TX */ @@ -1912,6 +2324,60 @@ static const unsigned int sdhi3_ds_mux[] = { }; static const struct sh_pfc_pin_group pinmux_groups[] = { + SH_PFC_PIN_GROUP(avb_link), + SH_PFC_PIN_GROUP(avb_magic), + SH_PFC_PIN_GROUP(avb_phy_int), + SH_PFC_PIN_GROUP(avb_mdc), + SH_PFC_PIN_GROUP(avb_avtp_pps), + SH_PFC_PIN_GROUP(avb_avtp_match_a), + SH_PFC_PIN_GROUP(avb_avtp_capture_a), + SH_PFC_PIN_GROUP(avb_avtp_match_b), + SH_PFC_PIN_GROUP(avb_avtp_capture_b), + SH_PFC_PIN_GROUP(drif0_ctrl_a), + SH_PFC_PIN_GROUP(drif0_data0_a), + SH_PFC_PIN_GROUP(drif0_data1_a), + SH_PFC_PIN_GROUP(drif0_ctrl_b), + SH_PFC_PIN_GROUP(drif0_data0_b), + SH_PFC_PIN_GROUP(drif0_data1_b), + SH_PFC_PIN_GROUP(drif0_ctrl_c), + SH_PFC_PIN_GROUP(drif0_data0_c), + SH_PFC_PIN_GROUP(drif0_data1_c), + SH_PFC_PIN_GROUP(drif1_ctrl_a), + SH_PFC_PIN_GROUP(drif1_data0_a), + SH_PFC_PIN_GROUP(drif1_data1_a), + SH_PFC_PIN_GROUP(drif1_ctrl_b), + SH_PFC_PIN_GROUP(drif1_data0_b), + SH_PFC_PIN_GROUP(drif1_data1_b), + SH_PFC_PIN_GROUP(drif1_ctrl_c), + SH_PFC_PIN_GROUP(drif1_data0_c), + SH_PFC_PIN_GROUP(drif1_data1_c), + SH_PFC_PIN_GROUP(drif2_ctrl_a), + SH_PFC_PIN_GROUP(drif2_data0_a), + SH_PFC_PIN_GROUP(drif2_data1_a), + SH_PFC_PIN_GROUP(drif2_ctrl_b), + SH_PFC_PIN_GROUP(drif2_data0_b), + SH_PFC_PIN_GROUP(drif2_data1_b), + SH_PFC_PIN_GROUP(drif3_ctrl_a), + SH_PFC_PIN_GROUP(drif3_data0_a), + SH_PFC_PIN_GROUP(drif3_data1_a), + SH_PFC_PIN_GROUP(drif3_ctrl_b), + SH_PFC_PIN_GROUP(drif3_data0_b), + SH_PFC_PIN_GROUP(drif3_data1_b), + SH_PFC_PIN_GROUP(du_rgb666), + SH_PFC_PIN_GROUP(du_rgb888), + SH_PFC_PIN_GROUP(du_clk_out_0), + SH_PFC_PIN_GROUP(du_clk_out_1), + SH_PFC_PIN_GROUP(du_sync), + SH_PFC_PIN_GROUP(du_oddf), + SH_PFC_PIN_GROUP(du_cde), + SH_PFC_PIN_GROUP(du_disp), + SH_PFC_PIN_GROUP(i2c1_a), + SH_PFC_PIN_GROUP(i2c1_b), + SH_PFC_PIN_GROUP(i2c2_a), + SH_PFC_PIN_GROUP(i2c2_b), + SH_PFC_PIN_GROUP(i2c6_a), + SH_PFC_PIN_GROUP(i2c6_b), + SH_PFC_PIN_GROUP(i2c6_c), SH_PFC_PIN_GROUP(scif0_data), SH_PFC_PIN_GROUP(scif0_clk), SH_PFC_PIN_GROUP(scif0_ctrl), @@ -1969,6 +2435,87 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(sdhi3_ds), }; +static const char * const avb_groups[] = { + "avb_link", + "avb_magic", + "avb_phy_int", + "avb_mdc", + "avb_avtp_pps", + "avb_avtp_match_a", + "avb_avtp_capture_a", + "avb_avtp_match_b", + "avb_avtp_capture_b", +}; + +static const char * const drif0_groups[] = { + "drif0_ctrl_a", + "drif0_data0_a", + "drif0_data1_a", + "drif0_ctrl_b", + "drif0_data0_b", + "drif0_data1_b", + "drif0_ctrl_c", + "drif0_data0_c", + "drif0_data1_c", +}; + +static const char * const drif1_groups[] = { + "drif1_ctrl_a", + "drif1_data0_a", + "drif1_data1_a", + "drif1_ctrl_b", + "drif1_data0_b", + "drif1_data1_b", + "drif1_ctrl_c", + "drif1_data0_c", + "drif1_data1_c", +}; + +static const char * const drif2_groups[] = { + "drif2_ctrl_a", + "drif2_data0_a", + "drif2_data1_a", + "drif2_ctrl_b", + "drif2_data0_b", + "drif2_data1_b", +}; + +static const char * const drif3_groups[] = { + "drif3_ctrl_a", + "drif3_data0_a", + "drif3_data1_a", + "drif3_ctrl_b", + "drif3_data0_b", + "drif3_data1_b", +}; + +static const char * const 
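The pinmux_groups[] entries above follow the sh-pfc naming convention: SH_PFC_PIN_GROUP(foo) expects matching foo_pins[] and foo_mux[] arrays such as the ones defined earlier in this file. The macro's expansion is not shown in this excerpt; roughly, and only as a sketch with assumed field names, it ties the two arrays together like this:

/* Approximate shape of the SH_PFC_PIN_GROUP() helper from sh_pfc.h
 * (field names assumed, not taken from this patch). */
#define SH_PFC_PIN_GROUP_SKETCH(n)			\
	{						\
		.name = #n,				\
		.pins = n##_pins,			\
		.mux = n##_mux,				\
		.nr_pins = ARRAY_SIZE(n##_pins),	\
	}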
du_groups[] = { + "du_rgb666", + "du_rgb888", + "du_clk_out_0", + "du_clk_out_1", + "du_sync", + "du_oddf", + "du_cde", + "du_disp", +}; + +static const char * const i2c1_groups[] = { + "i2c1_a", + "i2c1_b", +}; + +static const char * const i2c2_groups[] = { + "i2c2_a", + "i2c2_b", +}; + +static const char * const i2c6_groups[] = { + "i2c6_a", + "i2c6_b", + "i2c6_c", +}; + static const char * const scif0_groups[] = { "scif0_data", "scif0_clk", @@ -2058,6 +2605,15 @@ static const char * const sdhi3_groups[] = { }; static const struct sh_pfc_function pinmux_functions[] = { + SH_PFC_FUNCTION(avb), + SH_PFC_FUNCTION(drif0), + SH_PFC_FUNCTION(drif1), + SH_PFC_FUNCTION(drif2), + SH_PFC_FUNCTION(drif3), + SH_PFC_FUNCTION(du), + SH_PFC_FUNCTION(i2c1), + SH_PFC_FUNCTION(i2c2), + SH_PFC_FUNCTION(i2c6), SH_PFC_FUNCTION(scif0), SH_PFC_FUNCTION(scif1), SH_PFC_FUNCTION(scif2), diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index c5772584594ce3c48686f5df0315586dc2e9f8b2..fcacfa73ef6e9b657fb043186fcd6c9cd0d2dfeb 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -570,7 +570,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin, switch (param) { case PIN_CONFIG_BIAS_DISABLE: - return true; + return pin->configs & + (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN); case PIN_CONFIG_BIAS_PULL_UP: return pin->configs & SH_PFC_PIN_CFG_PULL_UP; diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 2345421103db342e537187380f088240e3b1bf59..e42cc7a8d10ee75c0af30bb6cc200c9a31f5d3e1 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -189,6 +189,12 @@ struct sh_pfc_window { unsigned long size; }; +struct sh_pfc_bias_info { + u16 pin; + u16 reg : 11; + u16 bit : 5; +}; + struct sh_pfc_pin_range; struct sh_pfc { @@ -540,6 +546,14 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info; .configs = SH_PFC_PIN_CFG_NO_GPIO, \ } +/* SH_PFC_PIN_NAMED_CFG - Expand to a sh_pfc_pin entry with the given name */ +#define SH_PFC_PIN_NAMED_CFG(row, col, _name, cfgs) \ + { \ + .pin = PIN_NUMBER(row, col), \ + .name = __stringify(PIN_##_name), \ + .configs = SH_PFC_PIN_CFG_NO_GPIO | cfgs, \ + } + /* PINMUX_DATA_ALL - Expand to a list of PORT_name_DATA, PORT_name_FN0, * PORT_name_OUT, PORT_name_IN marks */ diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 200667f08c373eb027f2bcc4253f564563586e96..efc43711ff5cbcff2c94838e06ead49623ab9118 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -1092,9 +1092,11 @@ int stm32_pctl_probe(struct platform_device *pdev) return -EINVAL; } - ret = stm32_pctrl_dt_setup_irq(pdev, pctl); - if (ret) - return ret; + if (of_find_property(np, "interrupt-parent", NULL)) { + ret = stm32_pctrl_dt_setup_irq(pdev, pctl); + if (ret) + return ret; + } for_each_child_of_node(np, child) if (of_property_read_bool(child, "gpio-controller")) diff --git a/drivers/pinctrl/stm32/pinctrl-stm32f429.c b/drivers/pinctrl/stm32/pinctrl-stm32f429.c index e9b15dc0654bc67371e2f1967350b560cd78a508..990b867b96254b59f748c87b27355384833bdf58 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32f429.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32f429.c @@ -1584,8 +1584,4 @@ static struct platform_driver stm32f429_pinctrl_driver = { }, }; -static int __init stm32f429_pinctrl_init(void) -{ - return platform_driver_register(&stm32f429_pinctrl_driver); -} 
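sh_pfc.h also gains SH_PFC_PIN_NAMED_CFG() above, which declares a non-GPIO pin by its BGA row and column while still advertising pin-configuration capabilities. A hypothetical pinmux_pins[] entry for the AVB_TD1 ball at B18 (the ball name comes from the DRVCTRL3 hunk above; the entry itself and its capability flag are illustrative only) might look like:

	/* Illustrative only: ball B18 (AVB_TD1) as a named, non-GPIO pin
	 * that supports drive-strength control. */
	SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),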
-device_initcall(stm32f429_pinctrl_init); +builtin_platform_driver(stm32f429_pinctrl_driver); diff --git a/drivers/pinctrl/sunxi/pinctrl-gr8.c b/drivers/pinctrl/sunxi/pinctrl-gr8.c index 2904d2b7378b98195841cbcb6ae61b9125910fbc..2f232c3a057950ed9d89fabd00b003f46535b4e8 100644 --- a/drivers/pinctrl/sunxi/pinctrl-gr8.c +++ b/drivers/pinctrl/sunxi/pinctrl-gr8.c @@ -12,7 +12,7 @@ * warranty of any kind, whether express or implied. */ -#include +#include #include #include #include @@ -525,7 +525,6 @@ static const struct of_device_id sun5i_gr8_pinctrl_match[] = { { .compatible = "nextthing,gr8-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun5i_gr8_pinctrl_match); static struct platform_driver sun5i_gr8_pinctrl_driver = { .probe = sun5i_gr8_pinctrl_probe, @@ -534,8 +533,4 @@ static struct platform_driver sun5i_gr8_pinctrl_driver = { .of_match_table = sun5i_gr8_pinctrl_match, }, }; -module_platform_driver(sun5i_gr8_pinctrl_driver); - -MODULE_AUTHOR("Mylene Josserand +#include #include #include #include @@ -1035,7 +1035,6 @@ static const struct of_device_id sun4i_a10_pinctrl_match[] = { { .compatible = "allwinner,sun4i-a10-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun4i_a10_pinctrl_match); static struct platform_driver sun4i_a10_pinctrl_driver = { .probe = sun4i_a10_pinctrl_probe, @@ -1044,8 +1043,4 @@ static struct platform_driver sun4i_a10_pinctrl_driver = { .of_match_table = sun4i_a10_pinctrl_match, }, }; -module_platform_driver(sun4i_a10_pinctrl_driver); - -MODULE_AUTHOR("Maxime Ripard +#include #include #include #include @@ -674,7 +674,6 @@ static const struct of_device_id sun5i_a10s_pinctrl_match[] = { { .compatible = "allwinner,sun5i-a10s-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun5i_a10s_pinctrl_match); static struct platform_driver sun5i_a10s_pinctrl_driver = { .probe = sun5i_a10s_pinctrl_probe, @@ -683,8 +682,4 @@ static struct platform_driver sun5i_a10s_pinctrl_driver = { .of_match_table = sun5i_a10s_pinctrl_match, }, }; -module_platform_driver(sun5i_a10s_pinctrl_driver); - -MODULE_AUTHOR("Maxime Ripard +#include #include #include #include @@ -392,7 +392,6 @@ static const struct of_device_id sun5i_a13_pinctrl_match[] = { { .compatible = "allwinner,sun5i-a13-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun5i_a13_pinctrl_match); static struct platform_driver sun5i_a13_pinctrl_driver = { .probe = sun5i_a13_pinctrl_probe, @@ -401,8 +400,4 @@ static struct platform_driver sun5i_a13_pinctrl_driver = { .of_match_table = sun5i_a13_pinctrl_match, }, }; -module_platform_driver(sun5i_a13_pinctrl_driver); - -MODULE_AUTHOR("Maxime Ripard +#include #include #include #include @@ -136,7 +136,6 @@ static const struct of_device_id sun6i_a31_r_pinctrl_match[] = { { .compatible = "allwinner,sun6i-a31-r-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun6i_a31_r_pinctrl_match); static struct platform_driver sun6i_a31_r_pinctrl_driver = { .probe = sun6i_a31_r_pinctrl_probe, @@ -145,9 +144,4 @@ static struct platform_driver sun6i_a31_r_pinctrl_driver = { .of_match_table = sun6i_a31_r_pinctrl_match, }, }; -module_platform_driver(sun6i_a31_r_pinctrl_driver); - -MODULE_AUTHOR("Boris Brezillon +#include #include #include #include @@ -934,7 +934,6 @@ static const struct of_device_id sun6i_a31_pinctrl_match[] = { { .compatible = "allwinner,sun6i-a31-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun6i_a31_pinctrl_match); static struct platform_driver sun6i_a31_pinctrl_driver = { .probe = sun6i_a31_pinctrl_probe, @@ -943,8 +942,4 @@ static struct platform_driver sun6i_a31_pinctrl_driver = { .of_match_table = 
sun6i_a31_pinctrl_match, }, }; -module_platform_driver(sun6i_a31_pinctrl_driver); - -MODULE_AUTHOR("Maxime Ripard +#include #include #include #include @@ -798,7 +798,6 @@ static const struct of_device_id sun6i_a31s_pinctrl_match[] = { { .compatible = "allwinner,sun6i-a31s-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun6i_a31s_pinctrl_match); static struct platform_driver sun6i_a31s_pinctrl_driver = { .probe = sun6i_a31s_pinctrl_probe, @@ -807,8 +806,4 @@ static struct platform_driver sun6i_a31s_pinctrl_driver = { .of_match_table = sun6i_a31s_pinctrl_match, }, }; -module_platform_driver(sun6i_a31s_pinctrl_driver); - -MODULE_AUTHOR("Hans de Goede "); -MODULE_DESCRIPTION("Allwinner A31s pinctrl driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver(sun6i_a31s_pinctrl_driver); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c index 435ad30f45dbf7a46ac8ff299efe66f60b4e405e..b6f4c68ffb39eac5bdf821b35c2c7767a49a23d3 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c @@ -10,7 +10,7 @@ * warranty of any kind, whether express or implied. */ -#include +#include #include #include #include @@ -1045,7 +1045,6 @@ static const struct of_device_id sun7i_a20_pinctrl_match[] = { { .compatible = "allwinner,sun7i-a20-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun7i_a20_pinctrl_match); static struct platform_driver sun7i_a20_pinctrl_driver = { .probe = sun7i_a20_pinctrl_probe, @@ -1054,8 +1053,4 @@ static struct platform_driver sun7i_a20_pinctrl_driver = { .of_match_table = sun7i_a20_pinctrl_match, }, }; -module_platform_driver(sun7i_a20_pinctrl_driver); - -MODULE_AUTHOR("Maxime Ripard +#include #include #include #include @@ -123,7 +123,6 @@ static const struct of_device_id sun8i_a23_r_pinctrl_match[] = { { .compatible = "allwinner,sun8i-a23-r-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun8i_a23_r_pinctrl_match); static struct platform_driver sun8i_a23_r_pinctrl_driver = { .probe = sun8i_a23_r_pinctrl_probe, @@ -132,10 +131,4 @@ static struct platform_driver sun8i_a23_r_pinctrl_driver = { .of_match_table = sun8i_a23_r_pinctrl_match, }, }; -module_platform_driver(sun8i_a23_r_pinctrl_driver); - -MODULE_AUTHOR("Chen-Yu Tsai "); -MODULE_AUTHOR("Boris Brezillon +#include #include #include #include @@ -575,7 +575,6 @@ static const struct of_device_id sun8i_a23_pinctrl_match[] = { { .compatible = "allwinner,sun8i-a23-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun8i_a23_pinctrl_match); static struct platform_driver sun8i_a23_pinctrl_driver = { .probe = sun8i_a23_pinctrl_probe, @@ -584,9 +583,4 @@ static struct platform_driver sun8i_a23_pinctrl_driver = { .of_match_table = sun8i_a23_pinctrl_match, }, }; -module_platform_driver(sun8i_a23_pinctrl_driver); - -MODULE_AUTHOR("Chen-Yu Tsai "); -MODULE_AUTHOR("Maxime Ripard +#include #include #include #include @@ -498,7 +498,6 @@ static const struct of_device_id sun8i_a33_pinctrl_match[] = { { .compatible = "allwinner,sun8i-a33-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun8i_a33_pinctrl_match); static struct platform_driver sun8i_a33_pinctrl_driver = { .probe = sun8i_a33_pinctrl_probe, @@ -507,8 +506,4 @@ static struct platform_driver sun8i_a33_pinctrl_driver = { .of_match_table = sun8i_a33_pinctrl_match, }, }; -module_platform_driver(sun8i_a33_pinctrl_driver); - -MODULE_AUTHOR("Vishnu Patekar "); -MODULE_DESCRIPTION("Allwinner a33 pinctrl driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver(sun8i_a33_pinctrl_driver); diff --git 
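These pinctrl drivers can only be built in, so the patch drops their module boilerplate (module_platform_driver(), MODULE_DEVICE_TABLE() and the MODULE_AUTHOR/DESCRIPTION/LICENSE tags) in favour of builtin_platform_driver(). That macro expands, via builtin_driver() in <linux/device.h>, to essentially the same hand-rolled initcall that the stm32f429 hunk removes; approximately:

/* Approximate expansion of builtin_platform_driver(foo_driver);
 * "foo_driver" is a placeholder name. */
static int __init foo_driver_init(void)
{
	return platform_driver_register(&foo_driver);
}
device_initcall(foo_driver_init);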
a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c index 90b973e159821c5889d95e58513bb04661bf8ff5..9aec1d2232dd830e2c19a8e1e394e6033e621c21 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c @@ -12,7 +12,7 @@ * warranty of any kind, whether express or implied. */ -#include +#include #include #include #include @@ -587,7 +587,6 @@ static const struct of_device_id sun8i_a83t_pinctrl_match[] = { { .compatible = "allwinner,sun8i-a83t-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun8i_a83t_pinctrl_match); static struct platform_driver sun8i_a83t_pinctrl_driver = { .probe = sun8i_a83t_pinctrl_probe, @@ -596,8 +595,4 @@ static struct platform_driver sun8i_a83t_pinctrl_driver = { .of_match_table = sun8i_a83t_pinctrl_match, }, }; -module_platform_driver(sun8i_a83t_pinctrl_driver); - -MODULE_AUTHOR("Vishnu Patekar "); -MODULE_DESCRIPTION("Allwinner a83t pinctrl driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver(sun8i_a83t_pinctrl_driver); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c index 1b580ba764531a6d04d2293486f9e1b78a466f45..bc14e954d7a2abd923add205a57cb850e72fb65d 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c @@ -10,7 +10,7 @@ * warranty of any kind, whether express or implied. */ -#include +#include #include #include #include @@ -733,7 +733,6 @@ static const struct of_device_id sun9i_a80_pinctrl_match[] = { { .compatible = "allwinner,sun9i-a80-pinctrl", }, {} }; -MODULE_DEVICE_TABLE(of, sun9i_a80_pinctrl_match); static struct platform_driver sun9i_a80_pinctrl_driver = { .probe = sun9i_a80_pinctrl_probe, @@ -742,8 +741,4 @@ static struct platform_driver sun9i_a80_pinctrl_driver = { .of_match_table = sun9i_a80_pinctrl_match, }, }; -module_platform_driver(sun9i_a80_pinctrl_driver); - -MODULE_AUTHOR("Maxime Ripard "); -MODULE_DESCRIPTION("Allwinner A80 pinctrl driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver(sun9i_a80_pinctrl_driver); diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 0facbea5f465f0912630770e1b4cbc6255d817da..0eb51e33cb1be5412ab11d10e7cdb474a2faa061 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -28,6 +28,8 @@ #include #include +#include + #include "../core.h" #include "pinctrl-sunxi.h" @@ -145,6 +147,171 @@ static int sunxi_pctrl_get_group_pins(struct pinctrl_dev *pctldev, return 0; } +static bool sunxi_pctrl_has_bias_prop(struct device_node *node) +{ + return of_find_property(node, "bias-pull-up", NULL) || + of_find_property(node, "bias-pull-down", NULL) || + of_find_property(node, "bias-disable", NULL) || + of_find_property(node, "allwinner,pull", NULL); +} + +static bool sunxi_pctrl_has_drive_prop(struct device_node *node) +{ + return of_find_property(node, "drive-strength", NULL) || + of_find_property(node, "allwinner,drive", NULL); +} + +static int sunxi_pctrl_parse_bias_prop(struct device_node *node) +{ + u32 val; + + /* Try the new style binding */ + if (of_find_property(node, "bias-pull-up", NULL)) + return PIN_CONFIG_BIAS_PULL_UP; + + if (of_find_property(node, "bias-pull-down", NULL)) + return PIN_CONFIG_BIAS_PULL_DOWN; + + if (of_find_property(node, "bias-disable", NULL)) + return PIN_CONFIG_BIAS_DISABLE; + + /* And fall back to the old binding */ + if (of_property_read_u32(node, "allwinner,pull", &val)) + return -EINVAL; + + switch (val) { + case 
SUN4I_PINCTRL_NO_PULL: + return PIN_CONFIG_BIAS_DISABLE; + case SUN4I_PINCTRL_PULL_UP: + return PIN_CONFIG_BIAS_PULL_UP; + case SUN4I_PINCTRL_PULL_DOWN: + return PIN_CONFIG_BIAS_PULL_DOWN; + } + + return -EINVAL; +} + +static int sunxi_pctrl_parse_drive_prop(struct device_node *node) +{ + u32 val; + + /* Try the new style binding */ + if (!of_property_read_u32(node, "drive-strength", &val)) { + /* We can't go below 10mA ... */ + if (val < 10) + return -EINVAL; + + /* ... and only up to 40 mA ... */ + if (val > 40) + val = 40; + + /* by steps of 10 mA */ + return rounddown(val, 10); + } + + /* And then fall back to the old binding */ + if (of_property_read_u32(node, "allwinner,drive", &val)) + return -EINVAL; + + return (val + 1) * 10; +} + +static const char *sunxi_pctrl_parse_function_prop(struct device_node *node) +{ + const char *function; + int ret; + + /* Try the generic binding */ + ret = of_property_read_string(node, "function", &function); + if (!ret) + return function; + + /* And fall back to our legacy one */ + ret = of_property_read_string(node, "allwinner,function", &function); + if (!ret) + return function; + + return NULL; +} + +static const char *sunxi_pctrl_find_pins_prop(struct device_node *node, + int *npins) +{ + int count; + + /* Try the generic binding */ + count = of_property_count_strings(node, "pins"); + if (count > 0) { + *npins = count; + return "pins"; + } + + /* And fall back to our legacy one */ + count = of_property_count_strings(node, "allwinner,pins"); + if (count > 0) { + *npins = count; + return "allwinner,pins"; + } + + return NULL; +} + +static unsigned long *sunxi_pctrl_build_pin_config(struct device_node *node, + unsigned int *len) +{ + unsigned long *pinconfig; + unsigned int configlen = 0, idx = 0; + int ret; + + if (sunxi_pctrl_has_drive_prop(node)) + configlen++; + if (sunxi_pctrl_has_bias_prop(node)) + configlen++; + + /* + * If we don't have any configuration, bail out + */ + if (!configlen) + return NULL; + + pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL); + if (!pinconfig) + return ERR_PTR(-ENOMEM); + + if (sunxi_pctrl_has_drive_prop(node)) { + int drive = sunxi_pctrl_parse_drive_prop(node); + if (drive < 0) { + ret = drive; + goto err_free; + } + + pinconfig[idx++] = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, + drive); + } + + if (sunxi_pctrl_has_bias_prop(node)) { + int pull = sunxi_pctrl_parse_bias_prop(node); + int arg = 0; + if (pull < 0) { + ret = pull; + goto err_free; + } + + if (pull != PIN_CONFIG_BIAS_DISABLE) + arg = 1; /* hardware uses weak pull resistors */ + + pinconfig[idx++] = pinconf_to_config_packed(pull, arg); + } + + + *len = configlen; + return pinconfig; + +err_free: + kfree(pinconfig); + return ERR_PTR(ret); +} + static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *node, struct pinctrl_map **map, @@ -153,38 +320,48 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); unsigned long *pinconfig; struct property *prop; - const char *function; + const char *function, *pin_prop; const char *group; - int ret, nmaps, i = 0; - u32 val; + int ret, npins, nmaps, configlen = 0, i = 0; *map = NULL; *num_maps = 0; - ret = of_property_read_string(node, "allwinner,function", &function); - if (ret) { - dev_err(pctl->dev, - "missing allwinner,function property in node %s\n", + function = sunxi_pctrl_parse_function_prop(node); + if (!function) { + dev_err(pctl->dev, "missing function property in node 
%s\n", node->name); return -EINVAL; } - nmaps = of_property_count_strings(node, "allwinner,pins") * 2; - if (nmaps < 0) { - dev_err(pctl->dev, - "missing allwinner,pins property in node %s\n", + pin_prop = sunxi_pctrl_find_pins_prop(node, &npins); + if (!pin_prop) { + dev_err(pctl->dev, "missing pins property in node %s\n", node->name); return -EINVAL; } + /* + * We have two maps for each pin: one for the function, one + * for the configuration (bias, strength, etc). + * + * We might be slightly overshooting, since we might not have + * any configuration. + */ + nmaps = npins * 2; *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL); if (!*map) return -ENOMEM; - of_property_for_each_string(node, "allwinner,pins", prop, group) { + pinconfig = sunxi_pctrl_build_pin_config(node, &configlen); + if (IS_ERR(pinconfig)) { + ret = PTR_ERR(pinconfig); + goto err_free_map; + } + + of_property_for_each_string(node, pin_prop, prop, group) { struct sunxi_pinctrl_group *grp = sunxi_pinctrl_find_group_by_name(pctl, group); - int j = 0, configlen = 0; if (!grp) { dev_err(pctl->dev, "unknown pin %s", group); @@ -205,45 +382,31 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, i++; - (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; - (*map)[i].data.configs.group_or_pin = group; - - if (of_find_property(node, "allwinner,drive", NULL)) - configlen++; - if (of_find_property(node, "allwinner,pull", NULL)) - configlen++; - - pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL); - if (!pinconfig) { - kfree(*map); - return -ENOMEM; + if (pinconfig) { + (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; + (*map)[i].data.configs.group_or_pin = group; + (*map)[i].data.configs.configs = pinconfig; + (*map)[i].data.configs.num_configs = configlen; + i++; } - - if (!of_property_read_u32(node, "allwinner,drive", &val)) { - u16 strength = (val + 1) * 10; - pinconfig[j++] = - pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, - strength); - } - - if (!of_property_read_u32(node, "allwinner,pull", &val)) { - enum pin_config_param pull = PIN_CONFIG_END; - if (val == 1) - pull = PIN_CONFIG_BIAS_PULL_UP; - else if (val == 2) - pull = PIN_CONFIG_BIAS_PULL_DOWN; - pinconfig[j++] = pinconf_to_config_packed(pull, 0); - } - - (*map)[i].data.configs.configs = pinconfig; - (*map)[i].data.configs.num_configs = configlen; - - i++; } - *num_maps = nmaps; + *num_maps = i; + + /* + * We know have the number of maps we need, we can resize our + * map array + */ + *map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL); + if (!*map) + return -ENOMEM; return 0; + +err_free_map: + kfree(*map); + *map = NULL; + return ret; } static void sunxi_pctrl_dt_free_map(struct pinctrl_dev *pctldev, @@ -252,9 +415,17 @@ static void sunxi_pctrl_dt_free_map(struct pinctrl_dev *pctldev, { int i; - for (i = 0; i < num_maps; i++) { - if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) - kfree(map[i].data.configs.configs); + /* pin config is never in the first map */ + for (i = 1; i < num_maps; i++) { + if (map[i].type != PIN_MAP_TYPE_CONFIGS_GROUP) + continue; + + /* + * All the maps share the same pin config, + * free only the first one we find. 
+ */ + kfree(map[i].data.configs.configs); + break; } kfree(map); @@ -268,15 +439,91 @@ static const struct pinctrl_ops sunxi_pctrl_ops = { .get_group_pins = sunxi_pctrl_get_group_pins, }; +static int sunxi_pconf_reg(unsigned pin, enum pin_config_param param, + u32 *offset, u32 *shift, u32 *mask) +{ + switch (param) { + case PIN_CONFIG_DRIVE_STRENGTH: + *offset = sunxi_dlevel_reg(pin); + *shift = sunxi_dlevel_offset(pin); + *mask = DLEVEL_PINS_MASK; + break; + + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + case PIN_CONFIG_BIAS_DISABLE: + *offset = sunxi_pull_reg(pin); + *shift = sunxi_pull_offset(pin); + *mask = PULL_PINS_MASK; + break; + + default: + return -ENOTSUPP; + } + + return 0; +} + +static int sunxi_pconf_get(struct pinctrl_dev *pctldev, unsigned pin, + unsigned long *config) +{ + struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param = pinconf_to_config_param(*config); + u32 offset, shift, mask, val; + u16 arg; + int ret; + + pin -= pctl->desc->pin_base; + + ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask); + if (ret < 0) + return ret; + + val = (readl(pctl->membase + offset) >> shift) & mask; + + switch (pinconf_to_config_param(*config)) { + case PIN_CONFIG_DRIVE_STRENGTH: + arg = (val + 1) * 10; + break; + + case PIN_CONFIG_BIAS_PULL_UP: + if (val != SUN4I_PINCTRL_PULL_UP) + return -EINVAL; + arg = 1; /* hardware is weak pull-up */ + break; + + case PIN_CONFIG_BIAS_PULL_DOWN: + if (val != SUN4I_PINCTRL_PULL_DOWN) + return -EINVAL; + arg = 1; /* hardware is weak pull-down */ + break; + + case PIN_CONFIG_BIAS_DISABLE: + if (val != SUN4I_PINCTRL_NO_PULL) + return -EINVAL; + arg = 0; + break; + + default: + /* sunxi_pconf_reg should catch anything unsupported */ + WARN_ON(1); + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + static int sunxi_pconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct sunxi_pinctrl_group *g = &pctl->groups[group]; - *config = pctl->groups[group].config; - - return 0; + /* We only support 1 pin per group. 
Chain it to the pin callback */ + return sunxi_pconf_get(pctldev, g->pin, config); } static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, @@ -286,23 +533,27 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct sunxi_pinctrl_group *g = &pctl->groups[group]; - unsigned long flags; unsigned pin = g->pin - pctl->desc->pin_base; - u32 val, mask; - u16 strength; - u8 dlevel; int i; - spin_lock_irqsave(&pctl->lock, flags); - for (i = 0; i < num_configs; i++) { - switch (pinconf_to_config_param(configs[i])) { + enum pin_config_param param; + unsigned long flags; + u32 offset, shift, mask, reg; + u16 arg, val; + int ret; + + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask); + if (ret < 0) + return ret; + + switch (param) { case PIN_CONFIG_DRIVE_STRENGTH: - strength = pinconf_to_config_argument(configs[i]); - if (strength > 40) { - spin_unlock_irqrestore(&pctl->lock, flags); + if (arg < 10 || arg > 40) return -EINVAL; - } /* * We convert from mA to what the register expects: * 0: 10mA @@ -310,38 +561,40 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, * 2: 30mA * 3: 40mA */ - dlevel = strength / 10 - 1; - val = readl(pctl->membase + sunxi_dlevel_reg(pin)); - mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(pin); - writel((val & ~mask) - | dlevel << sunxi_dlevel_offset(pin), - pctl->membase + sunxi_dlevel_reg(pin)); + val = arg / 10 - 1; + break; + case PIN_CONFIG_BIAS_DISABLE: + val = 0; break; case PIN_CONFIG_BIAS_PULL_UP: - val = readl(pctl->membase + sunxi_pull_reg(pin)); - mask = PULL_PINS_MASK << sunxi_pull_offset(pin); - writel((val & ~mask) | 1 << sunxi_pull_offset(pin), - pctl->membase + sunxi_pull_reg(pin)); + if (arg == 0) + return -EINVAL; + val = 1; break; case PIN_CONFIG_BIAS_PULL_DOWN: - val = readl(pctl->membase + sunxi_pull_reg(pin)); - mask = PULL_PINS_MASK << sunxi_pull_offset(pin); - writel((val & ~mask) | 2 << sunxi_pull_offset(pin), - pctl->membase + sunxi_pull_reg(pin)); + if (arg == 0) + return -EINVAL; + val = 2; break; default: - break; + /* sunxi_pconf_reg should catch anything unsupported */ + WARN_ON(1); + return -ENOTSUPP; } - /* cache the config value */ - g->config = configs[i]; - } /* for each config */ - spin_unlock_irqrestore(&pctl->lock, flags); + spin_lock_irqsave(&pctl->lock, flags); + reg = readl(pctl->membase + offset); + reg &= ~(mask << shift); + writel(reg | val << shift, pctl->membase + offset); + spin_unlock_irqrestore(&pctl->lock, flags); + } /* for each config */ return 0; } static const struct pinconf_ops sunxi_pconf_ops = { + .is_generic = true, + .pin_config_get = sunxi_pconf_get, .pin_config_group_get = sunxi_pconf_group_get, .pin_config_group_set = sunxi_pconf_group_set, }; @@ -870,6 +1123,91 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) return 0; } +static int sunxi_pinctrl_get_debounce_div(struct clk *clk, int freq, int *diff) +{ + unsigned long clock = clk_get_rate(clk); + unsigned int best_diff, best_div; + int i; + + best_diff = abs(freq - clock); + best_div = 0; + + for (i = 1; i < 8; i++) { + int cur_diff = abs(freq - (clock >> i)); + + if (cur_diff < best_diff) { + best_diff = cur_diff; + best_div = i; + } + } + + *diff = best_diff; + return best_div; +} + +static int sunxi_pinctrl_setup_debounce(struct sunxi_pinctrl *pctl, + struct device_node *node) +{ + unsigned int hosc_diff, losc_diff; + unsigned int 
hosc_div, losc_div; + struct clk *hosc, *losc; + u8 div, src; + int i, ret; + + /* Deal with old DTs that didn't have the oscillators */ + if (of_count_phandle_with_args(node, "clocks", "#clock-cells") != 3) + return 0; + + /* If we don't have any setup, bail out */ + if (!of_find_property(node, "input-debounce", NULL)) + return 0; + + losc = devm_clk_get(pctl->dev, "losc"); + if (IS_ERR(losc)) + return PTR_ERR(losc); + + hosc = devm_clk_get(pctl->dev, "hosc"); + if (IS_ERR(hosc)) + return PTR_ERR(hosc); + + for (i = 0; i < pctl->desc->irq_banks; i++) { + unsigned long debounce_freq; + u32 debounce; + + ret = of_property_read_u32_index(node, "input-debounce", + i, &debounce); + if (ret) + return ret; + + if (!debounce) + continue; + + debounce_freq = DIV_ROUND_CLOSEST(USEC_PER_SEC, debounce); + losc_div = sunxi_pinctrl_get_debounce_div(losc, + debounce_freq, + &losc_diff); + + hosc_div = sunxi_pinctrl_get_debounce_div(hosc, + debounce_freq, + &hosc_diff); + + if (hosc_diff < losc_diff) { + div = hosc_div; + src = 1; + } else { + div = losc_div; + src = 0; + } + + writel(src | div << 4, + pctl->membase + + sunxi_irq_debounce_reg_from_bank(i, + pctl->desc->irq_bank_base)); + } + + return 0; +} + int sunxi_pinctrl_init(struct platform_device *pdev, const struct sunxi_pinctrl_desc *desc) { @@ -1032,6 +1370,8 @@ int sunxi_pinctrl_init(struct platform_device *pdev, pctl); } + sunxi_pinctrl_setup_debounce(pctl, node); + dev_info(&pdev->dev, "initialized sunXi PIO driver\n"); return 0; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index 0afce1ab12d039515a1ea40f66bdb3806ca9f5ca..f78a44a03189fa2bf7124c1ce5f9d47c8e37278c 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -69,6 +69,8 @@ #define IRQ_STATUS_IRQ_BITS 1 #define IRQ_STATUS_IRQ_MASK ((1 << IRQ_STATUS_IRQ_BITS) - 1) +#define IRQ_DEBOUNCE_REG 0x218 + #define IRQ_MEM_SIZE 0x20 #define IRQ_EDGE_RISING 0x00 @@ -109,7 +111,6 @@ struct sunxi_pinctrl_function { struct sunxi_pinctrl_group { const char *name; - unsigned long config; unsigned pin; }; @@ -266,6 +267,11 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq) return irq_num * IRQ_CTRL_IRQ_BITS; } +static inline u32 sunxi_irq_debounce_reg_from_bank(u8 bank, unsigned bank_base) +{ + return IRQ_DEBOUNCE_REG + (bank_base + bank) * IRQ_MEM_SIZE; +} + static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base) { return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE; diff --git a/drivers/pinctrl/vt8500/pinctrl-vt8500.c b/drivers/pinctrl/vt8500/pinctrl-vt8500.c index ca946b3dbdb4f717b2543ee9fade272c4c0926ea..767f340d6b115f00d798a06aa86612c500c8bbe7 100644 --- a/drivers/pinctrl/vt8500/pinctrl-vt8500.c +++ b/drivers/pinctrl/vt8500/pinctrl-vt8500.c @@ -14,7 +14,7 @@ */ #include -#include +#include #include #include #include @@ -473,11 +473,6 @@ static int vt8500_pinctrl_probe(struct platform_device *pdev) return wmt_pinctrl_probe(pdev, data); } -static int vt8500_pinctrl_remove(struct platform_device *pdev) -{ - return wmt_pinctrl_remove(pdev); -} - static const struct of_device_id wmt_pinctrl_of_match[] = { { .compatible = "via,vt8500-pinctrl" }, { /* sentinel */ }, @@ -485,16 +480,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = { static struct platform_driver wmt_pinctrl_driver = { .probe = vt8500_pinctrl_probe, - .remove = vt8500_pinctrl_remove, .driver = { .name = "pinctrl-vt8500", .of_match_table = wmt_pinctrl_of_match, + .suppress_bind_attrs = true, }, }; - 
-module_platform_driver(wmt_pinctrl_driver); - -MODULE_AUTHOR("Tony Prisk "); -MODULE_DESCRIPTION("VIA VT8500 Pincontrol driver"); -MODULE_LICENSE("GPL v2"); -MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match); +builtin_platform_driver(wmt_pinctrl_driver); diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8505.c b/drivers/pinctrl/vt8500/pinctrl-wm8505.c index 626fc7ec017462948e784715851f84dd58281b35..a56fdbd87e42b9da3bd3c80976eb2f28155fe7cc 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wm8505.c +++ b/drivers/pinctrl/vt8500/pinctrl-wm8505.c @@ -14,7 +14,7 @@ */ #include -#include +#include #include #include #include @@ -504,11 +504,6 @@ static int wm8505_pinctrl_probe(struct platform_device *pdev) return wmt_pinctrl_probe(pdev, data); } -static int wm8505_pinctrl_remove(struct platform_device *pdev) -{ - return wmt_pinctrl_remove(pdev); -} - static const struct of_device_id wmt_pinctrl_of_match[] = { { .compatible = "wm,wm8505-pinctrl" }, { /* sentinel */ }, @@ -516,16 +511,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = { static struct platform_driver wmt_pinctrl_driver = { .probe = wm8505_pinctrl_probe, - .remove = wm8505_pinctrl_remove, .driver = { .name = "pinctrl-wm8505", .of_match_table = wmt_pinctrl_of_match, + .suppress_bind_attrs = true, }, }; - -module_platform_driver(wmt_pinctrl_driver); - -MODULE_AUTHOR("Tony Prisk "); -MODULE_DESCRIPTION("Wondermedia WM8505 Pincontrol driver"); -MODULE_LICENSE("GPL v2"); -MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match); +builtin_platform_driver(wmt_pinctrl_driver); diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8650.c b/drivers/pinctrl/vt8500/pinctrl-wm8650.c index 8953aba8bfc2261e5c6cef9392d69cb4c5b1209f..270dd491f5a1e8c82d8968bdbdb49d24ee284903 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wm8650.c +++ b/drivers/pinctrl/vt8500/pinctrl-wm8650.c @@ -14,7 +14,7 @@ */ #include -#include +#include #include #include #include @@ -342,11 +342,6 @@ static int wm8650_pinctrl_probe(struct platform_device *pdev) return wmt_pinctrl_probe(pdev, data); } -static int wm8650_pinctrl_remove(struct platform_device *pdev) -{ - return wmt_pinctrl_remove(pdev); -} - static const struct of_device_id wmt_pinctrl_of_match[] = { { .compatible = "wm,wm8650-pinctrl" }, { /* sentinel */ }, @@ -354,16 +349,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = { static struct platform_driver wmt_pinctrl_driver = { .probe = wm8650_pinctrl_probe, - .remove = wm8650_pinctrl_remove, .driver = { .name = "pinctrl-wm8650", .of_match_table = wmt_pinctrl_of_match, + .suppress_bind_attrs = true, }, }; - -module_platform_driver(wmt_pinctrl_driver); - -MODULE_AUTHOR("Tony Prisk "); -MODULE_DESCRIPTION("Wondermedia WM8650 Pincontrol driver"); -MODULE_LICENSE("GPL v2"); -MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match); +builtin_platform_driver(wmt_pinctrl_driver); diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/drivers/pinctrl/vt8500/pinctrl-wm8750.c index c79053d430db5ad6894ebf60d10e30fdbfe05b06..74f7b3a18f3a859272598f58b800407ff9b871a4 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wm8750.c +++ b/drivers/pinctrl/vt8500/pinctrl-wm8750.c @@ -14,7 +14,7 @@ */ #include -#include +#include #include #include #include @@ -381,11 +381,6 @@ static int wm8750_pinctrl_probe(struct platform_device *pdev) return wmt_pinctrl_probe(pdev, data); } -static int wm8750_pinctrl_remove(struct platform_device *pdev) -{ - return wmt_pinctrl_remove(pdev); -} - static const struct of_device_id wmt_pinctrl_of_match[] = { { .compatible = "wm,wm8750-pinctrl" }, { /* sentinel */ }, @@ -393,16 +388,10 
@@ static const struct of_device_id wmt_pinctrl_of_match[] = { static struct platform_driver wmt_pinctrl_driver = { .probe = wm8750_pinctrl_probe, - .remove = wm8750_pinctrl_remove, .driver = { .name = "pinctrl-wm8750", .of_match_table = wmt_pinctrl_of_match, + .suppress_bind_attrs = true, }, }; - -module_platform_driver(wmt_pinctrl_driver); - -MODULE_AUTHOR("Tony Prisk "); -MODULE_DESCRIPTION("Wondermedia WM8750 Pincontrol driver"); -MODULE_LICENSE("GPL v2"); -MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match); +builtin_platform_driver(wmt_pinctrl_driver); diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8850.c b/drivers/pinctrl/vt8500/pinctrl-wm8850.c index f232b163c735cfb4e9a4c3fa11033c9d5142739f..45792aa7a06e31efdd9536bc392df0395ce8c77c 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wm8850.c +++ b/drivers/pinctrl/vt8500/pinctrl-wm8850.c @@ -14,7 +14,7 @@ */ #include -#include +#include #include #include #include @@ -360,11 +360,6 @@ static int wm8850_pinctrl_probe(struct platform_device *pdev) return wmt_pinctrl_probe(pdev, data); } -static int wm8850_pinctrl_remove(struct platform_device *pdev) -{ - return wmt_pinctrl_remove(pdev); -} - static const struct of_device_id wmt_pinctrl_of_match[] = { { .compatible = "wm,wm8850-pinctrl" }, { /* sentinel */ }, @@ -372,16 +367,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = { static struct platform_driver wmt_pinctrl_driver = { .probe = wm8850_pinctrl_probe, - .remove = wm8850_pinctrl_remove, .driver = { .name = "pinctrl-wm8850", .of_match_table = wmt_pinctrl_of_match, + .suppress_bind_attrs = true, }, }; - -module_platform_driver(wmt_pinctrl_driver); - -MODULE_AUTHOR("Tony Prisk "); -MODULE_DESCRIPTION("Wondermedia WM8850 Pincontrol driver"); -MODULE_LICENSE("GPL v2"); -MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match); +builtin_platform_driver(wmt_pinctrl_driver); diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c index cbc6386316783daabc445cb26d9d7d56d9673380..270ca2a47a8c9a8237921800d64c46192f3e3437 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -608,12 +607,3 @@ int wmt_pinctrl_probe(struct platform_device *pdev, gpiochip_remove(&data->gpio_chip); return err; } - -int wmt_pinctrl_remove(struct platform_device *pdev) -{ - struct wmt_pinctrl_data *data = platform_get_drvdata(pdev); - - gpiochip_remove(&data->gpio_chip); - - return 0; -} diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.h b/drivers/pinctrl/vt8500/pinctrl-wmt.h index 41f5f2deb5d6215983012c82e9f827577ed0001c..885613396fe78aba18d3ec87647319f1d63e04ad 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wmt.h +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.h @@ -76,4 +76,3 @@ struct wmt_pinctrl_data { int wmt_pinctrl_probe(struct platform_device *pdev, struct wmt_pinctrl_data *data); -int wmt_pinctrl_remove(struct platform_device *pdev); diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c index 8abd80dbcbed7974b4ace4265dd9dad1bca89edd..47268ecedc4dd7474ca4b9f789c39f5290ed98aa 100644 --- a/drivers/platform/chrome/cros_ec_dev.c +++ b/drivers/platform/chrome/cros_ec_dev.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -87,6 +88,41 @@ static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen) return ret; } +static int cros_ec_check_features(struct cros_ec_dev *ec, int feature) +{ + struct cros_ec_command *msg; + int ret; + + if (ec->features[0] == -1U && 
ec->features[1] == -1U) { + /* features bitmap not read yet */ + + msg = kmalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->version = 0; + msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset; + msg->insize = sizeof(ec->features); + msg->outsize = 0; + + ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + if (ret < 0 || msg->result != EC_RES_SUCCESS) { + dev_warn(ec->dev, "cannot get EC features: %d/%d\n", + ret, msg->result); + memset(ec->features, 0, sizeof(ec->features)); + } + + memcpy(ec->features, msg->data, sizeof(ec->features)); + + dev_dbg(ec->dev, "EC features %08x %08x\n", + ec->features[0], ec->features[1]); + + kfree(msg); + } + + return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature); +} + /* Device file ops */ static int ec_device_open(struct inode *inode, struct file *filp) { @@ -230,6 +266,123 @@ static void __remove(struct device *dev) kfree(ec); } +static void cros_ec_sensors_register(struct cros_ec_dev *ec) +{ + /* + * Issue a command to get the number of sensor reported. + * Build an array of sensors driver and register them all. + */ + int ret, i, id, sensor_num; + struct mfd_cell *sensor_cells; + struct cros_ec_sensor_platform *sensor_platforms; + int sensor_type[MOTIONSENSE_TYPE_MAX]; + struct ec_params_motion_sense *params; + struct ec_response_motion_sense *resp; + struct cros_ec_command *msg; + + msg = kzalloc(sizeof(struct cros_ec_command) + + max(sizeof(*params), sizeof(*resp)), GFP_KERNEL); + if (msg == NULL) + return; + + msg->version = 2; + msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset; + msg->outsize = sizeof(*params); + msg->insize = sizeof(*resp); + + params = (struct ec_params_motion_sense *)msg->data; + params->cmd = MOTIONSENSE_CMD_DUMP; + + ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + if (ret < 0 || msg->result != EC_RES_SUCCESS) { + dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n", + ret, msg->result); + goto error; + } + + resp = (struct ec_response_motion_sense *)msg->data; + sensor_num = resp->dump.sensor_count; + /* Allocate 2 extra sensors in case lid angle or FIFO are needed */ + sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2), + GFP_KERNEL); + if (sensor_cells == NULL) + goto error; + + sensor_platforms = kzalloc(sizeof(struct cros_ec_sensor_platform) * + (sensor_num + 1), GFP_KERNEL); + if (sensor_platforms == NULL) + goto error_platforms; + + memset(sensor_type, 0, sizeof(sensor_type)); + id = 0; + for (i = 0; i < sensor_num; i++) { + params->cmd = MOTIONSENSE_CMD_INFO; + params->info.sensor_num = i; + ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + if (ret < 0 || msg->result != EC_RES_SUCCESS) { + dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n", + i, ret, msg->result); + continue; + } + switch (resp->info.type) { + case MOTIONSENSE_TYPE_ACCEL: + sensor_cells[id].name = "cros-ec-accel"; + break; + case MOTIONSENSE_TYPE_GYRO: + sensor_cells[id].name = "cros-ec-gyro"; + break; + case MOTIONSENSE_TYPE_MAG: + sensor_cells[id].name = "cros-ec-mag"; + break; + case MOTIONSENSE_TYPE_PROX: + sensor_cells[id].name = "cros-ec-prox"; + break; + case MOTIONSENSE_TYPE_LIGHT: + sensor_cells[id].name = "cros-ec-light"; + break; + case MOTIONSENSE_TYPE_ACTIVITY: + sensor_cells[id].name = "cros-ec-activity"; + break; + default: + dev_warn(ec->dev, "unknown type %d\n", resp->info.type); + continue; + } + sensor_platforms[id].sensor_num = i; + sensor_cells[id].id = sensor_type[resp->info.type]; + sensor_cells[id].platform_data = &sensor_platforms[id]; + 
sensor_cells[id].pdata_size = + sizeof(struct cros_ec_sensor_platform); + + sensor_type[resp->info.type]++; + id++; + } + if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) { + sensor_platforms[id].sensor_num = sensor_num; + + sensor_cells[id].name = "cros-ec-angle"; + sensor_cells[id].id = 0; + sensor_cells[id].platform_data = &sensor_platforms[id]; + sensor_cells[id].pdata_size = + sizeof(struct cros_ec_sensor_platform); + id++; + } + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { + sensor_cells[id].name = "cros-ec-ring"; + id++; + } + + ret = mfd_add_devices(ec->dev, 0, sensor_cells, id, + NULL, 0, NULL); + if (ret) + dev_err(ec->dev, "failed to add EC sensors\n"); + + kfree(sensor_platforms); +error_platforms: + kfree(sensor_cells); +error: + kfree(msg); +} + static int ec_device_probe(struct platform_device *pdev) { int retval = -ENOMEM; @@ -245,6 +398,8 @@ static int ec_device_probe(struct platform_device *pdev) ec->ec_dev = dev_get_drvdata(dev->parent); ec->dev = dev; ec->cmd_offset = ec_platform->cmd_offset; + ec->features[0] = -1U; /* Not cached yet */ + ec->features[1] = -1U; /* Not cached yet */ device_initialize(&ec->class_dev); cdev_init(&ec->cdev, &fops); @@ -282,6 +437,10 @@ static int ec_device_probe(struct platform_device *pdev) goto dev_reg_failed; } + /* check whether this EC is a sensor hub. */ + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) + cros_ec_sensors_register(ec); + return 0; dev_reg_failed: diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 1aba2c74160eb5c51ce49dd679de2dd0090fe2dd..2b21033f11f02e742c2cf756ecdea975abcfe27e 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -308,10 +308,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, * returns a small amount, then there's no need to pin that * much memory to the process. */ - down_read(¤t->mm->mmap_sem); - ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE, - &page, NULL); - up_read(¤t->mm->mmap_sem); + ret = get_user_pages_unlocked(address, 1, &page, + is_write ? 0 : FOLL_WRITE); if (ret < 0) break; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index b8a21d7b25d4c34e4042caf624ac4c86211c11d0..5fe8be089b8b2dc51ced608e0790c7ce05b8dd18 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -363,6 +363,18 @@ config IDEAPAD_LAPTOP This is a driver for Lenovo IdeaPad netbooks contains drivers for rfkill switch, hotkey, fan control and backlight control. +config SURFACE3_WMI + tristate "Surface 3 WMI Driver" + depends on ACPI_WMI + depends on DMI + depends on INPUT + depends on SPI + ---help--- + Say Y here if you have a Surface 3. + + To compile this driver as a module, choose M here: the module will + be called surface3-wmi. + config THINKPAD_ACPI tristate "ThinkPad ACPI Laptop Extras" depends on ACPI @@ -1005,12 +1017,27 @@ config INTEL_PMC_IPC The PMC is an ARC processor which defines IPC commands for communication with other entities in the CPU. +config INTEL_BXTWC_PMIC_TMU + tristate "Intel BXT Whiskey Cove TMU Driver" + depends on REGMAP + depends on INTEL_SOC_PMIC && INTEL_PMC_IPC + ---help--- + Select this driver to use Intel BXT Whiskey Cove PMIC TMU feature. + This driver enables the alarm wakeup functionality in the TMU unit + of Whiskey Cove PMIC. 
+ config SURFACE_PRO3_BUTTON tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet" depends on ACPI && INPUT ---help--- This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet. +config SURFACE_3_BUTTON + tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet" + depends on ACPI && KEYBOARD_GPIO + ---help--- + This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet. + config INTEL_PUNIT_IPC tristate "Intel P-Unit IPC Driver" ---help--- @@ -1027,4 +1054,26 @@ config INTEL_TELEMETRY used to get various SoC events and parameters directly via debugfs files. Various tools may use this interface for SoC state monitoring. + +config MLX_PLATFORM + tristate "Mellanox Technologies platform support" + depends on X86_64 + ---help--- + This option enables system support for the Mellanox Technologies + platform. The Mellanox systems provide data center networking + solutions based on Virtual Protocol Interconnect (VPI) technology + that enables seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE + connections. + + If you have a Mellanox system, say Y or M here. + +config MLX_CPLD_PLATFORM + tristate "Mellanox platform hotplug driver support" + default n + select HWMON + select I2C + ---help--- + This driver handles hot-plug events for the power supplies, power + cables and fans on a wide range of Mellanox IB and Ethernet systems. + endif # X86_PLATFORM_DEVICES diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 2efa86d2a1a7df6cb4a3c5861c0d71e0ada39168..d4111f0f8a78fdbcc7a10ef5e236bdda1cdffc15 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o obj-$(CONFIG_ACPI_WMI) += wmi.o obj-$(CONFIG_MSI_WMI) += msi-wmi.o +obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o # toshiba_acpi must link after wmi to ensure that wmi devices are found @@ -66,8 +67,12 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o +obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o +obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \ intel_telemetry_pltdrv.o \ intel_telemetry_debugfs.o obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o +obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o +obj-$(CONFIG_MLX_CPLD_PLATFORM) += mlxcpld-hotplug.o diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 79d64ea00bfb7e17fa908020e62c029dd01ff483..a66192f692e31afbad13294039121e81f0727f26 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -355,6 +355,32 @@ static const struct dmi_system_id acer_blacklist[] __initconst = { {} }; +static const struct dmi_system_id amw0_whitelist[] __initconst = { + { + .ident = "Acer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + }, + }, + { + .ident = "Gateway", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Gateway"), + }, + }, + { + .ident = "Packard Bell", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"), + }, + }, + {} +}; + +/* + * This quirk table is only for the Acer/Gateway/Packard Bell family + * of machines that are supported by the acer-wmi driver.
+ */ static const struct dmi_system_id acer_quirks[] __initconst = { { .callback = dmi_matched, @@ -464,6 +490,17 @@ static const struct dmi_system_id acer_quirks[] __initconst = { }, .driver_data = &quirk_acer_travelmate_2490, }, + {} +}; + +/* + * This quirk list is for those non-Acer machines that have AMW0_GUID1 + * but were supported by acer-wmi in the past. It is kept here only for + * backward compatibility; please do not add new machines to it. Such + * non-Acer machines should be supported by their own appropriate wmi + * drivers. + */ +static const struct dmi_system_id non_acer_quirks[] __initconst = { { .callback = dmi_matched, .ident = "Fujitsu Siemens Amilo Li 1718", @@ -598,6 +635,7 @@ static void __init find_quirks(void) { if (!force_series) { dmi_check_system(acer_quirks); + dmi_check_system(non_acer_quirks); } else if (force_series == 2490) { quirks = &quirk_acer_travelmate_2490; } @@ -2107,6 +2145,24 @@ static int __init acer_wmi_init(void) find_quirks(); + /* + * The AMW0_GUID1 wmi is not only found on the Acer family but also on + * other machines like Lenovo, Fujitsu and Medion. In the past, the + * acer-wmi driver handled those non-Acer machines through its quirk + * list, and was in fact loaded on any machine that has AMW0_GUID1. + * That behavior is wrong, because such machines should be supported + * by their appropriate wmi drivers, e.g. fujitsu-laptop or + * ideapad-laptop. So check here that a machine with AMW0_GUID1 is + * either in the Acer/Gateway/Packard Bell whitelist or already in the + * legacy quirk list. + */ + if (wmi_has_guid(AMW0_GUID1) && + !dmi_check_system(amw0_whitelist) && + quirks == &quirk_unknown) { + pr_err("Unsupported machine has AMW0_GUID1, unable to load\n"); + return -ENODEV; + } + /* * Detect which ACPI-WMI interface we're using. */ diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 26e4cbc34db8f8e3b515f2e4e3a2c0c836e59c4b..5be4783e40d4c9e9ca547d89d5faa6c1437f927a 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -116,8 +116,13 @@ static struct quirk_entry quirk_asus_ux303ub = { .wmi_backlight_native = true, }; +static struct quirk_entry quirk_asus_x550lb = { + .xusb2pr = 0x01D9, +}; + static int dmi_matched(const struct dmi_system_id *dmi) { + pr_info("Identified laptop model '%s'\n", dmi->ident); quirks = dmi->driver_data; return 1; } @@ -173,6 +178,15 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_wapf4, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X45U", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X45U"), + }, + .driver_data = &quirk_asus_wapf4, + }, { .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. X456UA", @@ -398,6 +412,15 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_ux303ub, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC.
X550LB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X550LB"), + }, + .driver_data = &quirk_asus_x550lb, + }, {}, }; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index ce6ca31a2d09a207c1e80c157427aa3960079b73..43cb680adbb42045aaeea332220460440180215a 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -156,6 +156,9 @@ MODULE_LICENSE("GPL"); #define ASUS_FAN_CTRL_MANUAL 1 #define ASUS_FAN_CTRL_AUTO 2 +#define USB_INTEL_XUSB2PR 0xD0 +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 + struct bios_args { u32 arg0; u32 arg1; @@ -1080,6 +1083,29 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus) return result; } +static void asus_wmi_set_xusb2pr(struct asus_wmi *asus) +{ + struct pci_dev *xhci_pdev; + u32 orig_ports_available; + u32 ports_available = asus->driver->quirks->xusb2pr; + + xhci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI, + NULL); + + if (!xhci_pdev) + return; + + pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, + &orig_ports_available); + + pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, + cpu_to_le32(ports_available)); + + pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n", + orig_ports_available, ports_available); +} + /* * Hwmon device */ @@ -2087,6 +2113,9 @@ static int asus_wmi_add(struct platform_device *pdev) if (asus->driver->quirks->wmi_backlight_native) acpi_video_set_dmi_backlight_type(acpi_backlight_native); + if (asus->driver->quirks->xusb2pr) + asus_wmi_set_xusb2pr(asus); + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { err = asus_wmi_backlight_init(asus); if (err && err != -ENODEV) diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index 0e19014e9f5420415824d79a4aeca426eccc9a8c..fdff626c3b51b039f3b63473a6cf333d04fda819 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -53,6 +53,7 @@ struct quirk_entry { * and let the ACPI interrupt to send out the key event. 
*/ int no_display_toggle; + u32 xusb2pr; bool (*i8042_filter)(unsigned char data, unsigned char str, struct serio *serio); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 2c2f02b2e08a13a7421cbcc73aaad4f8ecce0afa..14392a01ab360aceaf80c8424e2b4854ac5ea264 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -1904,38 +1904,40 @@ static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev) return 0; } -static void kbd_led_level_set(struct led_classdev *led_cdev, - enum led_brightness value) +static int kbd_led_level_set(struct led_classdev *led_cdev, + enum led_brightness value) { struct kbd_state state; struct kbd_state new_state; u16 num; + int ret; if (kbd_get_max_level()) { - if (kbd_get_state(&state)) - return; + ret = kbd_get_state(&state); + if (ret) + return ret; new_state = state; - if (kbd_set_level(&new_state, value)) - return; - kbd_set_state_safe(&new_state, &state); - return; + ret = kbd_set_level(&new_state, value); + if (ret) + return ret; + return kbd_set_state_safe(&new_state, &state); } if (kbd_get_valid_token_counts()) { for (num = kbd_token_bits; num != 0 && value > 0; --value) num &= num - 1; /* clear the first bit set */ if (num == 0) - return; - kbd_set_token_bit(ffs(num) - 1); - return; + return 0; + return kbd_set_token_bit(ffs(num) - 1); } pr_warn("Keyboard brightness level control not supported\n"); + return -ENXIO; } static struct led_classdev kbd_led = { .name = "dell::kbd_backlight", - .brightness_set = kbd_led_level_set, + .brightness_set_blocking = kbd_led_level_set, .brightness_get = kbd_led_level_get, .groups = kbd_led_groups, }; diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index da2fe18162e1196b932eb42baeea1ad47f411ee3..75e637047d36d781adcb7d7aa0da8d5891e3a0e2 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -114,7 +114,7 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = { { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, /* Wifi Catcher */ - { KE_KEY, 0xe011, { KEY_PROG2 } }, + { KE_KEY, 0xe011, { KEY_WLAN } }, /* Ambient light sensor toggle */ { KE_IGNORE, 0xe013, { KEY_RESERVED } }, @@ -274,6 +274,16 @@ static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = { /* Stealth mode toggle */ { KE_IGNORE, 0x155, { KEY_RESERVED } }, + + /* Rugged magnetic dock attach/detach events */ + { KE_IGNORE, 0x156, { KEY_RESERVED } }, + { KE_IGNORE, 0x157, { KEY_RESERVED } }, + + /* Rugged programmable (P1/P2/P3 keys) */ + { KE_KEY, 0x850, { KEY_PROG1 } }, + { KE_KEY, 0x851, { KEY_PROG2 } }, + { KE_KEY, 0x852, { KEY_PROG3 } }, + }; /* diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index a2323941e67707711f6e2c2738057d2f21744e4d..410741acb3c92dabe36417800f564a943c5d42ec 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -870,6 +870,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"), }, }, + { + .ident = "Lenovo ideapad Y700-15ACZ", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ACZ"), + }, + }, { .ident = "Lenovo ideapad Y700-15ISK", .matches = { @@ -933,6 +940,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"), }, }, + { + .ident = "Lenovo Yoga 900", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + 
DMI_MATCH(DMI_BOARD_NAME, "VIUU4"), + }, + }, { .ident = "Lenovo YOGA 910-13IKB", .matches = { diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index ed5874217ee76cf4364d8bbd6f8e49f26e19c215..cb3ab2b212b1768b8355aced7d7956f9e1ed2185 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -69,7 +69,7 @@ static int intel_hid_set_enable(struct device *device, int enable) arg0.integer.value = enable; status = acpi_evaluate_object(ACPI_HANDLE(device), "HDSM", &args, NULL); - if (!ACPI_SUCCESS(status)) { + if (ACPI_FAILURE(status)) { dev_warn(device, "failed to %sable hotkeys\n", enable ? "en" : "dis"); return -EIO; @@ -148,7 +148,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) } status = acpi_evaluate_integer(handle, "HDEM", NULL, &ev_index); - if (!ACPI_SUCCESS(status)) { + if (ACPI_FAILURE(status)) { dev_warn(&device->dev, "failed to get event index\n"); return; } @@ -167,7 +167,7 @@ static int intel_hid_probe(struct platform_device *device) int err; status = acpi_evaluate_integer(handle, "HDMM", NULL, &mode); - if (!ACPI_SUCCESS(status)) { + if (ACPI_FAILURE(status)) { dev_warn(&device->dev, "failed to read mode\n"); return -ENODEV; } @@ -264,7 +264,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev)) + if (acpi_create_platform_device(dev, NULL)) dev_info(&dev->dev, "intel-hid: created platform device\n"); diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c index 04cf5dffdfd9152436719e69955df00afe30e144..bbe4c06c769f66eabf8ed956cbb3361ef8407220 100644 --- a/drivers/platform/x86/intel-smartconnect.c +++ b/drivers/platform/x86/intel-smartconnect.c @@ -29,7 +29,7 @@ static int smartconnect_acpi_init(struct acpi_device *acpi) acpi_status status; status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value); - if (!ACPI_SUCCESS(status)) + if (ACPI_FAILURE(status)) return -EINVAL; if (value & 0x1) { diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 146d02f8c9bc01c99c791bb45ba8df77ce7bb64b..554e82ebe83ca1fd102affcc5357bea0fa429c56 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -49,34 +49,19 @@ static int intel_vbtn_input_setup(struct platform_device *device) struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); int ret; - priv->input_dev = input_allocate_device(); + priv->input_dev = devm_input_allocate_device(&device->dev); if (!priv->input_dev) return -ENOMEM; ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL); if (ret) - goto err_free_device; + return ret; priv->input_dev->dev.parent = &device->dev; priv->input_dev->name = "Intel Virtual Button driver"; priv->input_dev->id.bustype = BUS_HOST; - ret = input_register_device(priv->input_dev); - if (ret) - goto err_free_device; - - return 0; - -err_free_device: - input_free_device(priv->input_dev); - return ret; -} - -static void intel_vbtn_input_destroy(struct platform_device *device) -{ - struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); - - input_unregister_device(priv->input_dev); + return input_register_device(priv->input_dev); } static void notify_handler(acpi_handle handle, u32 event, void *context) @@ -97,7 +82,7 @@ static int intel_vbtn_probe(struct platform_device *device) int err; status = acpi_evaluate_object(handle, "VBDL", NULL, NULL); - if 
(!ACPI_SUCCESS(status)) { + if (ACPI_FAILURE(status)) { dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n"); return -ENODEV; } @@ -117,24 +102,16 @@ static int intel_vbtn_probe(struct platform_device *device) ACPI_DEVICE_NOTIFY, notify_handler, device); - if (ACPI_FAILURE(status)) { - err = -EBUSY; - goto err_remove_input; - } + if (ACPI_FAILURE(status)) + return -EBUSY; return 0; - -err_remove_input: - intel_vbtn_input_destroy(device); - - return err; } static int intel_vbtn_remove(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); - intel_vbtn_input_destroy(device); acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); /* @@ -164,7 +141,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev)) + if (acpi_create_platform_device(dev, NULL)) dev_info(&dev->dev, "intel-vbtn: created platform device\n"); diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c new file mode 100644 index 0000000000000000000000000000000000000000..e202abd5b0df2d61708d044ddeba090862cc98cb --- /dev/null +++ b/drivers/platform/x86/intel_bxtwc_tmu.c @@ -0,0 +1,162 @@ +/* + * intel_bxtwc_tmu.c - Intel BXT Whiskey Cove PMIC TMU driver + * + * Copyright (C) 2016 Intel Corporation. All rights reserved. + * + * This driver adds TMU (Time Management Unit) support for Intel BXT platform. + * It enables the alarm wake-up functionality in the TMU unit of Whiskey Cove + * PMIC. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#define BXTWC_TMUIRQ 0x4fb6 +#define BXTWC_MIRQLVL1 0x4e0e +#define BXTWC_MTMUIRQ_REG 0x4fb7 +#define BXTWC_MIRQLVL1_MTMU BIT(1) +#define BXTWC_TMU_WK_ALRM BIT(1) +#define BXTWC_TMU_SYS_ALRM BIT(2) +#define BXTWC_TMU_ALRM_MASK (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM) +#define BXTWC_TMU_ALRM_IRQ (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM) + +struct wcove_tmu { + int irq; + struct device *dev; + struct regmap *regmap; +}; + +static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data) +{ + struct wcove_tmu *wctmu = data; + unsigned int tmu_irq; + + /* Read TMU interrupt reg */ + regmap_read(wctmu->regmap, BXTWC_TMUIRQ, &tmu_irq); + if (tmu_irq & BXTWC_TMU_ALRM_IRQ) { + /* clear TMU irq */ + regmap_write(wctmu->regmap, BXTWC_TMUIRQ, tmu_irq); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static int bxt_wcove_tmu_probe(struct platform_device *pdev) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); + struct regmap_irq_chip_data *regmap_irq_chip; + struct wcove_tmu *wctmu; + int ret, virq, irq; + + wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL); + if (!wctmu) + return -ENOMEM; + + wctmu->dev = &pdev->dev; + wctmu->regmap = pmic->regmap; + + irq = platform_get_irq(pdev, 0); + + if (irq < 0) { + dev_err(&pdev->dev, "invalid irq %d\n", irq); + return irq; + } + + regmap_irq_chip = pmic->irq_chip_data_tmu; + virq = regmap_irq_get_virq(regmap_irq_chip, irq); + if (virq < 0) { + dev_err(&pdev->dev, + "failed to get virtual interrupt=%d\n", irq); + return virq; + } + + ret = devm_request_threaded_irq(&pdev->dev, virq, + NULL, bxt_wcove_tmu_irq_handler, + IRQF_ONESHOT, "bxt_wcove_tmu", wctmu); + if (ret) { + dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n", + ret, virq); + return ret; + } + wctmu->irq = virq; + + /* Enable TMU interrupts */ + regmap_update_bits(wctmu->regmap, BXTWC_MIRQLVL1, + BXTWC_MIRQLVL1_MTMU, 0); + + /* Unmask TMU second level Wake & System alarm */ + regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG, + BXTWC_TMU_ALRM_MASK, 0); + + platform_set_drvdata(pdev, wctmu); + return 0; +} + +static int bxt_wcove_tmu_remove(struct platform_device *pdev) +{ + struct wcove_tmu *wctmu = platform_get_drvdata(pdev); + unsigned int val; + + /* Mask TMU interrupts */ + regmap_read(wctmu->regmap, BXTWC_MIRQLVL1, &val); + regmap_write(wctmu->regmap, BXTWC_MIRQLVL1, + val | BXTWC_MIRQLVL1_MTMU); + regmap_read(wctmu->regmap, BXTWC_MTMUIRQ_REG, &val); + regmap_write(wctmu->regmap, BXTWC_MTMUIRQ_REG, + val | BXTWC_TMU_ALRM_MASK); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int bxtwc_tmu_suspend(struct device *dev) +{ + struct wcove_tmu *wctmu = dev_get_drvdata(dev); + + enable_irq_wake(wctmu->irq); + return 0; +} + +static int bxtwc_tmu_resume(struct device *dev) +{ + struct wcove_tmu *wctmu = dev_get_drvdata(dev); + + disable_irq_wake(wctmu->irq); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(bxtwc_tmu_pm_ops, bxtwc_tmu_suspend, bxtwc_tmu_resume); + +static const struct platform_device_id bxt_wcove_tmu_id_table[] = { + { .name = "bxt_wcove_tmu" }, + {}, +}; +MODULE_DEVICE_TABLE(platform, bxt_wcove_tmu_id_table); + +static struct platform_driver bxt_wcove_tmu_driver = { + .probe = bxt_wcove_tmu_probe, + .remove = bxt_wcove_tmu_remove, + .driver = { + .name = "bxt_wcove_tmu", + .pm = &bxtwc_tmu_pm_ops, + }, + .id_table = bxt_wcove_tmu_id_table, +}; + +module_platform_driver(bxt_wcove_tmu_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Nilesh Bacchewar "); +MODULE_DESCRIPTION("BXT 
Whiskey Cove TMU Driver"); diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c index 9f713b832ba3ce8810d6bfcb2c1f7b697d0b26b7..0df3c9d375096e77234aba0048fdd59366b12696 100644 --- a/drivers/platform/x86/intel_mid_thermal.c +++ b/drivers/platform/x86/intel_mid_thermal.c @@ -415,6 +415,7 @@ static struct thermal_device_info *initialize_sensor(int index) return td_info; } +#ifdef CONFIG_PM_SLEEP /** * mid_thermal_resume - resume routine * @dev: device structure @@ -442,6 +443,7 @@ static int mid_thermal_suspend(struct device *dev) */ return configure_adc(0); } +#endif static SIMPLE_DEV_PM_OPS(mid_thermal_pm, mid_thermal_suspend, mid_thermal_resume); diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index e8b1b836ca2d3cd04e884e26a1916bc8a450daec..b130b8c9b9d7b429e9dbb0b17926285337ab1d78 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -19,10 +19,12 @@ */ #include +#include #include #include #include #include +#include #include #include @@ -32,16 +34,106 @@ static struct pmc_dev pmc; +static const struct pmc_bit_map spt_pll_map[] = { + {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0}, + {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1}, + {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2}, + {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3}, + {}, +}; + +static const struct pmc_bit_map spt_mphy_map[] = { + {"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0}, + {"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1}, + {"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2}, + {"MPHY CORE LANE 3", SPT_PMC_BIT_MPHY_LANE3}, + {"MPHY CORE LANE 4", SPT_PMC_BIT_MPHY_LANE4}, + {"MPHY CORE LANE 5", SPT_PMC_BIT_MPHY_LANE5}, + {"MPHY CORE LANE 6", SPT_PMC_BIT_MPHY_LANE6}, + {"MPHY CORE LANE 7", SPT_PMC_BIT_MPHY_LANE7}, + {"MPHY CORE LANE 8", SPT_PMC_BIT_MPHY_LANE8}, + {"MPHY CORE LANE 9", SPT_PMC_BIT_MPHY_LANE9}, + {"MPHY CORE LANE 10", SPT_PMC_BIT_MPHY_LANE10}, + {"MPHY CORE LANE 11", SPT_PMC_BIT_MPHY_LANE11}, + {"MPHY CORE LANE 12", SPT_PMC_BIT_MPHY_LANE12}, + {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13}, + {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14}, + {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15}, + {}, +}; + +static const struct pmc_bit_map spt_pfear_map[] = { + {"PMC", SPT_PMC_BIT_PMC}, + {"OPI-DMI", SPT_PMC_BIT_OPI}, + {"SPI / eSPI", SPT_PMC_BIT_SPI}, + {"XHCI", SPT_PMC_BIT_XHCI}, + {"SPA", SPT_PMC_BIT_SPA}, + {"SPB", SPT_PMC_BIT_SPB}, + {"SPC", SPT_PMC_BIT_SPC}, + {"GBE", SPT_PMC_BIT_GBE}, + {"SATA", SPT_PMC_BIT_SATA}, + {"HDA-PGD0", SPT_PMC_BIT_HDA_PGD0}, + {"HDA-PGD1", SPT_PMC_BIT_HDA_PGD1}, + {"HDA-PGD2", SPT_PMC_BIT_HDA_PGD2}, + {"HDA-PGD3", SPT_PMC_BIT_HDA_PGD3}, + {"RSVD", SPT_PMC_BIT_RSVD_0B}, + {"LPSS", SPT_PMC_BIT_LPSS}, + {"LPC", SPT_PMC_BIT_LPC}, + {"SMB", SPT_PMC_BIT_SMB}, + {"ISH", SPT_PMC_BIT_ISH}, + {"P2SB", SPT_PMC_BIT_P2SB}, + {"DFX", SPT_PMC_BIT_DFX}, + {"SCC", SPT_PMC_BIT_SCC}, + {"RSVD", SPT_PMC_BIT_RSVD_0C}, + {"FUSE", SPT_PMC_BIT_FUSE}, + {"CAMERA", SPT_PMC_BIT_CAMREA}, + {"RSVD", SPT_PMC_BIT_RSVD_0D}, + {"USB3-OTG", SPT_PMC_BIT_USB3_OTG}, + {"EXI", SPT_PMC_BIT_EXI}, + {"CSE", SPT_PMC_BIT_CSE}, + {"CSME_KVM", SPT_PMC_BIT_CSME_KVM}, + {"CSME_PMT", SPT_PMC_BIT_CSME_PMT}, + {"CSME_CLINK", SPT_PMC_BIT_CSME_CLINK}, + {"CSME_PTIO", SPT_PMC_BIT_CSME_PTIO}, + {"CSME_USBR", SPT_PMC_BIT_CSME_USBR}, + {"CSME_SUSRAM", SPT_PMC_BIT_CSME_SUSRAM}, + {"CSME_SMT", SPT_PMC_BIT_CSME_SMT}, + {"RSVD", SPT_PMC_BIT_RSVD_1A}, + {"CSME_SMS2", SPT_PMC_BIT_CSME_SMS2}, + {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1}, + 
{"CSME_RTC", SPT_PMC_BIT_CSME_RTC}, + {"CSME_PSF", SPT_PMC_BIT_CSME_PSF}, + {}, +}; + +static const struct pmc_reg_map spt_reg_map = { + .pfear_sts = spt_pfear_map, + .mphy_sts = spt_mphy_map, + .pll_sts = spt_pll_map, +}; + static const struct pci_device_id pmc_pci_ids[] = { - { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL }, + { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), + (kernel_ulong_t)&spt_reg_map }, { 0, }, }; +static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) +{ + return readb(pmcdev->regbase + offset); +} + static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) { return readl(pmcdev->regbase + reg_offset); } +static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int + reg_offset, u32 val) +{ + writel(val, pmcdev->regbase + reg_offset); +} + static inline u32 pmc_core_adjust_slp_s0_step(u32 value) { return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP; @@ -90,6 +182,245 @@ static int pmc_core_dev_state_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n"); +static int pmc_core_check_read_lock_bit(void) +{ + struct pmc_dev *pmcdev = &pmc; + u32 value; + + value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_CFG_OFFSET); + return test_bit(SPT_PMC_READ_DISABLE_BIT, + (unsigned long *)&value); +} + +#if IS_ENABLED(CONFIG_DEBUG_FS) +static void pmc_core_display_map(struct seq_file *s, int index, + u8 pf_reg, const struct pmc_bit_map *pf_map) +{ + seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n", + index, pf_map[index].name, + pf_map[index].bit_mask & pf_reg ? "Off" : "On"); +} + +static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + const struct pmc_bit_map *map = pmcdev->map->pfear_sts; + u8 pf_regs[NUM_ENTRIES]; + int index, iter; + + iter = SPT_PMC_XRAM_PPFEAR0A; + + for (index = 0; index < NUM_ENTRIES; index++, iter++) + pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); + + for (index = 0; map[index].name; index++) + pmc_core_display_map(s, index, pf_regs[index / 8], map); + + return 0; +} + +static int pmc_core_ppfear_sts_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmc_core_ppfear_sts_show, inode->i_private); +} + +static const struct file_operations pmc_core_ppfear_ops = { + .open = pmc_core_ppfear_sts_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* This function should return link status, 0 means ready */ +static int pmc_core_mtpmc_link_status(void) +{ + struct pmc_dev *pmcdev = &pmc; + u32 value; + + value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET); + return test_bit(SPT_PMC_MSG_FULL_STS_BIT, + (unsigned long *)&value); +} + +static int pmc_core_send_msg(u32 *addr_xram) +{ + struct pmc_dev *pmcdev = &pmc; + u32 dest; + int timeout; + + for (timeout = NUM_RETRIES; timeout > 0; timeout--) { + if (pmc_core_mtpmc_link_status() == 0) + break; + msleep(5); + } + + if (timeout <= 0 && pmc_core_mtpmc_link_status()) + return -EBUSY; + + dest = (*addr_xram & MTPMC_MASK) | (1U << 1); + pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest); + return 0; +} + +static int pmc_core_mphy_pg_sts_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + const struct pmc_bit_map *map = pmcdev->map->mphy_sts; + u32 mphy_core_reg_low, mphy_core_reg_high; + u32 val_low, val_high; + int index, err = 0; + + if (pmcdev->pmc_xram_read_bit) { + seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS."); 
+ return 0; + } + + mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16); + mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16); + + mutex_lock(&pmcdev->lock); + + if (pmc_core_send_msg(&mphy_core_reg_low) != 0) { + err = -EBUSY; + goto out_unlock; + } + + msleep(10); + val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); + + if (pmc_core_send_msg(&mphy_core_reg_high) != 0) { + err = -EBUSY; + goto out_unlock; + } + + msleep(10); + val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); + + for (index = 0; map[index].name && index < 8; index++) { + seq_printf(s, "%-32s\tState: %s\n", + map[index].name, + map[index].bit_mask & val_low ? "Not power gated" : + "Power gated"); + } + + for (index = 8; map[index].name; index++) { + seq_printf(s, "%-32s\tState: %s\n", + map[index].name, + map[index].bit_mask & val_high ? "Not power gated" : + "Power gated"); + } + +out_unlock: + mutex_unlock(&pmcdev->lock); + return err; +} + +static int pmc_core_mphy_pg_sts_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmc_core_mphy_pg_sts_show, inode->i_private); +} + +static const struct file_operations pmc_core_mphy_pg_ops = { + .open = pmc_core_mphy_pg_sts_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int pmc_core_pll_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + const struct pmc_bit_map *map = pmcdev->map->pll_sts; + u32 mphy_common_reg, val; + int index, err = 0; + + if (pmcdev->pmc_xram_read_bit) { + seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS."); + return 0; + } + + mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16); + mutex_lock(&pmcdev->lock); + + if (pmc_core_send_msg(&mphy_common_reg) != 0) { + err = -EBUSY; + goto out_unlock; + } + + /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */ + msleep(10); + val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); + + for (index = 0; map[index].name ; index++) { + seq_printf(s, "%-32s\tState: %s\n", + map[index].name, + map[index].bit_mask & val ? "Active" : "Idle"); + } + +out_unlock: + mutex_unlock(&pmcdev->lock); + return err; +} + +static int pmc_core_pll_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmc_core_pll_show, inode->i_private); +} + +static const struct file_operations pmc_core_pll_ops = { + .open = pmc_core_pll_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user +*userbuf, size_t count, loff_t *ppos) +{ + struct pmc_dev *pmcdev = &pmc; + u32 val, buf_size, fd; + int err = 0; + + buf_size = count < 64 ? count : 64; + mutex_lock(&pmcdev->lock); + + if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) { + err = -EFAULT; + goto out_unlock; + } + + if (val > NUM_IP_IGN_ALLOWED) { + err = -EINVAL; + goto out_unlock; + } + + fd = pmc_core_reg_read(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET); + fd |= (1U << val); + pmc_core_reg_write(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET, fd); + +out_unlock: + mutex_unlock(&pmcdev->lock); + return err == 0 ? 
count : err; +} + +static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused) +{ + return 0; +} + +static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmc_core_ltr_ignore_show, inode->i_private); +} + +static const struct file_operations pmc_core_ltr_ignore_ops = { + .open = pmc_core_ltr_ignore_open, + .read = seq_read, + .write = pmc_core_ltr_ignore_write, + .llseek = seq_lseek, + .release = single_release, +}; + static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) { debugfs_remove_recursive(pmcdev->dbgfs_dir); @@ -106,20 +437,59 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev) pmcdev->dbgfs_dir = dir; file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO, dir, pmcdev, &pmc_core_dev_state); + if (!file) + goto err; - if (!file) { - pmc_core_dbgfs_unregister(pmcdev); - return -ENODEV; - } + file = debugfs_create_file("pch_ip_power_gating_status", + S_IFREG | S_IRUGO, dir, pmcdev, + &pmc_core_ppfear_ops); + if (!file) + goto err; + + file = debugfs_create_file("mphy_core_lanes_power_gating_status", + S_IFREG | S_IRUGO, dir, pmcdev, + &pmc_core_mphy_pg_ops); + if (!file) + goto err; + + file = debugfs_create_file("pll_status", + S_IFREG | S_IRUGO, dir, pmcdev, + &pmc_core_pll_ops); + if (!file) + goto err; + + file = debugfs_create_file("ltr_ignore", + S_IFREG | S_IRUGO, dir, pmcdev, + &pmc_core_ltr_ignore_ops); + + if (!file) + goto err; return 0; +err: + pmc_core_dbgfs_unregister(pmcdev); + return -ENODEV; } +#else +static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev) +{ + return 0; +} + +static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) +{ +} +#endif /* CONFIG_DEBUG_FS */ static const struct x86_cpu_id intel_pmc_core_ids[] = { { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT, (kernel_ulong_t)NULL}, { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT, (kernel_ulong_t)NULL}, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_MOBILE, X86_FEATURE_MWAIT, + (kernel_ulong_t)NULL}, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_DESKTOP, X86_FEATURE_MWAIT, + (kernel_ulong_t)NULL}, {} }; @@ -128,6 +498,7 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id) struct device *ptr_dev = &dev->dev; struct pmc_dev *pmcdev = &pmc; const struct x86_cpu_id *cpu_id; + const struct pmc_reg_map *map = (struct pmc_reg_map *)id->driver_data; int err; cpu_id = x86_match_cpu(intel_pmc_core_ids); @@ -149,6 +520,7 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id) dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n"); return err; } + pmcdev->base_addr &= PMC_BASE_ADDR_MASK; dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr); pmcdev->regbase = devm_ioremap_nocache(ptr_dev, @@ -159,6 +531,10 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENOMEM; } + mutex_init(&pmcdev->lock); + pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); + pmcdev->map = map; + err = pmc_core_dbgfs_register(pmcdev); if (err < 0) dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n"); diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index e3f671f4d12228e8ee9a11c471c5f8d68dc1a549..5a48e77284795d0b7864452a5c2e0d0b8a4206bd 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -26,8 +26,111 @@ #define SPT_PMC_BASE_ADDR_OFFSET 0x48 #define 
SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c -#define SPT_PMC_MMIO_REG_LEN 0x100 +#define SPT_PMC_PM_CFG_OFFSET 0x18 +#define SPT_PMC_PM_STS_OFFSET 0x1c +#define SPT_PMC_MTPMC_OFFSET 0x20 +#define SPT_PMC_MFPMC_OFFSET 0x38 +#define SPT_PMC_LTR_IGNORE_OFFSET 0x30C +#define SPT_PMC_MPHY_CORE_STS_0 0x1143 +#define SPT_PMC_MPHY_CORE_STS_1 0x1142 +#define SPT_PMC_MPHY_COM_STS_0 0x1155 +#define SPT_PMC_MMIO_REG_LEN 0x1000 #define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64 +#define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1) +#define MTPMC_MASK 0xffff0000 +#define NUM_ENTRIES 5 +#define SPT_PMC_READ_DISABLE_BIT 0x16 +#define SPT_PMC_MSG_FULL_STS_BIT 0x18 +#define NUM_RETRIES 100 +#define NUM_IP_IGN_ALLOWED 17 + +/* Sunrise Point: PGD PFET Enable Ack Status Registers */ +enum ppfear_regs { + SPT_PMC_XRAM_PPFEAR0A = 0x590, + SPT_PMC_XRAM_PPFEAR0B, + SPT_PMC_XRAM_PPFEAR0C, + SPT_PMC_XRAM_PPFEAR0D, + SPT_PMC_XRAM_PPFEAR1A, +}; + +#define SPT_PMC_BIT_PMC BIT(0) +#define SPT_PMC_BIT_OPI BIT(1) +#define SPT_PMC_BIT_SPI BIT(2) +#define SPT_PMC_BIT_XHCI BIT(3) +#define SPT_PMC_BIT_SPA BIT(4) +#define SPT_PMC_BIT_SPB BIT(5) +#define SPT_PMC_BIT_SPC BIT(6) +#define SPT_PMC_BIT_GBE BIT(7) + +#define SPT_PMC_BIT_SATA BIT(0) +#define SPT_PMC_BIT_HDA_PGD0 BIT(1) +#define SPT_PMC_BIT_HDA_PGD1 BIT(2) +#define SPT_PMC_BIT_HDA_PGD2 BIT(3) +#define SPT_PMC_BIT_HDA_PGD3 BIT(4) +#define SPT_PMC_BIT_RSVD_0B BIT(5) +#define SPT_PMC_BIT_LPSS BIT(6) +#define SPT_PMC_BIT_LPC BIT(7) + +#define SPT_PMC_BIT_SMB BIT(0) +#define SPT_PMC_BIT_ISH BIT(1) +#define SPT_PMC_BIT_P2SB BIT(2) +#define SPT_PMC_BIT_DFX BIT(3) +#define SPT_PMC_BIT_SCC BIT(4) +#define SPT_PMC_BIT_RSVD_0C BIT(5) +#define SPT_PMC_BIT_FUSE BIT(6) +#define SPT_PMC_BIT_CAMREA BIT(7) + +#define SPT_PMC_BIT_RSVD_0D BIT(0) +#define SPT_PMC_BIT_USB3_OTG BIT(1) +#define SPT_PMC_BIT_EXI BIT(2) +#define SPT_PMC_BIT_CSE BIT(3) +#define SPT_PMC_BIT_CSME_KVM BIT(4) +#define SPT_PMC_BIT_CSME_PMT BIT(5) +#define SPT_PMC_BIT_CSME_CLINK BIT(6) +#define SPT_PMC_BIT_CSME_PTIO BIT(7) + +#define SPT_PMC_BIT_CSME_USBR BIT(0) +#define SPT_PMC_BIT_CSME_SUSRAM BIT(1) +#define SPT_PMC_BIT_CSME_SMT BIT(2) +#define SPT_PMC_BIT_RSVD_1A BIT(3) +#define SPT_PMC_BIT_CSME_SMS2 BIT(4) +#define SPT_PMC_BIT_CSME_SMS1 BIT(5) +#define SPT_PMC_BIT_CSME_RTC BIT(6) +#define SPT_PMC_BIT_CSME_PSF BIT(7) + +#define SPT_PMC_BIT_MPHY_LANE0 BIT(0) +#define SPT_PMC_BIT_MPHY_LANE1 BIT(1) +#define SPT_PMC_BIT_MPHY_LANE2 BIT(2) +#define SPT_PMC_BIT_MPHY_LANE3 BIT(3) +#define SPT_PMC_BIT_MPHY_LANE4 BIT(4) +#define SPT_PMC_BIT_MPHY_LANE5 BIT(5) +#define SPT_PMC_BIT_MPHY_LANE6 BIT(6) +#define SPT_PMC_BIT_MPHY_LANE7 BIT(7) + +#define SPT_PMC_BIT_MPHY_LANE8 BIT(0) +#define SPT_PMC_BIT_MPHY_LANE9 BIT(1) +#define SPT_PMC_BIT_MPHY_LANE10 BIT(2) +#define SPT_PMC_BIT_MPHY_LANE11 BIT(3) +#define SPT_PMC_BIT_MPHY_LANE12 BIT(4) +#define SPT_PMC_BIT_MPHY_LANE13 BIT(5) +#define SPT_PMC_BIT_MPHY_LANE14 BIT(6) +#define SPT_PMC_BIT_MPHY_LANE15 BIT(7) + +#define SPT_PMC_BIT_MPHY_CMN_LANE0 BIT(0) +#define SPT_PMC_BIT_MPHY_CMN_LANE1 BIT(1) +#define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2) +#define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3) + +struct pmc_bit_map { + const char *name; + u32 bit_mask; +}; + +struct pmc_reg_map { + const struct pmc_bit_map *pfear_sts; + const struct pmc_bit_map *mphy_sts; + const struct pmc_bit_map *pll_sts; +}; /** * struct pmc_dev - pmc device structure @@ -43,8 +146,13 @@ struct pmc_dev { u32 base_addr; void __iomem *regbase; + const struct pmc_reg_map *map; +#if IS_ENABLED(CONFIG_DEBUG_FS) struct dentry *dbgfs_dir; 
+#endif /* CONFIG_DEBUG_FS */ bool has_slp_s0_res; + int pmc_xram_read_bit; + struct mutex lock; /* generic mutex lock for PMC Core */ }; #endif /* PMC_CORE_H */ diff --git a/arch/x86/platform/mellanox/mlx-platform.c b/drivers/platform/x86/mlx-platform.c similarity index 73% rename from arch/x86/platform/mellanox/mlx-platform.c rename to drivers/platform/x86/mlx-platform.c index 7dcfcca97399752a1c0475fdfa5543b9f3227d8d..97b4c3a219c0c79f3a3ed9359bc30bb13a439ce6 100644 --- a/arch/x86/platform/mellanox/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -1,5 +1,4 @@ /* - * arch/x86/platform/mellanox/mlx-platform.c * Copyright (c) 2016 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Vadim Pasternak * @@ -39,6 +38,7 @@ #include #include #include +#include #define MLX_PLAT_DEVICE_NAME "mlxplat" @@ -70,6 +70,7 @@ struct mlxplat_priv { struct platform_device *pdev_i2c; struct platform_device *pdev_mux[MLXPLAT_CPLD_LPC_MUX_DEVS]; + struct platform_device *pdev_hotplug; }; /* Regions for LPC I2C controller and LPC base register space */ @@ -121,7 +122,87 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { }; -static struct platform_device *mlxplat_dev; +/* Platform hotplug devices */ +static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_psu[] = { + { + .brdinfo = { I2C_BOARD_INFO("24c02", 0x51) }, + .bus = 10, + }, + { + .brdinfo = { I2C_BOARD_INFO("24c02", 0x50) }, + .bus = 10, + }, +}; + +static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_pwr[] = { + { + .brdinfo = { I2C_BOARD_INFO("dps460", 0x59) }, + .bus = 10, + }, + { + .brdinfo = { I2C_BOARD_INFO("dps460", 0x58) }, + .bus = 10, + }, +}; + +static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_fan[] = { + { + .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) }, + .bus = 11, + }, + { + .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) }, + .bus = 12, + }, + { + .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) }, + .bus = 13, + }, + { + .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) }, + .bus = 14, + }, +}; + +/* Platform hotplug default data */ +static +struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_hotplug_default_data = { + .top_aggr_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x3a), + .top_aggr_mask = 0x48, + .top_aggr_psu_mask = 0x08, + .psu_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x58), + .psu_mask = 0x03, + .psu_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_psu), + .psu = mlxplat_mlxcpld_hotplug_psu, + .top_aggr_pwr_mask = 0x08, + .pwr_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x64), + .pwr_mask = 0x03, + .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_pwr), + .pwr = mlxplat_mlxcpld_hotplug_pwr, + .top_aggr_fan_mask = 0x40, + .fan_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x88), + .fan_mask = 0x0f, + .fan_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_fan), + .fan = mlxplat_mlxcpld_hotplug_fan, +}; + +/* Platform hotplug MSN21xx system family data */ +static +struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_hotplug_msn21xx_data = { + .top_aggr_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x3a), + .top_aggr_mask = 0x04, + .top_aggr_pwr_mask = 0x04, + .pwr_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x64), + .pwr_mask = 0x03, + .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_pwr), +}; + +static struct resource mlxplat_mlxcpld_hotplug_resources[] = { + [0] = DEFINE_RES_IRQ_NAMED(17, "mlxcpld-hotplug"), +}; + +struct platform_device *mlxplat_dev; +struct mlxcpld_hotplug_platform_data *mlxplat_hotplug; static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) { @@ 
-132,6 +213,7 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_default_channels[i]); } + mlxplat_hotplug = &mlxplat_mlxcpld_hotplug_default_data; return 1; }; @@ -145,6 +227,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); } + mlxplat_hotplug = &mlxplat_mlxcpld_hotplug_msn21xx_data; return 1; }; @@ -216,7 +299,7 @@ static int __init mlxplat_init(void) if (IS_ERR(priv->pdev_i2c)) { err = PTR_ERR(priv->pdev_i2c); goto fail_alloc; - }; + } for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { priv->pdev_mux[i] = platform_device_register_resndata( @@ -230,6 +313,16 @@ static int __init mlxplat_init(void) } } + priv->pdev_hotplug = platform_device_register_resndata( + &mlxplat_dev->dev, "mlxcpld-hotplug", -1, + mlxplat_mlxcpld_hotplug_resources, + ARRAY_SIZE(mlxplat_mlxcpld_hotplug_resources), + mlxplat_hotplug, sizeof(*mlxplat_hotplug)); + if (IS_ERR(priv->pdev_hotplug)) { + err = PTR_ERR(priv->pdev_hotplug); + goto fail_platform_mux_register; + } + return 0; fail_platform_mux_register: @@ -248,6 +341,8 @@ static void __exit mlxplat_exit(void) struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev); int i; + platform_device_unregister(priv->pdev_hotplug); + for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--) platform_device_unregister(priv->pdev_mux[i]); diff --git a/drivers/platform/x86/mlxcpld-hotplug.c b/drivers/platform/x86/mlxcpld-hotplug.c new file mode 100644 index 0000000000000000000000000000000000000000..aff3686b3b37b6970a479910715f0810caccc1b0 --- /dev/null +++ b/drivers/platform/x86/mlxcpld-hotplug.c @@ -0,0 +1,515 @@ +/* + * drivers/platform/x86/mlxcpld-hotplug.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Vadim Pasternak + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Offset of event and mask registers from status register */ +#define MLXCPLD_HOTPLUG_EVENT_OFF 1 +#define MLXCPLD_HOTPLUG_MASK_OFF 2 +#define MLXCPLD_HOTPLUG_AGGR_MASK_OFF 1 + +#define MLXCPLD_HOTPLUG_ATTRS_NUM 8 + +/** + * enum mlxcpld_hotplug_attr_type - sysfs attributes for hotplug events: + * @MLXCPLD_HOTPLUG_ATTR_TYPE_PSU: power supply unit attribute; + * @MLXCPLD_HOTPLUG_ATTR_TYPE_PWR: power cable attribute; + * @MLXCPLD_HOTPLUG_ATTR_TYPE_FAN: FAN drawer attribute; + */ +enum mlxcpld_hotplug_attr_type { + MLXCPLD_HOTPLUG_ATTR_TYPE_PSU, + MLXCPLD_HOTPLUG_ATTR_TYPE_PWR, + MLXCPLD_HOTPLUG_ATTR_TYPE_FAN, +}; + +/** + * struct mlxcpld_hotplug_priv_data - platform private data: + * @irq: platform interrupt number; + * @pdev: platform device; + * @plat: platform data; + * @hwmon: hwmon device; + * @mlxcpld_hotplug_attr: sysfs attributes array; + * @mlxcpld_hotplug_dev_attr: sysfs sensor device attribute array; + * @group: sysfs attribute group; + * @groups: list of sysfs attribute group for hwmon registration; + * @dwork: delayed work template; + * @lock: spin lock; + * @aggr_cache: last value of aggregation register status; + * @psu_cache: last value of PSU register status; + * @pwr_cache: last value of power register status; + * @fan_cache: last value of FAN register status; + */ +struct mlxcpld_hotplug_priv_data { + int irq; + struct platform_device *pdev; + struct mlxcpld_hotplug_platform_data *plat; + struct device *hwmon; + struct attribute *mlxcpld_hotplug_attr[MLXCPLD_HOTPLUG_ATTRS_NUM + 1]; + struct sensor_device_attribute_2 + mlxcpld_hotplug_dev_attr[MLXCPLD_HOTPLUG_ATTRS_NUM]; + struct attribute_group group; + const struct attribute_group *groups[2]; + struct delayed_work dwork; + spinlock_t lock; + u8 aggr_cache; + u8 psu_cache; + u8 pwr_cache; + u8 fan_cache; +}; + +static ssize_t mlxcpld_hotplug_attr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct mlxcpld_hotplug_priv_data *priv = platform_get_drvdata(pdev); + int index = to_sensor_dev_attr_2(attr)->index; + int nr = to_sensor_dev_attr_2(attr)->nr; + u8 reg_val = 0; + + switch (nr) { + case MLXCPLD_HOTPLUG_ATTR_TYPE_PSU: + /* Bit = 0 : PSU is present. */ + reg_val = !!!(inb(priv->plat->psu_reg_offset) & BIT(index)); + break; + + case MLXCPLD_HOTPLUG_ATTR_TYPE_PWR: + /* Bit = 1 : power cable is attached. */ + reg_val = !!(inb(priv->plat->pwr_reg_offset) & BIT(index % + priv->plat->pwr_count)); + break; + + case MLXCPLD_HOTPLUG_ATTR_TYPE_FAN: + /* Bit = 0 : FAN is present. 
 */
+		reg_val = !!!(inb(priv->plat->fan_reg_offset) & BIT(index %
+				priv->plat->fan_count));
+		break;
+	}
+
+	return sprintf(buf, "%u\n", reg_val);
+}
+
+#define PRIV_ATTR(i) priv->mlxcpld_hotplug_attr[i]
+#define PRIV_DEV_ATTR(i) priv->mlxcpld_hotplug_dev_attr[i]
+static int mlxcpld_hotplug_attr_init(struct mlxcpld_hotplug_priv_data *priv)
+{
+	int num_attrs = priv->plat->psu_count + priv->plat->pwr_count +
+			priv->plat->fan_count;
+	int i;
+
+	priv->group.attrs = devm_kzalloc(&priv->pdev->dev, num_attrs *
+					 sizeof(struct attribute *),
+					 GFP_KERNEL);
+	if (!priv->group.attrs)
+		return -ENOMEM;
+
+	for (i = 0; i < num_attrs; i++) {
+		PRIV_ATTR(i) = &PRIV_DEV_ATTR(i).dev_attr.attr;
+
+		if (i < priv->plat->psu_count) {
+			PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+						GFP_KERNEL, "psu%u", i + 1);
+			PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_PSU;
+		} else if (i < priv->plat->psu_count + priv->plat->pwr_count) {
+			PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+						GFP_KERNEL, "pwr%u", i %
+						priv->plat->pwr_count + 1);
+			PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_PWR;
+		} else {
+			PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+						GFP_KERNEL, "fan%u", i %
+						priv->plat->fan_count + 1);
+			PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_FAN;
+		}
+
+		if (!PRIV_ATTR(i)->name) {
+			dev_err(&priv->pdev->dev, "Memory allocation failed for sysfs attribute %d.\n",
+				i + 1);
+			return -ENOMEM;
+		}
+
+		PRIV_DEV_ATTR(i).dev_attr.attr.name = PRIV_ATTR(i)->name;
+		PRIV_DEV_ATTR(i).dev_attr.attr.mode = S_IRUGO;
+		PRIV_DEV_ATTR(i).dev_attr.show = mlxcpld_hotplug_attr_show;
+		PRIV_DEV_ATTR(i).index = i;
+		sysfs_attr_init(&PRIV_DEV_ATTR(i).dev_attr.attr);
+	}
+
+	priv->group.attrs = priv->mlxcpld_hotplug_attr;
+	priv->groups[0] = &priv->group;
+	priv->groups[1] = NULL;
+
+	return 0;
+}
+
+static int mlxcpld_hotplug_device_create(struct device *dev,
+					 struct mlxcpld_hotplug_device *item)
+{
+	item->adapter = i2c_get_adapter(item->bus);
+	if (!item->adapter) {
+		dev_err(dev, "Failed to get adapter for bus %d\n",
+			item->bus);
+		return -EFAULT;
+	}
+
+	item->client = i2c_new_device(item->adapter, &item->brdinfo);
+	if (!item->client) {
+		dev_err(dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+			item->brdinfo.type, item->bus, item->brdinfo.addr);
+		i2c_put_adapter(item->adapter);
+		item->adapter = NULL;
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void mlxcpld_hotplug_device_destroy(struct mlxcpld_hotplug_device *item)
+{
+	if (item->client) {
+		i2c_unregister_device(item->client);
+		item->client = NULL;
+	}
+
+	if (item->adapter) {
+		i2c_put_adapter(item->adapter);
+		item->adapter = NULL;
+	}
+}
+
+static inline void
+mlxcpld_hotplug_work_helper(struct device *dev,
+			    struct mlxcpld_hotplug_device *item, u8 is_inverse,
+			    u16 offset, u8 mask, u8 *cache)
+{
+	u8 val, asserted;
+	int bit;
+
+	/* Mask event. */
+	outb(0, offset + MLXCPLD_HOTPLUG_MASK_OFF);
+	/* Read status. */
+	val = inb(offset) & mask;
+	asserted = *cache ^ val;
+	*cache = val;
+
+	/*
+	 * Validate that the item related to the received signal type is
+	 * valid. This should never happen, except when some piece of
+	 * hardware is broken. In such a situation, just produce an error
+	 * message and return. The caller must continue to handle the
+	 * signals from other devices, if any.
+	 */
+	if (unlikely(!item)) {
+		dev_err(dev, "False signal is received: register at offset 0x%02x, mask 0x%02x.\n",
+			offset, mask);
+		return;
+	}
+
+	for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
+		if (val & BIT(bit)) {
+			if (is_inverse)
+				mlxcpld_hotplug_device_destroy(item + bit);
+			else
+				mlxcpld_hotplug_device_create(dev, item + bit);
+		} else {
+			if (is_inverse)
+				mlxcpld_hotplug_device_create(dev, item + bit);
+			else
+				mlxcpld_hotplug_device_destroy(item + bit);
+		}
+	}
+
+	/* Acknowledge event. */
+	outb(0, offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+	/* Unmask event. */
+	outb(mask, offset + MLXCPLD_HOTPLUG_MASK_OFF);
+}
+
+/*
+ * mlxcpld_hotplug_work_handler - traverses the CPLD interrupt registers
+ * according to the hierarchy schema below:
+ *
+ *                Aggregation registers (status/mask)
+ * PSU registers:              *---*
+ * *-----------------*         |   |
+ * |status/event/mask|----->   | * |
+ * *-----------------*         |   |
+ * Power registers:            |   |
+ * *-----------------*         |   |
+ * |status/event/mask|----->   | * |---> CPU
+ * *-----------------*         |   |
+ * FAN registers:
+ * *-----------------*         |   |
+ * |status/event/mask|----->   | * |
+ * *-----------------*         |   |
+ *                             *---*
+ *
+ * When a system change is detected (FAN in/out, PSU in/out, power cable
+ * attached/detached), the relevant device is created or destroyed.
+ */
+static void mlxcpld_hotplug_work_handler(struct work_struct *work)
+{
+	struct mlxcpld_hotplug_priv_data *priv = container_of(work,
+			struct mlxcpld_hotplug_priv_data, dwork.work);
+	u8 val, aggr_asserted;
+	unsigned long flags;
+
+	/* Mask aggregation event. */
+	outb(0, priv->plat->top_aggr_offset + MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+	/* Read aggregation status. */
+	val = inb(priv->plat->top_aggr_offset) & priv->plat->top_aggr_mask;
+	aggr_asserted = priv->aggr_cache ^ val;
+	priv->aggr_cache = val;
+
+	/* Handle PSU configuration changes. */
+	if (aggr_asserted & priv->plat->top_aggr_psu_mask)
+		mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->psu,
+					    1, priv->plat->psu_reg_offset,
+					    priv->plat->psu_mask,
+					    &priv->psu_cache);
+
+	/* Handle power cable configuration changes. */
+	if (aggr_asserted & priv->plat->top_aggr_pwr_mask)
+		mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->pwr,
+					    0, priv->plat->pwr_reg_offset,
+					    priv->plat->pwr_mask,
+					    &priv->pwr_cache);
+
+	/* Handle FAN configuration changes. */
+	if (aggr_asserted & priv->plat->top_aggr_fan_mask)
+		mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->fan,
+					    1, priv->plat->fan_reg_offset,
+					    priv->plat->fan_mask,
+					    &priv->fan_cache);
+
+	if (aggr_asserted) {
+		spin_lock_irqsave(&priv->lock, flags);
+
+		/*
+		 * It is possible that some signals have been asserted while
+		 * the interrupt was masked by mlxcpld_hotplug_work_handler.
+		 * Such signals would otherwise be missed, so the delayed
+		 * work is canceled and the work task is re-scheduled for
+		 * immediate execution. This allows any missed signals to be
+		 * handled. Otherwise, the work handler just validates that
+		 * no new signals have been received during masking.
+		 */
+		cancel_delayed_work(&priv->dwork);
+		schedule_delayed_work(&priv->dwork, 0);
+
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		return;
+	}
+
+	/* Unmask aggregation event (no acknowledge needed). */
+	outb(priv->plat->top_aggr_mask, priv->plat->top_aggr_offset +
+	     MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+}
+
+static void mlxcpld_hotplug_set_irq(struct mlxcpld_hotplug_priv_data *priv)
+{
+	/* Clear psu presence event. */
+	outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+	/* Set psu initial status as mask and unmask psu event. */
+	priv->psu_cache = priv->plat->psu_mask;
+	outb(priv->plat->psu_mask, priv->plat->psu_reg_offset +
+	     MLXCPLD_HOTPLUG_MASK_OFF);
+
+	/* Clear power cable event. */
+	outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+	/* Keep power initial status as zero and unmask power event. */
+	outb(priv->plat->pwr_mask, priv->plat->pwr_reg_offset +
+	     MLXCPLD_HOTPLUG_MASK_OFF);
+
+	/* Clear fan presence event. */
+	outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+	/* Set fan initial status as mask and unmask fan event. */
+	priv->fan_cache = priv->plat->fan_mask;
+	outb(priv->plat->fan_mask, priv->plat->fan_reg_offset +
+	     MLXCPLD_HOTPLUG_MASK_OFF);
+
+	/* Keep aggregation initial status as zero and unmask events. */
+	outb(priv->plat->top_aggr_mask, priv->plat->top_aggr_offset +
+	     MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+
+	/* Invoke the work handler to initialize the hotplug device settings. */
+	mlxcpld_hotplug_work_handler(&priv->dwork.work);
+
+	enable_irq(priv->irq);
+}
+
+static void mlxcpld_hotplug_unset_irq(struct mlxcpld_hotplug_priv_data *priv)
+{
+	int i;
+
+	disable_irq(priv->irq);
+	cancel_delayed_work_sync(&priv->dwork);
+
+	/* Mask aggregation event. */
+	outb(0, priv->plat->top_aggr_offset + MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+
+	/* Mask psu presence event. */
+	outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+	/* Clear psu presence event. */
+	outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+	/* Mask power cable event. */
+	outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+	/* Clear power cable event. */
+	outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+	/* Mask fan presence event. */
+	outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+	/* Clear fan presence event. */
+	outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+	/* Remove all the attached devices.
*/ + for (i = 0; i < priv->plat->psu_count; i++) + mlxcpld_hotplug_device_destroy(priv->plat->psu + i); + + for (i = 0; i < priv->plat->pwr_count; i++) + mlxcpld_hotplug_device_destroy(priv->plat->pwr + i); + + for (i = 0; i < priv->plat->fan_count; i++) + mlxcpld_hotplug_device_destroy(priv->plat->fan + i); +} + +static irqreturn_t mlxcpld_hotplug_irq_handler(int irq, void *dev) +{ + struct mlxcpld_hotplug_priv_data *priv = + (struct mlxcpld_hotplug_priv_data *)dev; + + /* Schedule work task for immediate execution.*/ + schedule_delayed_work(&priv->dwork, 0); + + return IRQ_HANDLED; +} + +static int mlxcpld_hotplug_probe(struct platform_device *pdev) +{ + struct mlxcpld_hotplug_platform_data *pdata; + struct mlxcpld_hotplug_priv_data *priv; + int err; + + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "Failed to get platform data.\n"); + return -EINVAL; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->pdev = pdev; + priv->plat = pdata; + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq < 0) { + dev_err(&pdev->dev, "Failed to get platform irq: %d\n", + priv->irq); + return priv->irq; + } + + err = devm_request_irq(&pdev->dev, priv->irq, + mlxcpld_hotplug_irq_handler, 0, pdev->name, + priv); + if (err) { + dev_err(&pdev->dev, "Failed to request irq: %d\n", err); + return err; + } + disable_irq(priv->irq); + + INIT_DELAYED_WORK(&priv->dwork, mlxcpld_hotplug_work_handler); + spin_lock_init(&priv->lock); + + err = mlxcpld_hotplug_attr_init(priv); + if (err) { + dev_err(&pdev->dev, "Failed to allocate attributes: %d\n", err); + return err; + } + + priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, + "mlxcpld_hotplug", priv, priv->groups); + if (IS_ERR(priv->hwmon)) { + dev_err(&pdev->dev, "Failed to register hwmon device %ld\n", + PTR_ERR(priv->hwmon)); + return PTR_ERR(priv->hwmon); + } + + platform_set_drvdata(pdev, priv); + + /* Perform initial interrupts setup. */ + mlxcpld_hotplug_set_irq(priv); + + return 0; +} + +static int mlxcpld_hotplug_remove(struct platform_device *pdev) +{ + struct mlxcpld_hotplug_priv_data *priv = platform_get_drvdata(pdev); + + /* Clean interrupts setup. 
*/ + mlxcpld_hotplug_unset_irq(priv); + + return 0; +} + +static struct platform_driver mlxcpld_hotplug_driver = { + .driver = { + .name = "mlxcpld-hotplug", + }, + .probe = mlxcpld_hotplug_probe, + .remove = mlxcpld_hotplug_remove, +}; + +module_platform_driver(mlxcpld_hotplug_driver); + +MODULE_AUTHOR("Vadim Pasternak "); +MODULE_DESCRIPTION("Mellanox CPLD hotplug platform driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_ALIAS("platform:mlxcpld-hotplug"); diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index 978e6d6405729cb32391f73c65ac23a1b0eeebef..9a32f8627eccfd2054e96ec895234c0ca29cd70d 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -283,7 +283,7 @@ static int __init msi_wmi_input_setup(void) if (err) goto err_free_keymap; - last_pressed = ktime_set(0, 0); + last_pressed = 0; return 0; diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 3f870972247c4ac6b9103e8c1a86f7634b8ff051..59b8eb626dcce6547296c5e84e5dfa34f4e64bb7 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -458,7 +458,7 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, NULL, &result); - if (!ACPI_SUCCESS(rc)) { + if (ACPI_FAILURE(rc)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "error getting hotkey status\n")); return; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index c890a49587e45da50ff4305164dcf6cd9f6162d4..aa2ee51d3547b3630c2199f7ad569efbf738d7a6 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -68,7 +68,7 @@ #include #include #endif -#include +#include #include #define dprintk(fmt, ...) \ diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c new file mode 100644 index 0000000000000000000000000000000000000000..cbf4d83a727106ee0f7e42ca1b868616d994c0c3 --- /dev/null +++ b/drivers/platform/x86/surface3-wmi.c @@ -0,0 +1,297 @@ +/* + * Driver for the LID cover switch of the Surface 3 + * + * Copyright (c) 2016 Red Hat Inc. + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; version 2 of the License. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Benjamin Tissoires "); +MODULE_DESCRIPTION("Surface 3 platform driver"); +MODULE_LICENSE("GPL"); + +#define ACPI_BUTTON_HID_LID "PNP0C0D" +#define SPI_CTL_OBJ_NAME "SPI" +#define SPI_TS_OBJ_NAME "NTRG" + +#define SURFACE3_LID_GUID "F7CC25EC-D20B-404C-8903-0ED4359C18AE" + +MODULE_ALIAS("wmi:" SURFACE3_LID_GUID); + +static const struct dmi_system_id surface3_dmi_table[] = { +#if defined(CONFIG_X86) + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"), + }, + }, +#endif + { } +}; + +struct surface3_wmi { + struct acpi_device *touchscreen_adev; + struct acpi_device *pnp0c0d_adev; + struct acpi_hotplug_context hp; + struct input_dev *input; +}; + +static struct platform_device *s3_wmi_pdev; + +static struct surface3_wmi s3_wmi; + +static DEFINE_MUTEX(s3_wmi_lock); + +static int s3_wmi_query_block(const char *guid, int instance, int *ret) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + acpi_status status; + union acpi_object *obj; + int error = 0; + + mutex_lock(&s3_wmi_lock); + status = wmi_query_block(guid, instance, &output); + + obj = output.pointer; + + if (!obj || obj->type != ACPI_TYPE_INTEGER) { + if (obj) { + pr_err("query block returned object type: %d - buffer length:%d\n", + obj->type, + obj->type == ACPI_TYPE_BUFFER ? + obj->buffer.length : 0); + } + error = -EINVAL; + goto out_free_unlock; + } + *ret = obj->integer.value; + out_free_unlock: + kfree(obj); + mutex_unlock(&s3_wmi_lock); + return error; +} + +static inline int s3_wmi_query_lid(int *ret) +{ + return s3_wmi_query_block(SURFACE3_LID_GUID, 0, ret); +} + +static int s3_wmi_send_lid_state(void) +{ + int ret, lid_sw; + + ret = s3_wmi_query_lid(&lid_sw); + if (ret) + return ret; + + input_report_switch(s3_wmi.input, SW_LID, lid_sw); + input_sync(s3_wmi.input); + + return 0; +} + +static int s3_wmi_hp_notify(struct acpi_device *adev, u32 value) +{ + return s3_wmi_send_lid_state(); +} + +static acpi_status s3_wmi_attach_spi_device(acpi_handle handle, + u32 level, + void *data, + void **return_value) +{ + struct acpi_device *adev, **ts_adev; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + + ts_adev = data; + + if (strncmp(acpi_device_bid(adev), SPI_TS_OBJ_NAME, + strlen(SPI_TS_OBJ_NAME))) + return AE_OK; + + if (*ts_adev) { + pr_err("duplicate entry %s\n", SPI_TS_OBJ_NAME); + return AE_OK; + } + + *ts_adev = adev; + + return AE_OK; +} + +static int s3_wmi_check_platform_device(struct device *dev, void *data) +{ + struct acpi_device *adev, *ts_adev; + acpi_handle handle; + acpi_status status; + + /* ignore non ACPI devices */ + handle = ACPI_HANDLE(dev); + if (!handle || acpi_bus_get_device(handle, &adev)) + return 0; + + /* check for LID ACPI switch */ + if (!strcmp(ACPI_BUTTON_HID_LID, acpi_device_hid(adev))) { + s3_wmi.pnp0c0d_adev = adev; + return 0; + } + + /* ignore non SPI controllers */ + if (strncmp(acpi_device_bid(adev), SPI_CTL_OBJ_NAME, + strlen(SPI_CTL_OBJ_NAME))) + return 0; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + s3_wmi_attach_spi_device, NULL, + &ts_adev, NULL); + if (ACPI_FAILURE(status)) + dev_warn(dev, "failed to enumerate SPI slaves\n"); + + if (!ts_adev) + return 0; + + s3_wmi.touchscreen_adev = ts_adev; + + return 0; +} + +static int s3_wmi_create_and_register_input(struct platform_device *pdev) +{ + struct input_dev *input; + int error; + + input = 
devm_input_allocate_device(&pdev->dev); + if (!input) + return -ENOMEM; + + input->name = "Lid Switch"; + input->phys = "button/input0"; + input->id.bustype = BUS_HOST; + input->id.product = 0x0005; + + input_set_capability(input, EV_SW, SW_LID); + + error = input_register_device(input); + if (error) + goto out_err; + + s3_wmi.input = input; + + return 0; + out_err: + input_free_device(s3_wmi.input); + return error; +} + +static int __init s3_wmi_probe(struct platform_device *pdev) +{ + int error; + + if (!dmi_check_system(surface3_dmi_table)) + return -ENODEV; + + memset(&s3_wmi, 0, sizeof(s3_wmi)); + + bus_for_each_dev(&platform_bus_type, NULL, NULL, + s3_wmi_check_platform_device); + + if (!s3_wmi.touchscreen_adev) + return -ENODEV; + + acpi_bus_trim(s3_wmi.pnp0c0d_adev); + + error = s3_wmi_create_and_register_input(pdev); + if (error) + goto restore_acpi_lid; + + acpi_initialize_hp_context(s3_wmi.touchscreen_adev, &s3_wmi.hp, + s3_wmi_hp_notify, NULL); + + s3_wmi_send_lid_state(); + + return 0; + + restore_acpi_lid: + acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle); + return error; +} + +static int s3_wmi_remove(struct platform_device *device) +{ + /* remove the hotplug context from the acpi device */ + s3_wmi.touchscreen_adev->hp = NULL; + + /* reinstall the actual PNPC0C0D LID default handle */ + acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle); + return 0; +} + +#ifdef CONFIG_PM +static int s3_wmi_resume(struct device *dev) +{ + s3_wmi_send_lid_state(); + return 0; +} +#endif +static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume); + +static struct platform_driver s3_wmi_driver = { + .driver = { + .name = "surface3-wmi", + .pm = &s3_wmi_pm, + }, + .remove = s3_wmi_remove, +}; + +static int __init s3_wmi_init(void) +{ + int error; + + s3_wmi_pdev = platform_device_alloc("surface3-wmi", -1); + if (!s3_wmi_pdev) + return -ENOMEM; + + error = platform_device_add(s3_wmi_pdev); + if (error) + goto err_device_put; + + error = platform_driver_probe(&s3_wmi_driver, s3_wmi_probe); + if (error) + goto err_device_del; + + pr_info("Surface 3 WMI Extras loaded\n"); + return 0; + + err_device_del: + platform_device_del(s3_wmi_pdev); + err_device_put: + platform_device_put(s3_wmi_pdev); + return error; +} + +static void __exit s3_wmi_exit(void) +{ + platform_device_unregister(s3_wmi_pdev); + platform_driver_unregister(&s3_wmi_driver); +} + +module_init(s3_wmi_init); +module_exit(s3_wmi_exit); diff --git a/drivers/platform/x86/surface3_button.c b/drivers/platform/x86/surface3_button.c new file mode 100644 index 0000000000000000000000000000000000000000..8bfd7f613d362190c90e2df294a93931361c8a7e --- /dev/null +++ b/drivers/platform/x86/surface3_button.c @@ -0,0 +1,250 @@ +/* + * Supports for the button array on the Surface tablets. + * + * (C) Copyright 2016 Red Hat, Inc + * + * Based on soc_button_array.c: + * + * {C} Copyright 2014 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define SURFACE_BUTTON_OBJ_NAME "TEV2" +#define MAX_NBUTTONS 4 + +/* + * Some of the buttons like volume up/down are auto repeat, while others + * are not. To support both, we register two platform devices, and put + * buttons into them based on whether the key should be auto repeat. 
+ */
+#define BUTTON_TYPES 2
+
+/*
+ * Support for the power, home and volume buttons is supposed to be
+ * covered by drivers/input/misc/soc_button_array.c, which is implemented
+ * according to the "Windows ACPI Design Guide for SoC Platforms".
+ * However, the Surface 3 does not seem to obey that spec; instead it uses
+ * the device TEV2 (MSHW0028) to declare the GPIOs. The GPIOs also differ
+ * slightly in that the Home button is active high.
+ * Compared to surfacepro3_button.c, which also handles MSHW0028, the
+ * Surface 3 is a reduced platform and thus uses GPIOs, not ACPI events.
+ * We choose an I2C driver here because we need to access the resources
+ * declared under the device node, while surfacepro3_button.c only needs
+ * the ACPI companion node.
+ */
+static const struct acpi_device_id surface3_acpi_match[] = {
+	{ "MSHW0028", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, surface3_acpi_match);
+
+struct surface3_button_info {
+	const char *name;
+	int acpi_index;
+	unsigned int event_type;
+	unsigned int event_code;
+	bool autorepeat;
+	bool wakeup;
+	bool active_low;
+};
+
+struct surface3_button_data {
+	struct platform_device *children[BUTTON_TYPES];
+};
+
+/*
+ * Get the Nth GPIO number from the ACPI object.
+ */
+static int surface3_button_lookup_gpio(struct device *dev, int acpi_index)
+{
+	struct gpio_desc *desc;
+	int gpio;
+
+	desc = gpiod_get_index(dev, NULL, acpi_index, GPIOD_ASIS);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	gpio = desc_to_gpio(desc);
+
+	gpiod_put(desc);
+
+	return gpio;
+}
+
+static struct platform_device *
+surface3_button_device_create(struct i2c_client *client,
+			      const struct surface3_button_info *button_info,
+			      bool autorepeat)
+{
+	const struct surface3_button_info *info;
+	struct platform_device *pd;
+	struct gpio_keys_button *gpio_keys;
+	struct gpio_keys_platform_data *gpio_keys_pdata;
+	int n_buttons = 0;
+	int gpio;
+	int error;
+
+	gpio_keys_pdata = devm_kzalloc(&client->dev,
+				       sizeof(*gpio_keys_pdata) +
+				       sizeof(*gpio_keys) * MAX_NBUTTONS,
+				       GFP_KERNEL);
+	if (!gpio_keys_pdata)
+		return ERR_PTR(-ENOMEM);
+
+	gpio_keys = (void *)(gpio_keys_pdata + 1);
+
+	for (info = button_info; info->name; info++) {
+		if (info->autorepeat != autorepeat)
+			continue;
+
+		gpio = surface3_button_lookup_gpio(&client->dev,
+						   info->acpi_index);
+		if (!gpio_is_valid(gpio))
+			continue;
+
+		gpio_keys[n_buttons].type = info->event_type;
+		gpio_keys[n_buttons].code = info->event_code;
+		gpio_keys[n_buttons].gpio = gpio;
+		gpio_keys[n_buttons].active_low = info->active_low;
+		gpio_keys[n_buttons].desc = info->name;
+		gpio_keys[n_buttons].wakeup = info->wakeup;
+		n_buttons++;
+	}
+
+	if (n_buttons == 0) {
+		error = -ENODEV;
+		goto err_free_mem;
+	}
+
+	gpio_keys_pdata->buttons = gpio_keys;
+	gpio_keys_pdata->nbuttons = n_buttons;
+	gpio_keys_pdata->rep = autorepeat;
+
+	pd = platform_device_alloc("gpio-keys", PLATFORM_DEVID_AUTO);
+	if (!pd) {
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	error = platform_device_add_data(pd, gpio_keys_pdata,
+					 sizeof(*gpio_keys_pdata));
+	if (error)
+		goto err_free_pdev;
+
+	error = platform_device_add(pd);
+	if (error)
+		goto err_free_pdev;
+
+	return pd;
+
+err_free_pdev:
+	platform_device_put(pd);
+err_free_mem:
+	devm_kfree(&client->dev, gpio_keys_pdata);
+	return ERR_PTR(error);
+}
+
+static int surface3_button_remove(struct i2c_client *client)
+{
+	struct surface3_button_data *priv = i2c_get_clientdata(client);
+	int i;
+
+	for (i = 0; i < BUTTON_TYPES; i++)
+		if (priv->children[i])
platform_device_unregister(priv->children[i]); + + return 0; +} + +static struct surface3_button_info surface3_button_surface3[] = { + { "power", 0, EV_KEY, KEY_POWER, false, true, true }, + { "home", 1, EV_KEY, KEY_LEFTMETA, false, true, false }, + { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false, true }, + { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false, true }, + { } +}; + +static int surface3_button_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct surface3_button_data *priv; + struct platform_device *pd; + int i; + int error; + + if (strncmp(acpi_device_bid(ACPI_COMPANION(&client->dev)), + SURFACE_BUTTON_OBJ_NAME, + strlen(SURFACE_BUTTON_OBJ_NAME))) + return -ENODEV; + + if (gpiod_count(dev, KBUILD_MODNAME) <= 0) { + dev_dbg(dev, "no GPIO attached, ignoring...\n"); + return -ENODEV; + } + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + i2c_set_clientdata(client, priv); + + for (i = 0; i < BUTTON_TYPES; i++) { + pd = surface3_button_device_create(client, + surface3_button_surface3, + i == 0); + if (IS_ERR(pd)) { + error = PTR_ERR(pd); + if (error != -ENODEV) { + surface3_button_remove(client); + return error; + } + continue; + } + + priv->children[i] = pd; + } + + if (!priv->children[0] && !priv->children[1]) + return -ENODEV; + + return 0; +} + +static const struct i2c_device_id surface3_id[] = { + { } +}; +MODULE_DEVICE_TABLE(i2c, surface3_id); + +static struct i2c_driver surface3_driver = { + .probe = surface3_button_probe, + .remove = surface3_button_remove, + .id_table = surface3_id, + .driver = { + .name = "surface3", + .acpi_match_table = ACPI_PTR(surface3_acpi_match), + }, +}; +module_i2c_driver(surface3_driver); + +MODULE_AUTHOR("Benjamin Tissoires "); +MODULE_DESCRIPTION("surface3 button array driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index b65ce7519411a87f2d279360df1b8fe252efce0b..cacb43fb1df7fcbd5f643788fa960a4cdb6e4e36 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -82,7 +82,7 @@ #include #include #include -#include +#include #include /* ThinkPad CMOS commands */ @@ -128,6 +128,7 @@ enum { /* ACPI HIDs */ #define TPACPI_ACPI_IBM_HKEY_HID "IBM0068" #define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068" +#define TPACPI_ACPI_LENOVO_HKEY_V2_HID "LEN0268" #define TPACPI_ACPI_EC_HID "PNP0C09" /* Input IDs */ @@ -190,6 +191,9 @@ enum tpacpi_hkey_event_t { TP_HKEY_EV_LID_OPEN = 0x5002, /* laptop lid opened */ TP_HKEY_EV_TABLET_TABLET = 0x5009, /* tablet swivel up */ TP_HKEY_EV_TABLET_NOTEBOOK = 0x500a, /* tablet swivel down */ + TP_HKEY_EV_TABLET_CHANGED = 0x60c0, /* X1 Yoga (2016): + * enter/leave tablet mode + */ TP_HKEY_EV_PEN_INSERTED = 0x500b, /* tablet pen inserted */ TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */ TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */ @@ -302,7 +306,12 @@ static struct { u32 hotkey:1; u32 hotkey_mask:1; u32 hotkey_wlsw:1; - u32 hotkey_tablet:1; + enum { + TP_HOTKEY_TABLET_NONE = 0, + TP_HOTKEY_TABLET_USES_MHKG, + /* X1 Yoga 2016, seen on BIOS N1FET44W */ + TP_HOTKEY_TABLET_USES_CMMD, + } hotkey_tablet; u32 kbdlight:1; u32 light:1; u32 light_status:1; @@ -2059,6 +2068,8 @@ static void hotkey_poll_setup(const bool may_warn); /* HKEY.MHKG() return bits */ #define TP_HOTKEY_TABLET_MASK (1 << 3) +/* ThinkPad X1 Yoga (2016) */ +#define TP_EC_CMMD_TABLET_MODE 0x6 static int 
hotkey_get_wlsw(void) { @@ -2083,10 +2094,23 @@ static int hotkey_get_tablet_mode(int *status) { int s; - if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) - return -EIO; + switch (tp_features.hotkey_tablet) { + case TP_HOTKEY_TABLET_USES_MHKG: + if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) + return -EIO; + + *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); + break; + case TP_HOTKEY_TABLET_USES_CMMD: + if (!acpi_evalf(ec_handle, &s, "CMMD", "d")) + return -EIO; + + *status = (s == TP_EC_CMMD_TABLET_MODE); + break; + default: + break; + } - *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); return 0; } @@ -3117,6 +3141,37 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = { typedef u16 tpacpi_keymap_entry_t; typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; +static int hotkey_init_tablet_mode(void) +{ + int in_tablet_mode = 0, res; + char *type = NULL; + + if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) { + /* For X41t, X60t, X61t Tablets... */ + tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG; + in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK); + type = "MHKG"; + } else if (acpi_evalf(ec_handle, &res, "CMMD", "qd")) { + /* For X1 Yoga (2016) */ + tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_CMMD; + in_tablet_mode = res == TP_EC_CMMD_TABLET_MODE; + type = "CMMD"; + } + + if (!tp_features.hotkey_tablet) + return 0; + + pr_info("Tablet mode switch found (type: %s), currently in %s mode\n", + type, in_tablet_mode ? "tablet" : "laptop"); + + res = add_to_attr_set(hotkey_dev_attributes, + &dev_attr_hotkey_tablet_mode.attr); + if (res) + return -1; + + return in_tablet_mode; +} + static int __init hotkey_init(struct ibm_init_struct *iibm) { /* Requirements for changing the default keymaps: @@ -3464,21 +3519,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) res = add_to_attr_set(hotkey_dev_attributes, &dev_attr_hotkey_radio_sw.attr); - /* For X41t, X60t, X61t Tablets... */ - if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) { - tp_features.hotkey_tablet = 1; - tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK); - pr_info("possible tablet mode switch found; " - "ThinkPad in %s mode\n", - (tabletsw_state) ? 
"tablet" : "laptop"); - res = add_to_attr_set(hotkey_dev_attributes, - &dev_attr_hotkey_tablet_mode.attr); - } + res = hotkey_init_tablet_mode(); + if (res < 0) + goto err_exit; - if (!res) - res = register_attr_set_with_sysfs( - hotkey_dev_attributes, - &tpacpi_pdev->dev.kobj); + tabletsw_state = res; + + res = register_attr_set_with_sysfs(hotkey_dev_attributes, + &tpacpi_pdev->dev.kobj); if (res) goto err_exit; @@ -3899,6 +3947,12 @@ static bool hotkey_notify_6xxx(const u32 hkey, *ignore_acpi_ev = true; return true; + case TP_HKEY_EV_TABLET_CHANGED: + tpacpi_input_send_tabletsw(); + hotkey_tablet_mode_notify_change(); + *send_acpi_ev = false; + break; + default: pr_warn("unknown possible thermal alarm or keyboard event received\n"); known = false; @@ -4143,6 +4197,7 @@ static int hotkey_write(char *buf) static const struct acpi_device_id ibm_htk_device_ids[] = { {TPACPI_ACPI_IBM_HKEY_HID, 0}, {TPACPI_ACPI_LENOVO_HKEY_HID, 0}, + {TPACPI_ACPI_LENOVO_HKEY_V2_HID, 0}, {"", 0}, }; @@ -7716,7 +7771,7 @@ static struct ibm_struct volume_driver_data = { #define alsa_card NULL -static void inline volume_alsa_notify_change(void) +static inline void volume_alsa_notify_change(void) { } @@ -9018,7 +9073,7 @@ static int mute_led_on_off(struct tp_led_table *t, bool state) acpi_handle temp; int output; - if (!ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp))) { + if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) { pr_warn("Thinkpad ACPI has no %s interface.\n", t->name); return -EIO; } diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index feac4576b837101c35cf7442ca07ee47496633fe..2df07ee8f3c33e9f5e92fd776554bd44e614e836 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c @@ -24,14 +24,15 @@ #include #include #include +#include MODULE_AUTHOR("Azael Avalos"); MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver"); MODULE_LICENSE("GPL"); -#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" +#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" -MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID); +MODULE_ALIAS("wmi:"WMI_EVENT_GUID); static struct input_dev *toshiba_wmi_input_dev; @@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context) kfree(response.pointer); } +static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { + { + .ident = "Toshiba laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + }, + }, + {} +}; + static int __init toshiba_wmi_input_setup(void) { acpi_status status; @@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void) if (err) goto err_free_dev; - status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID, + status = wmi_install_notify_handler(WMI_EVENT_GUID, toshiba_wmi_notify, NULL); if (ACPI_FAILURE(status)) { err = -EIO; @@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void) return 0; err_remove_notifier: - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); + wmi_remove_notify_handler(WMI_EVENT_GUID); err_free_keymap: sparse_keymap_free(toshiba_wmi_input_dev); err_free_dev: @@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void) static void toshiba_wmi_input_destroy(void) { - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); + wmi_remove_notify_handler(WMI_EVENT_GUID); sparse_keymap_free(toshiba_wmi_input_dev); input_unregister_device(toshiba_wmi_input_dev); } @@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void) { int ret; - if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) + if 
(!wmi_has_guid(WMI_EVENT_GUID) || + !dmi_check_system(toshiba_wmi_dmi_table)) return -ENODEV; ret = toshiba_wmi_input_setup(); @@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void) static void __exit toshiba_wmi_exit(void) { - if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) + if (wmi_has_guid(WMI_EVENT_GUID)) toshiba_wmi_input_destroy(); } diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c index 4b6808ff0e5df3517f5e72a9fdd71ed6f05d0a9e..5c5b3d47b5f6fa52fe064ebe32f86106caafeb6c 100644 --- a/drivers/pnp/interface.c +++ b/drivers/pnp/interface.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include "base.h" diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c index c212db0fc65de583e749d7cabaac54a2849133a6..5ee6b2a5f8d5cce1bf0932d27ba9ff589b4dbab1 100644 --- a/drivers/pnp/pnpbios/proc.c +++ b/drivers/pnp/pnpbios/proc.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include "pnpbios.h" diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 01b6d3f9b8fb32aa9877aabbc943bc01abed61d6..56bce1908be2f3af91f5d675667ca02e313e324b 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c @@ -143,7 +143,7 @@ static int rockchip_iodomain_notify(struct notifier_block *nb, if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) return NOTIFY_BAD; - dev_info(supply->iod->dev, "Setting to %d done\n", uV); + dev_dbg(supply->iod->dev, "Setting to %d done\n", uV); return NOTIFY_OK; } diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index c74c3f67b8da01fad62328ca31f8c9b8bb09c794..abeb77217a21994a8bdf8481ef0e9dce993af38d 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -104,6 +104,16 @@ config POWER_RESET_MSM help Power off and restart support for Qualcomm boards. +config POWER_RESET_PIIX4_POWEROFF + tristate "Intel PIIX4 power-off driver" + depends on PCI + depends on MIPS || COMPILE_TEST + help + This driver supports powering off a system using the Intel PIIX4 + southbridge, for example the MIPS Malta development board. The + southbridge SOff state is entered in response to a request to + power off the system. 
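As background for the help text above: drivers under drivers/power/reset/ normally power the machine down by installing a handler in the kernel's global pm_power_off hook from their probe routine; the piix4-poweroff.c driver added later in this patch follows the same pattern, with its handler writing the SOff request to the southbridge. A minimal sketch of that pattern, with purely illustrative names (not part of the patch), looks like this:

#include <linux/pm.h>
#include <linux/io.h>

static void __iomem *example_pm_base;	/* assumed to be ioremap()ed in probe */

/* Hypothetical handler: a single register write that enters soft-off. */
static void example_poweroff(void)
{
	writel(0x1, example_pm_base);
}

static int example_poweroff_probe(void)
{
	/* The kernel calls pm_power_off at the end of a power-off request. */
	pm_power_off = example_poweroff;
	return 0;
}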
+ config POWER_RESET_LTC2952 bool "LTC2952 PowerPath power-off driver" depends on OF_GPIO diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index 1be307c7fc25ebf6b5aaca25d15c15f7763c80e1..11dae3b56ff941b32f500495ad8b9b768056350a 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o +obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c index e9e24df35f26bd8ecc68f92dac8cd2a5f26145f5..a85dd4d233af39713a4a25aac67fd4c7747f8d06 100644 --- a/drivers/power/reset/at91-poweroff.c +++ b/drivers/power/reset/at91-poweroff.c @@ -169,6 +169,7 @@ static const struct of_device_id at91_poweroff_of_match[] = { { .compatible = "atmel,at91sam9x5-shdwc", }, { /*sentinel*/ } }; +MODULE_DEVICE_TABLE(of, at91_poweroff_of_match); static struct platform_driver at91_poweroff_driver = { .remove = __exit_p(at91_poweroff_remove), diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index 1b5d450586d18ae6e759407213030a16a82b7fef..568580cf06552dc1e52ae39059687f25076db7bf 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -175,6 +175,7 @@ static const struct of_device_id at91_reset_of_match[] = { { .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, at91_reset_of_match); static struct notifier_block at91_restart_nb = { .priority = 192, @@ -242,6 +243,7 @@ static const struct platform_device_id at91_reset_plat_match[] = { { "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(platform, at91_reset_plat_match); static struct platform_driver at91_reset_driver = { .remove = __exit_p(at91_reset_remove), diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c index 15fed9d8f871a715dfada3dd22b8ca0e461dd66a..bfcd6fba6363bc01e5db313d6094dc1573424dd0 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -169,7 +169,7 @@ static void ltc2952_poweroff_kill(void) static void ltc2952_poweroff_default(struct ltc2952_poweroff *data) { - data->wde_interval = ktime_set(0, 300L*1E6L); + data->wde_interval = 300L * 1E6L; data->trigger_delay = ktime_set(2, 500L*1E6L); hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL); diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c new file mode 100644 index 0000000000000000000000000000000000000000..bacfc95783f06db0c2e11976edeefaa7fcf8c193 --- /dev/null +++ b/drivers/power/reset/piix4-poweroff.c @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2016 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include + +static struct pci_dev *pm_dev; +static resource_size_t io_offset; + +enum piix4_pm_io_reg { + PIIX4_FUNC3IO_PMSTS = 0x00, +#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS BIT(8) + PIIX4_FUNC3IO_PMCNTRL = 0x04, +#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN BIT(13) +#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10) +}; + +#define PIIX4_SUSPEND_MAGIC 0x00120002 + +static const int piix4_pm_io_region = PCI_BRIDGE_RESOURCES; + +static void piix4_poweroff(void) +{ + int spec_devid; + u16 sts; + + /* Ensure the power button status is clear */ + while (1) { + sts = inw(io_offset + PIIX4_FUNC3IO_PMSTS); + if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS)) + break; + outw(sts, io_offset + PIIX4_FUNC3IO_PMSTS); + } + + /* Enable entry to suspend */ + outw(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF | PIIX4_FUNC3IO_PMCNTRL_SUS_EN, + io_offset + PIIX4_FUNC3IO_PMCNTRL); + + /* If the special cycle occurs too soon this doesn't work... */ + mdelay(10); + + /* + * The PIIX4 will enter the suspend state only after seeing a special + * cycle with the correct magic data on the PCI bus. Generate that + * cycle now. + */ + spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7)); + pci_bus_write_config_dword(pm_dev->bus, spec_devid, 0, + PIIX4_SUSPEND_MAGIC); + + /* Give the system some time to power down, then error */ + mdelay(1000); + pr_emerg("Unable to poweroff system\n"); +} + +static int piix4_poweroff_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + int res; + + if (pm_dev) + return -EINVAL; + + /* Request access to the PIIX4 PM IO registers */ + res = pci_request_region(dev, piix4_pm_io_region, + "PIIX4 PM IO registers"); + if (res) { + dev_err(&dev->dev, "failed to request PM IO registers: %d\n", + res); + return res; + } + + pm_dev = dev; + io_offset = pci_resource_start(dev, piix4_pm_io_region); + pm_power_off = piix4_poweroff; + + return 0; +} + +static void piix4_poweroff_remove(struct pci_dev *dev) +{ + if (pm_power_off == piix4_poweroff) + pm_power_off = NULL; + + pci_release_region(dev, piix4_pm_io_region); + pm_dev = NULL; +} + +static const struct pci_device_id piix4_poweroff_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) }, + { 0 }, +}; + +static struct pci_driver piix4_poweroff_driver = { + .name = "piix4-poweroff", + .id_table = piix4_poweroff_ids, + .probe = piix4_poweroff_probe, + .remove = piix4_poweroff_remove, +}; + +module_pci_driver(piix4_poweroff_driver); +MODULE_AUTHOR("Paul Burton "); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c index 1ecb51d671492b29aa46a37dc59039c5573efdd2..c8c371b285b127457dc9228ee7de8b7f3921c9b4 100644 --- a/drivers/power/reset/syscon-reboot-mode.c +++ b/drivers/power/reset/syscon-reboot-mode.c @@ -74,6 +74,7 @@ static const struct of_device_id syscon_reboot_mode_of_match[] = { { .compatible = "syscon-reboot-mode" }, {} }; +MODULE_DEVICE_TABLE(of, syscon_reboot_mode_of_match); static struct platform_driver syscon_reboot_mode_driver = { .probe = syscon_reboot_mode_probe, diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c index b0b1eb3a78c289090fef365d2e5772a924659ae5..7549c7f74a3caa4d1fe9fdf6153bffacf9ebd28e 100644 --- a/drivers/power/reset/zx-reboot.c +++ b/drivers/power/reset/zx-reboot.c @@ -72,6 +72,7 @@ static const struct of_device_id zx_reboot_of_match[] = { { .compatible = "zte,sysctrl" }, {} }; +MODULE_DEVICE_TABLE(of, zx_reboot_of_match); static struct platform_driver 
zx_reboot_driver = { .probe = zx_reboot_probe, diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 2199f673118c001e17ab09b4175d90946687d511..c569f82a007182b25531213e6d8cb84094e9a53e 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -1900,7 +1900,7 @@ static void ab8500_fg_low_bat_work(struct work_struct *work) * ab8500_fg_battok_calc - calculate the bit pattern corresponding * to the target voltage. * @di: pointer to the ab8500_fg structure - * @target target voltage + * @target: target voltage * * Returns bit pattern closest to the target voltage * valid return values are 0-14. (0-BATT_OK_MAX_NR_INCREMENTS) @@ -2391,7 +2391,7 @@ static void ab8500_fg_external_power_changed(struct power_supply *psy) } /** - * abab8500_fg_reinit_work() - work to reset the FG algorithm + * ab8500_fg_reinit_work() - work to reset the FG algorithm * @work: pointer to the work_struct structure * * Used to reset the current battery capacity to be able to @@ -2528,7 +2528,7 @@ static struct kobj_type ab8500_fg_ktype = { }; /** - * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry + * ab8500_fg_sysfs_exit() - de-init of sysfs entry * @di: pointer to the struct ab8500_chargalg * * This function removes the entry in sysfs. @@ -2539,7 +2539,7 @@ static void ab8500_fg_sysfs_exit(struct ab8500_fg *di) } /** - * ab8500_chargalg_sysfs_init() - init of sysfs entry + * ab8500_fg_sysfs_init() - init of sysfs entry * @di: pointer to the struct ab8500_chargalg * * This function adds an entry in sysfs. diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 5bdde692f72488c3793e1bf0574d8659f0df4a58..539eb41504bb8140c91f1b76991d59f00ba70bdd 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -1120,6 +1120,7 @@ static const struct platform_device_id axp288_fg_id_table[] = { { .name = DEV_NAME }, {}, }; +MODULE_DEVICE_TABLE(platform, axp288_fg_id_table); static int axp288_fuel_gauge_remove(struct platform_device *pdev) { diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index f5746b9f4e83900edd33fb049229b24aa48ba1b9..e9584330aeed27f0c4e2cc7fa731ff8311e82659 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -1141,7 +1141,7 @@ static int bq24190_battery_set_property(struct power_supply *psy, dev_dbg(bdi->dev, "prop: %d\n", psp); - pm_runtime_put_sync(bdi->dev); + pm_runtime_get_sync(bdi->dev); switch (psp) { case POWER_SUPPLY_PROP_ONLINE: diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 3b0dbc689d7299bde340b044311ab8ad859bd726..08c36b8e04bd6c804c9a94a421c9b6a2fb1dcab9 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -164,6 +164,25 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = { [BQ27XXX_REG_DCAP] = 0x3c, [BQ27XXX_REG_AP] = INVALID_REG_ADDR, }, + [BQ27510] = { + [BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x06, + [BQ27XXX_REG_INT_TEMP] = 0x28, + [BQ27XXX_REG_VOLT] = 0x08, + [BQ27XXX_REG_AI] = 0x14, + [BQ27XXX_REG_FLAGS] = 0x0a, + [BQ27XXX_REG_TTE] = 0x16, + [BQ27XXX_REG_TTF] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTES] = 0x1a, + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, + [BQ27XXX_REG_NAC] = 0x0c, + [BQ27XXX_REG_FCC] = 0x12, + [BQ27XXX_REG_CYCT] = 0x1e, + [BQ27XXX_REG_AE] = INVALID_REG_ADDR, + [BQ27XXX_REG_SOC] = 0x20, + [BQ27XXX_REG_DCAP] = 0x2e, + [BQ27XXX_REG_AP] = 
INVALID_REG_ADDR, + }, [BQ27530] = { [BQ27XXX_REG_CTRL] = 0x00, [BQ27XXX_REG_TEMP] = 0x06, @@ -302,6 +321,24 @@ static enum power_supply_property bq27500_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; +static enum power_supply_property bq27510_battery_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_MANUFACTURER, +}; + static enum power_supply_property bq27530_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, @@ -385,6 +422,7 @@ static struct { BQ27XXX_PROP(BQ27000, bq27000_battery_props), BQ27XXX_PROP(BQ27010, bq27010_battery_props), BQ27XXX_PROP(BQ27500, bq27500_battery_props), + BQ27XXX_PROP(BQ27510, bq27510_battery_props), BQ27XXX_PROP(BQ27530, bq27530_battery_props), BQ27XXX_PROP(BQ27541, bq27541_battery_props), BQ27XXX_PROP(BQ27545, bq27545_battery_props), @@ -397,10 +435,11 @@ static LIST_HEAD(bq27xxx_battery_devices); static int poll_interval_param_set(const char *val, const struct kernel_param *kp) { struct bq27xxx_device_info *di; + unsigned int prev_val = *(unsigned int *) kp->arg; int ret; ret = param_set_uint(val, kp); - if (ret < 0) + if (ret < 0 || prev_val == *(unsigned int *) kp->arg) return ret; mutex_lock(&bq27xxx_list_lock); @@ -635,7 +674,8 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di) */ static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags) { - if (di->chip == BQ27500 || di->chip == BQ27541 || di->chip == BQ27545) + if (di->chip == BQ27500 || di->chip == BQ27510 || + di->chip == BQ27541 || di->chip == BQ27545) return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD); if (di->chip == BQ27530 || di->chip == BQ27421) return flags & BQ27XXX_FLAG_OT; diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index 85d4ea2a9c2026e834329bb3b3121579ced969da..5c5c3a6f99234dfd82cc41f4cd3ec5cd50865c7d 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -149,8 +149,8 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = { { "bq27200", BQ27000 }, { "bq27210", BQ27010 }, { "bq27500", BQ27500 }, - { "bq27510", BQ27500 }, - { "bq27520", BQ27500 }, + { "bq27510", BQ27510 }, + { "bq27520", BQ27510 }, { "bq27530", BQ27530 }, { "bq27531", BQ27530 }, { "bq27541", BQ27541 }, diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c index 4af7b770f293957e12234fbde112c33ccb72cdbb..2fa6edd6e8b1caff8ecef90f4a8d00ca184d6bf9 100644 --- a/drivers/power/supply/ipaq_micro_battery.c +++ b/drivers/power/supply/ipaq_micro_battery.c @@ -313,4 +313,4 @@ module_platform_driver(micro_batt_device_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("driver for iPAQ Atmel micro battery"); -MODULE_ALIAS("platform:battery-ipaq-micro"); +MODULE_ALIAS("platform:ipaq-micro-battery"); diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 7321b727d484555a2befa146a2deb910d808b49d..509e2b341bd64c54aec3ec68b69b1854dc593550 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -384,9 
+384,6 @@ static int lp8788_update_charger_params(struct platform_device *pdev, for (i = 0; i < pdata->num_chg_params; i++) { param = pdata->chg_params + i; - if (!param) - continue; - if (lp8788_is_valid_charger_register(param->addr)) { ret = lp8788_write_byte(lp, param->addr, param->val); if (ret) diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index 8689c80202b532047252840f3f12872aeebcf92b..e7c3649b31a080232efc580d1c2d72c5c9357bd1 100644 --- a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -21,18 +21,13 @@ #include #include -#define MAX17040_VCELL_MSB 0x02 -#define MAX17040_VCELL_LSB 0x03 -#define MAX17040_SOC_MSB 0x04 -#define MAX17040_SOC_LSB 0x05 -#define MAX17040_MODE_MSB 0x06 -#define MAX17040_MODE_LSB 0x07 -#define MAX17040_VER_MSB 0x08 -#define MAX17040_VER_LSB 0x09 -#define MAX17040_RCOMP_MSB 0x0C -#define MAX17040_RCOMP_LSB 0x0D -#define MAX17040_CMD_MSB 0xFE -#define MAX17040_CMD_LSB 0xFF +#define MAX17040_VCELL 0x02 +#define MAX17040_SOC 0x04 +#define MAX17040_MODE 0x06 +#define MAX17040_VER 0x08 +#define MAX17040_RCOMP 0x0C +#define MAX17040_CMD 0xFE + #define MAX17040_DELAY 1000 #define MAX17040_BATTERY_FULL 95 @@ -78,11 +73,11 @@ static int max17040_get_property(struct power_supply *psy, return 0; } -static int max17040_write_reg(struct i2c_client *client, int reg, u8 value) +static int max17040_write_reg(struct i2c_client *client, int reg, u16 value) { int ret; - ret = i2c_smbus_write_byte_data(client, reg, value); + ret = i2c_smbus_write_word_swapped(client, reg, value); if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); @@ -94,7 +89,7 @@ static int max17040_read_reg(struct i2c_client *client, int reg) { int ret; - ret = i2c_smbus_read_byte_data(client, reg); + ret = i2c_smbus_read_word_swapped(client, reg); if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); @@ -104,43 +99,36 @@ static int max17040_read_reg(struct i2c_client *client, int reg) static void max17040_reset(struct i2c_client *client) { - max17040_write_reg(client, MAX17040_CMD_MSB, 0x54); - max17040_write_reg(client, MAX17040_CMD_LSB, 0x00); + max17040_write_reg(client, MAX17040_CMD, 0x0054); } static void max17040_get_vcell(struct i2c_client *client) { struct max17040_chip *chip = i2c_get_clientdata(client); - u8 msb; - u8 lsb; + u16 vcell; - msb = max17040_read_reg(client, MAX17040_VCELL_MSB); - lsb = max17040_read_reg(client, MAX17040_VCELL_LSB); + vcell = max17040_read_reg(client, MAX17040_VCELL); - chip->vcell = (msb << 4) + (lsb >> 4); + chip->vcell = vcell; } static void max17040_get_soc(struct i2c_client *client) { struct max17040_chip *chip = i2c_get_clientdata(client); - u8 msb; - u8 lsb; + u16 soc; - msb = max17040_read_reg(client, MAX17040_SOC_MSB); - lsb = max17040_read_reg(client, MAX17040_SOC_LSB); + soc = max17040_read_reg(client, MAX17040_SOC); - chip->soc = msb; + chip->soc = (soc >> 8); } static void max17040_get_version(struct i2c_client *client) { - u8 msb; - u8 lsb; + u16 version; - msb = max17040_read_reg(client, MAX17040_VER_MSB); - lsb = max17040_read_reg(client, MAX17040_VER_LSB); + version = max17040_read_reg(client, MAX17040_VER); - dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb); + dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver 0x%x\n", version); } static void max17040_get_online(struct i2c_client *client) diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c index 
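A minimal sketch (editorial note, not part of the patch) of the register-access pattern the max17040 driver switches to above: a single SMBus word transfer with the byte order swapped to match the chip's big-endian registers, replacing the two separate MSB/LSB byte reads. The register value 0x02 is MAX17040_VCELL as defined in the hunk above; the function name is illustrative only.

#include <linux/i2c.h>

/* Read the VCELL register (0x02) as one big-endian 16-bit word. */
static int example_read_vcell(struct i2c_client *client)
{
	/* Returns the 16-bit register value on success, negative errno on error. */
	return i2c_smbus_read_word_swapped(client, 0x02);
}

Writes follow the same shape via i2c_smbus_write_word_swapped(), which is why the reset command above collapses to a single 0x0054 word write.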
0b2eab571528eaed9cbe08fb47c90e313049f01c..290ddc12b0405218865edf427cb5e87ad67e785d 100644 --- a/drivers/power/supply/max8997_charger.c +++ b/drivers/power/supply/max8997_charger.c @@ -184,6 +184,7 @@ static const struct platform_device_id max8997_battery_id[] = { { "max8997-battery", 0 }, { } }; +MODULE_DEVICE_TABLE(platform, max8997_battery_id); static struct platform_driver max8997_battery_driver = { .driver = { diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index a74d8ca383a1e8cb040ca08f78b7c77d15821375..1e0960b646e823caeb82bf9f10bd72ee36a687d4 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -413,7 +413,7 @@ static int power_supply_match_device_node(struct device *dev, const void *data) /** * power_supply_get_by_phandle() - Search for a power supply and returns its ref * @np: Pointer to device node holding phandle property - * @phandle_name: Name of property holding a power supply name + * @property: Name of property holding a power supply name * * If power supply was found, it increases reference count for the * internal power supply's device. The user should power_supply_put() @@ -458,7 +458,7 @@ static void devm_power_supply_put(struct device *dev, void *res) * devm_power_supply_get_by_phandle() - Resource managed version of * power_supply_get_by_phandle() * @dev: Pointer to device holding phandle property - * @phandle_name: Name of property holding a power supply phandle + * @property: Name of property holding a power supply phandle * * Return: On success returns a reference to a power supply with * matching name equals to value under @property, NULL or ERR_PTR otherwise. diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c index 5c5880664e09b9c0072fdc9d6ccb91ee0525965e..a2740cf57ad3e0de776e779c3ff179c68d5375d8 100644 --- a/drivers/power/supply/wm8350_power.c +++ b/drivers/power/supply/wm8350_power.c @@ -182,7 +182,7 @@ static ssize_t charger_state_show(struct device *dev, return sprintf(buf, "%s\n", charge); } -static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL); +static DEVICE_ATTR_RO(charger_state); static irqreturn_t wm8350_charger_handler(int irq, void *data) { diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c index 6285626d142a3b28b3e6df3565f3e63614f401f7..e3edb31ac88045a0071c78dff2f20a4a9cac0284 100644 --- a/drivers/power/supply/wm97xx_battery.c +++ b/drivers/power/supply/wm97xx_battery.c @@ -30,8 +30,7 @@ static enum power_supply_property *prop; static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) { - struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data; - struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps); return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent), pdata->batt_aux) * pdata->batt_mult / @@ -40,8 +39,7 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) static unsigned long wm97xx_read_temp(struct power_supply *bat_ps) { - struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data; - struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps); return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent), pdata->temp_aux) * pdata->temp_mult / @@ -52,8 +50,7 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps, enum power_supply_property psp, union power_supply_propval 
*val) { - struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data; - struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps); switch (psp) { case POWER_SUPPLY_PROP_STATUS: @@ -103,8 +100,7 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps) static void wm97xx_bat_update(struct power_supply *bat_ps) { int old_status = bat_status; - struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data; - struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps); mutex_lock(&work_lock); @@ -166,15 +162,15 @@ static int wm97xx_bat_probe(struct platform_device *dev) int ret = 0; int props = 1; /* POWER_SUPPLY_PROP_PRESENT */ int i = 0; - struct wm97xx_pdata *wmdata = dev->dev.platform_data; - struct wm97xx_batt_pdata *pdata; + struct wm97xx_batt_pdata *pdata = dev->dev.platform_data; + struct power_supply_config cfg = {}; - if (!wmdata) { + if (!pdata) { dev_err(&dev->dev, "No platform data supplied\n"); return -EINVAL; } - pdata = wmdata->batt_pdata; + cfg.drv_data = pdata; if (dev->id != -1) return -EINVAL; @@ -243,7 +239,7 @@ static int wm97xx_bat_probe(struct platform_device *dev) bat_psy_desc.properties = prop; bat_psy_desc.num_properties = props; - bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, NULL); + bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, &cfg); if (!IS_ERR(bat_psy)) { schedule_work(&bat_work); } else { @@ -266,8 +262,7 @@ static int wm97xx_bat_probe(struct platform_device *dev) static int wm97xx_bat_remove(struct platform_device *dev) { - struct wm97xx_pdata *wmdata = dev->dev.platform_data; - struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + struct wm97xx_batt_pdata *pdata = dev->dev.platform_data; if (pdata && gpio_is_valid(pdata->charge_gpio)) { free_irq(gpio_to_irq(pdata->charge_gpio), dev); diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 243b233ff31bff9fdcfae1ab019b3104fd36916e..9a25110c4a46bd3ac504d2bb6db65ac3bee156c5 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -189,14 +189,13 @@ struct rapl_package { unsigned int time_unit; struct rapl_domain *domains; /* array of domains, sized at runtime */ struct powercap_zone *power_zone; /* keep track of parent zone */ - int nr_cpus; /* active cpus on the package, topology info is lost during - * cpu hotplug. so we have to track ourselves. - */ unsigned long power_limit_irq; /* keep track of package power limit * notify interrupt enable status. 
*/ struct list_head plist; int lead_cpu; /* one active cpu per package for access */ + /* Track active cpus */ + struct cpumask cpumask; }; struct rapl_defaults { @@ -275,18 +274,6 @@ static struct rapl_package *find_package_by_id(int id) return NULL; } -/* caller must hold cpu hotplug lock */ -static void rapl_cleanup_data(void) -{ - struct rapl_package *p, *tmp; - - list_for_each_entry_safe(p, tmp, &rapl_packages, plist) { - kfree(p->domains); - list_del(&p->plist); - kfree(p); - } -} - static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw) { struct rapl_domain *rd; @@ -442,6 +429,7 @@ static int contraint_to_pl(struct rapl_domain *rd, int cid) return i; } } + pr_err("Cannot find matching power limit for constraint %d\n", cid); return -EINVAL; } @@ -457,6 +445,10 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid, get_online_cpus(); rd = power_zone_to_rapl_domain(power_zone); id = contraint_to_pl(rd, cid); + if (id < 0) { + ret = id; + goto set_exit; + } rp = rd->rp; @@ -496,6 +488,11 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid, get_online_cpus(); rd = power_zone_to_rapl_domain(power_zone); id = contraint_to_pl(rd, cid); + if (id < 0) { + ret = id; + goto get_exit; + } + switch (rd->rpl[id].prim_id) { case PL1_ENABLE: prim = POWER_LIMIT1; @@ -512,6 +509,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid, else *data = val; +get_exit: put_online_cpus(); return ret; @@ -527,6 +525,10 @@ static int set_time_window(struct powercap_zone *power_zone, int cid, get_online_cpus(); rd = power_zone_to_rapl_domain(power_zone); id = contraint_to_pl(rd, cid); + if (id < 0) { + ret = id; + goto set_time_exit; + } switch (rd->rpl[id].prim_id) { case PL1_ENABLE: @@ -538,6 +540,8 @@ static int set_time_window(struct powercap_zone *power_zone, int cid, default: ret = -EINVAL; } + +set_time_exit: put_online_cpus(); return ret; } @@ -552,6 +556,10 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data) get_online_cpus(); rd = power_zone_to_rapl_domain(power_zone); id = contraint_to_pl(rd, cid); + if (id < 0) { + ret = id; + goto get_time_exit; + } switch (rd->rpl[id].prim_id) { case PL1_ENABLE: @@ -566,6 +574,8 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data) } if (!ret) *data = val; + +get_time_exit: put_online_cpus(); return ret; @@ -707,7 +717,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type, case ENERGY_UNIT: scale = ENERGY_UNIT_SCALE; /* per domain unit takes precedence */ - if (rd && rd->domain_energy_unit) + if (rd->domain_energy_unit) units = rd->domain_energy_unit; else units = rp->energy_unit; @@ -976,10 +986,20 @@ static void package_power_limit_irq_save(struct rapl_package *rp) smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1); } -static void power_limit_irq_restore_cpu(void *info) +/* + * Restore per package power limit interrupt enable state. Called from cpu + * hotplug code on package removal. 
+ */ +static void package_power_limit_irq_restore(struct rapl_package *rp) { - u32 l, h = 0; - struct rapl_package *rp = (struct rapl_package *)info; + u32 l, h; + + if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN)) + return; + + /* irq enable state not saved, nothing to restore */ + if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) + return; rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h); @@ -991,19 +1011,6 @@ static void power_limit_irq_restore_cpu(void *info) wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); } -/* restore per package power limit interrupt enable state */ -static void package_power_limit_irq_restore(struct rapl_package *rp) -{ - if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN)) - return; - - /* irq enable state not saved, nothing to restore */ - if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) - return; - - smp_call_function_single(rp->lead_cpu, power_limit_irq_restore_cpu, rp, 1); -} - static void set_floor_freq_default(struct rapl_domain *rd, bool mode) { int nr_powerlimit = find_nr_power_limit(rd); @@ -1160,84 +1167,49 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core), RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server), + RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM, rapl_defaults_hsw_server), {} }; MODULE_DEVICE_TABLE(x86cpu, rapl_ids); -/* read once for all raw primitive data for all packages, domains */ -static void rapl_update_domain_data(void) +/* Read once for all raw primitive data for domains */ +static void rapl_update_domain_data(struct rapl_package *rp) { int dmn, prim; u64 val; - struct rapl_package *rp; - list_for_each_entry(rp, &rapl_packages, plist) { - for (dmn = 0; dmn < rp->nr_domains; dmn++) { - pr_debug("update package %d domain %s data\n", rp->id, - rp->domains[dmn].name); - /* exclude non-raw primitives */ - for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) - if (!rapl_read_data_raw(&rp->domains[dmn], prim, - rpi[prim].unit, - &val)) - rp->domains[dmn].rdd.primitives[prim] = - val; + for (dmn = 0; dmn < rp->nr_domains; dmn++) { + pr_debug("update package %d domain %s data\n", rp->id, + rp->domains[dmn].name); + /* exclude non-raw primitives */ + for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) { + if (!rapl_read_data_raw(&rp->domains[dmn], prim, + rpi[prim].unit, &val)) + rp->domains[dmn].rdd.primitives[prim] = val; } } } -static int rapl_unregister_powercap(void) +static void rapl_unregister_powercap(void) { - struct rapl_package *rp; - struct rapl_domain *rd, *rd_package = NULL; - - /* unregister all active rapl packages from the powercap layer, - * hotplug lock held - */ - list_for_each_entry(rp, &rapl_packages, plist) { - package_power_limit_irq_restore(rp); - - for (rd = rp->domains; rd < rp->domains + rp->nr_domains; - rd++) { - pr_debug("remove package, undo power limit on %d: %s\n", - rp->id, rd->name); - rapl_write_data_raw(rd, PL1_ENABLE, 0); - rapl_write_data_raw(rd, PL1_CLAMP, 0); - if (find_nr_power_limit(rd) > 1) { - rapl_write_data_raw(rd, PL2_ENABLE, 0); - rapl_write_data_raw(rd, PL2_CLAMP, 0); - } - if (rd->id == RAPL_DOMAIN_PACKAGE) { - rd_package = rd; - continue; - } - powercap_unregister_zone(control_type, &rd->power_zone); - } - /* do the package zone last */ - if (rd_package) - powercap_unregister_zone(control_type, - &rd_package->power_zone); - } - if (platform_rapl_domain) { powercap_unregister_zone(control_type, &platform_rapl_domain->power_zone); kfree(platform_rapl_domain); } - 
powercap_unregister_control_type(control_type); - - return 0; } static int rapl_package_register_powercap(struct rapl_package *rp) { struct rapl_domain *rd; - int ret = 0; char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/ struct powercap_zone *power_zone = NULL; - int nr_pl; + int nr_pl, ret;; + + /* Update the domain data of the new package */ + rapl_update_domain_data(rp); /* first we register package domain as the parent zone*/ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { @@ -1257,8 +1229,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp) if (IS_ERR(power_zone)) { pr_debug("failed to register package, %d\n", rp->id); - ret = PTR_ERR(power_zone); - goto exit_package; + return PTR_ERR(power_zone); } /* track parent zone in per package/socket data */ rp->power_zone = power_zone; @@ -1268,8 +1239,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp) } if (!power_zone) { pr_err("no package domain found, unknown topology!\n"); - ret = -ENODEV; - goto exit_package; + return -ENODEV; } /* now register domains as children of the socket/package*/ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { @@ -1290,11 +1260,11 @@ static int rapl_package_register_powercap(struct rapl_package *rp) goto err_cleanup; } } + return 0; -exit_package: - return ret; err_cleanup: - /* clean up previously initialized domains within the package if we + /* + * Clean up previously initialized domains within the package if we * failed after the first domain setup. */ while (--rd >= rp->domains) { @@ -1305,7 +1275,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp) return ret; } -static int rapl_register_psys(void) +static int __init rapl_register_psys(void) { struct rapl_domain *rd; struct powercap_zone *power_zone; @@ -1346,40 +1316,14 @@ static int rapl_register_psys(void) return 0; } -static int rapl_register_powercap(void) +static int __init rapl_register_powercap(void) { - struct rapl_domain *rd; - struct rapl_package *rp; - int ret = 0; - control_type = powercap_register_control_type(NULL, "intel-rapl", NULL); if (IS_ERR(control_type)) { pr_debug("failed to register powercap control_type.\n"); return PTR_ERR(control_type); } - /* read the initial data */ - rapl_update_domain_data(); - list_for_each_entry(rp, &rapl_packages, plist) - if (rapl_package_register_powercap(rp)) - goto err_cleanup_package; - - /* Don't bail out if PSys is not supported */ - rapl_register_psys(); - - return ret; - -err_cleanup_package: - /* clean up previously initialized packages */ - list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) { - for (rd = rp->domains; rd < rp->domains + rp->nr_domains; - rd++) { - pr_debug("unregister zone/package %d, %s domain\n", - rp->id, rd->name); - powercap_unregister_zone(control_type, &rd->power_zone); - } - } - - return ret; + return 0; } static int rapl_check_domain(int cpu, int domain) @@ -1452,9 +1396,8 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd) */ static int rapl_detect_domains(struct rapl_package *rp, int cpu) { - int i; - int ret = 0; struct rapl_domain *rd; + int i; for (i = 0; i < RAPL_DOMAIN_MAX; i++) { /* use physical package id to read counters */ @@ -1466,84 +1409,20 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); if (!rp->nr_domains) { pr_debug("no valid rapl domains found in package %d\n", rp->id); - ret = -ENODEV; - goto done; + return -ENODEV; } 
pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id); rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain), GFP_KERNEL); - if (!rp->domains) { - ret = -ENOMEM; - goto done; - } + if (!rp->domains) + return -ENOMEM; + rapl_init_domains(rp); for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) rapl_detect_powerlimit(rd); - - -done: - return ret; -} - -static bool is_package_new(int package) -{ - struct rapl_package *rp; - - /* caller prevents cpu hotplug, there will be no new packages added - * or deleted while traversing the package list, no need for locking. - */ - list_for_each_entry(rp, &rapl_packages, plist) - if (package == rp->id) - return false; - - return true; -} - -/* RAPL interface can be made of a two-level hierarchy: package level and domain - * level. We first detect the number of packages then domains of each package. - * We have to consider the possiblity of CPU online/offline due to hotplug and - * other scenarios. - */ -static int rapl_detect_topology(void) -{ - int i; - int phy_package_id; - struct rapl_package *new_package, *rp; - - for_each_online_cpu(i) { - phy_package_id = topology_physical_package_id(i); - if (is_package_new(phy_package_id)) { - new_package = kzalloc(sizeof(*rp), GFP_KERNEL); - if (!new_package) { - rapl_cleanup_data(); - return -ENOMEM; - } - /* add the new package to the list */ - new_package->id = phy_package_id; - new_package->nr_cpus = 1; - /* use the first active cpu of the package to access */ - new_package->lead_cpu = i; - /* check if the package contains valid domains */ - if (rapl_detect_domains(new_package, i) || - rapl_defaults->check_unit(new_package, i)) { - kfree(new_package->domains); - kfree(new_package); - /* free up the packages already initialized */ - rapl_cleanup_data(); - return -ENODEV; - } - INIT_LIST_HEAD(&new_package->plist); - list_add(&new_package->plist, &rapl_packages); - } else { - rp = find_package_by_id(phy_package_id); - if (rp) - ++rp->nr_cpus; - } - } - return 0; } @@ -1552,12 +1431,21 @@ static void rapl_remove_package(struct rapl_package *rp) { struct rapl_domain *rd, *rd_package = NULL; + package_power_limit_irq_restore(rp); + for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { + rapl_write_data_raw(rd, PL1_ENABLE, 0); + rapl_write_data_raw(rd, PL1_CLAMP, 0); + if (find_nr_power_limit(rd) > 1) { + rapl_write_data_raw(rd, PL2_ENABLE, 0); + rapl_write_data_raw(rd, PL2_CLAMP, 0); + } if (rd->id == RAPL_DOMAIN_PACKAGE) { rd_package = rd; continue; } - pr_debug("remove package %d, %s domain\n", rp->id, rd->name); + pr_debug("remove package, undo power limit on %d: %s\n", + rp->id, rd->name); powercap_unregister_zone(control_type, &rd->power_zone); } /* do parent zone last */ @@ -1567,20 +1455,17 @@ static void rapl_remove_package(struct rapl_package *rp) } /* called from CPU hotplug notifier, hotplug lock held */ -static int rapl_add_package(int cpu) +static struct rapl_package *rapl_add_package(int cpu, int pkgid) { - int ret = 0; - int phy_package_id; struct rapl_package *rp; + int ret; - phy_package_id = topology_physical_package_id(cpu); rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL); if (!rp) - return -ENOMEM; + return ERR_PTR(-ENOMEM); /* add the new package to the list */ - rp->id = phy_package_id; - rp->nr_cpus = 1; + rp->id = pkgid; rp->lead_cpu = cpu; /* check if the package contains valid domains */ @@ -1589,17 +1474,17 @@ static int rapl_add_package(int cpu) ret = -ENODEV; goto err_free_package; } - if (!rapl_package_register_powercap(rp)) { 
+ ret = rapl_package_register_powercap(rp); + if (!ret) { INIT_LIST_HEAD(&rp->plist); list_add(&rp->plist, &rapl_packages); - return ret; + return rp; } err_free_package: kfree(rp->domains); kfree(rp); - - return ret; + return ERR_PTR(ret); } /* Handles CPU hotplug on multi-socket systems. @@ -1609,55 +1494,46 @@ static int rapl_add_package(int cpu) * associated domains. Cooling devices are handled accordingly at * per-domain level. */ -static int rapl_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int rapl_cpu_online(unsigned int cpu) { - unsigned long cpu = (unsigned long)hcpu; - int phy_package_id; + int pkgid = topology_physical_package_id(cpu); struct rapl_package *rp; - int lead_cpu; - phy_package_id = topology_physical_package_id(cpu); - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - rp = find_package_by_id(phy_package_id); - if (rp) - ++rp->nr_cpus; - else - rapl_add_package(cpu); - break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - rp = find_package_by_id(phy_package_id); - if (!rp) - break; - if (--rp->nr_cpus == 0) - rapl_remove_package(rp); - else if (cpu == rp->lead_cpu) { - /* choose another active cpu in the package */ - lead_cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu); - if (lead_cpu < nr_cpu_ids) - rp->lead_cpu = lead_cpu; - else /* should never go here */ - pr_err("no active cpu available for package %d\n", - phy_package_id); - } + rp = find_package_by_id(pkgid); + if (!rp) { + rp = rapl_add_package(cpu, pkgid); + if (IS_ERR(rp)) + return PTR_ERR(rp); } + cpumask_set_cpu(cpu, &rp->cpumask); + return 0; +} + +static int rapl_cpu_down_prep(unsigned int cpu) +{ + int pkgid = topology_physical_package_id(cpu); + struct rapl_package *rp; + int lead_cpu; + + rp = find_package_by_id(pkgid); + if (!rp) + return 0; - return NOTIFY_OK; + cpumask_clear_cpu(cpu, &rp->cpumask); + lead_cpu = cpumask_first(&rp->cpumask); + if (lead_cpu >= nr_cpu_ids) + rapl_remove_package(rp); + else if (rp->lead_cpu == cpu) + rp->lead_cpu = lead_cpu; + return 0; } -static struct notifier_block rapl_cpu_notifier = { - .notifier_call = rapl_cpu_callback, -}; +static enum cpuhp_state pcap_rapl_online; static int __init rapl_init(void) { - int ret = 0; const struct x86_cpu_id *id; + int ret; id = x86_match_cpu(rapl_ids); if (!id) { @@ -1669,36 +1545,29 @@ static int __init rapl_init(void) rapl_defaults = (struct rapl_defaults *)id->driver_data; - cpu_notifier_register_begin(); - - /* prevent CPU hotplug during detection */ - get_online_cpus(); - ret = rapl_detect_topology(); + ret = rapl_register_powercap(); if (ret) - goto done; + return ret; - if (rapl_register_powercap()) { - rapl_cleanup_data(); - ret = -ENODEV; - goto done; - } - __register_hotcpu_notifier(&rapl_cpu_notifier); -done: - put_online_cpus(); - cpu_notifier_register_done(); + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online", + rapl_cpu_online, rapl_cpu_down_prep); + if (ret < 0) + goto err_unreg; + pcap_rapl_online = ret; + + /* Don't bail out if PSys is not supported */ + rapl_register_psys(); + return 0; +err_unreg: + rapl_unregister_powercap(); return ret; } static void __exit rapl_exit(void) { - cpu_notifier_register_begin(); - get_online_cpus(); - __unregister_hotcpu_notifier(&rapl_cpu_notifier); + cpuhp_remove_state(pcap_rapl_online); rapl_unregister_powercap(); - rapl_cleanup_data(); - put_online_cpus(); - cpu_notifier_register_done(); } module_init(rapl_init); diff --git 
a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index ee3de3421f2dc3c06b4a5a1886b516e1e825039e..bdce33291161caee93773cc8e774ee35ab4c7c65 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -6,7 +6,7 @@ menu "PTP clock support" config PTP_1588_CLOCK tristate "PTP clock support" - depends on NET + depends on NET && POSIX_TIMERS select PPS select NET_PTP_CLASSIFY help @@ -28,7 +28,7 @@ config PTP_1588_CLOCK config PTP_1588_CLOCK_GIANFAR tristate "Freescale eTSEC as PTP clock" depends on GIANFAR - select PTP_1588_CLOCK + depends on PTP_1588_CLOCK default y help This driver adds support for using the eTSEC as a PTP @@ -42,7 +42,7 @@ config PTP_1588_CLOCK_GIANFAR config PTP_1588_CLOCK_IXP46X tristate "Intel IXP46x as PTP clock" depends on IXP4XX_ETH - select PTP_1588_CLOCK + depends on PTP_1588_CLOCK default y help This driver adds support for using the IXP46X as a PTP @@ -60,7 +60,7 @@ config DP83640_PHY tristate "Driver for the National Semiconductor DP83640 PHYTER" depends on NETWORK_PHY_TIMESTAMPING depends on PHYLIB - select PTP_1588_CLOCK + depends on PTP_1588_CLOCK ---help--- Supports the DP83640 PHYTER with IEEE 1588 features. @@ -76,7 +76,7 @@ config PTP_1588_CLOCK_PCH tristate "Intel PCH EG20T as PTP clock" depends on X86_32 || COMPILE_TEST depends on HAS_IOMEM && NET - select PTP_1588_CLOCK + imply PTP_1588_CLOCK help This driver adds support for using the PCH EG20T as a PTP clock. The hardware supports time stamping of PTP packets diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 86280b7e41f3f79db793c0545c97bb0c1e36e96e..9c13381b69662c499d0e9a679ce3eeb71c3c6d3d 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -153,7 +153,10 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx) s32 ppb = scaled_ppm_to_ppb(tx->freq); if (ppb > ops->max_adj || ppb < -ops->max_adj) return -ERANGE; - err = ops->adjfreq(ops, ppb); + if (ops->adjfine) + err = ops->adjfine(ops, tx->freq); + else + err = ops->adjfreq(ops, ppb); ptp->dialed_frequency = tx->freq; } else if (tx->modes == 0) { tx->freq = ptp->dialed_frequency; diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 302e626fe6b01777523c371e2760ee8f48acc68b..53d43954a9740a5520229a1e8798516805a73c19 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -28,7 +28,7 @@ static ssize_t clock_name_show(struct device *dev, struct ptp_clock *ptp = dev_get_drvdata(dev); return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name); } -static DEVICE_ATTR(clock_name, 0444, clock_name_show, NULL); +static DEVICE_ATTR_RO(clock_name); #define PTP_SHOW_INT(name, var) \ static ssize_t var##_show(struct device *dev, \ diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index bf0128899c094801f2b35c453725c02aae0fea39..f92dd41b0395f2621d188969a3a249cc5cfbd122 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -175,6 +175,15 @@ config PWM_FSL_FTM To compile this driver as a module, choose M here: the module will be called pwm-fsl-ftm. +config PWM_HIBVT + tristate "HiSilicon BVT PWM support" + depends on ARCH_HISI || COMPILE_TEST + help + Generic PWM framework driver for HiSilicon BVT SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-hibvt. 
+ config PWM_IMG tristate "Imagination Technologies PWM driver" depends on HAS_IOMEM diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 1194c54efcc2571cd9bfa7cfe976d22c06d44ca6..a48bdb517792d30dc580811dde7b62bf4364f6a9 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_PWM_CRC) += pwm-crc.o obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o +obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o obj-$(CONFIG_PWM_IMG) += pwm-img.o obj-$(CONFIG_PWM_IMX) += pwm-imx.o obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c new file mode 100644 index 0000000000000000000000000000000000000000..d0e8f8542626a08b84149b4922ed60de7131204c --- /dev/null +++ b/drivers/pwm/pwm-hibvt.c @@ -0,0 +1,271 @@ +/* + * PWM Controller Driver for HiSilicon BVT SoCs + * + * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PWM_CFG0_ADDR(x) (((x) * 0x20) + 0x0) +#define PWM_CFG1_ADDR(x) (((x) * 0x20) + 0x4) +#define PWM_CFG2_ADDR(x) (((x) * 0x20) + 0x8) +#define PWM_CTRL_ADDR(x) (((x) * 0x20) + 0xC) + +#define PWM_ENABLE_SHIFT 0 +#define PWM_ENABLE_MASK BIT(0) + +#define PWM_POLARITY_SHIFT 1 +#define PWM_POLARITY_MASK BIT(1) + +#define PWM_KEEP_SHIFT 2 +#define PWM_KEEP_MASK BIT(2) + +#define PWM_PERIOD_MASK GENMASK(31, 0) +#define PWM_DUTY_MASK GENMASK(31, 0) + +struct hibvt_pwm_chip { + struct pwm_chip chip; + struct clk *clk; + void __iomem *base; + struct reset_control *rstc; +}; + +struct hibvt_pwm_soc { + u32 num_pwms; +}; + +static const struct hibvt_pwm_soc pwm_soc[2] = { + { .num_pwms = 4 }, + { .num_pwms = 8 }, +}; + +static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct hibvt_pwm_chip, chip); +} + +static void hibvt_pwm_set_bits(void __iomem *base, u32 offset, + u32 mask, u32 data) +{ + void __iomem *address = base + offset; + u32 value; + + value = readl(address); + value &= ~mask; + value |= (data & mask); + writel(value, address); +} + +static void hibvt_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip); + + hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm), + PWM_ENABLE_MASK, 0x1); +} + +static void hibvt_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip); + + hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm), + PWM_ENABLE_MASK, 0x0); +} + +static void hibvt_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_cycle_ns, int period_ns) +{ + struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip); + u32 freq, period, duty; + + freq = 
div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000); + + period = div_u64(freq * period_ns, 1000); + duty = div_u64(period * duty_cycle_ns, period_ns); + + hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG0_ADDR(pwm->hwpwm), + PWM_PERIOD_MASK, period); + + hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG1_ADDR(pwm->hwpwm), + PWM_DUTY_MASK, duty); +} + +static void hibvt_pwm_set_polarity(struct pwm_chip *chip, + struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip); + + if (polarity == PWM_POLARITY_INVERSED) + hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm), + PWM_POLARITY_MASK, (0x1 << PWM_POLARITY_SHIFT)); + else + hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm), + PWM_POLARITY_MASK, (0x0 << PWM_POLARITY_SHIFT)); +} + +static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip); + void __iomem *base; + u32 freq, value; + + freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000); + base = hi_pwm_chip->base; + + value = readl(base + PWM_CFG0_ADDR(pwm->hwpwm)); + state->period = div_u64(value * 1000, freq); + + value = readl(base + PWM_CFG1_ADDR(pwm->hwpwm)); + state->duty_cycle = div_u64(value * 1000, freq); + + value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm)); + state->enabled = (PWM_ENABLE_MASK & value); +} + +static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + if (state->polarity != pwm->state.polarity) + hibvt_pwm_set_polarity(chip, pwm, state->polarity); + + if (state->period != pwm->state.period || + state->duty_cycle != pwm->state.duty_cycle) + hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period); + + if (state->enabled != pwm->state.enabled) { + if (state->enabled) + hibvt_pwm_enable(chip, pwm); + else + hibvt_pwm_disable(chip, pwm); + } + + return 0; +} + +static struct pwm_ops hibvt_pwm_ops = { + .get_state = hibvt_pwm_get_state, + .apply = hibvt_pwm_apply, + + .owner = THIS_MODULE, +}; + +static int hibvt_pwm_probe(struct platform_device *pdev) +{ + const struct hibvt_pwm_soc *soc = + of_device_get_match_data(&pdev->dev); + struct hibvt_pwm_chip *pwm_chip; + struct resource *res; + int ret; + int i; + + pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL); + if (pwm_chip == NULL) + return -ENOMEM; + + pwm_chip->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pwm_chip->clk)) { + dev_err(&pdev->dev, "getting clock failed with %ld\n", + PTR_ERR(pwm_chip->clk)); + return PTR_ERR(pwm_chip->clk); + } + + pwm_chip->chip.ops = &hibvt_pwm_ops; + pwm_chip->chip.dev = &pdev->dev; + pwm_chip->chip.base = -1; + pwm_chip->chip.npwm = soc->num_pwms; + pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags; + pwm_chip->chip.of_pwm_n_cells = 3; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pwm_chip->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pwm_chip->base)) + return PTR_ERR(pwm_chip->base); + + ret = clk_prepare_enable(pwm_chip->clk); + if (ret < 0) + return ret; + + pwm_chip->rstc = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(pwm_chip->rstc)) { + clk_disable_unprepare(pwm_chip->clk); + return PTR_ERR(pwm_chip->rstc); + } + + reset_control_assert(pwm_chip->rstc); + msleep(30); + reset_control_deassert(pwm_chip->rstc); + + ret = pwmchip_add(&pwm_chip->chip); + if (ret < 0) { + clk_disable_unprepare(pwm_chip->clk); + return ret; + } + + for (i = 0; i < pwm_chip->chip.npwm; i++) { + 
hibvt_pwm_set_bits(pwm_chip->base, PWM_CTRL_ADDR(i), + PWM_KEEP_MASK, (0x1 << PWM_KEEP_SHIFT)); + } + + platform_set_drvdata(pdev, pwm_chip); + + return 0; +} + +static int hibvt_pwm_remove(struct platform_device *pdev) +{ + struct hibvt_pwm_chip *pwm_chip; + + pwm_chip = platform_get_drvdata(pdev); + + reset_control_assert(pwm_chip->rstc); + msleep(30); + reset_control_deassert(pwm_chip->rstc); + + clk_disable_unprepare(pwm_chip->clk); + + return pwmchip_remove(&pwm_chip->chip); +} + +static const struct of_device_id hibvt_pwm_of_match[] = { + { .compatible = "hisilicon,hi3516cv300-pwm", .data = &pwm_soc[0] }, + { .compatible = "hisilicon,hi3519v100-pwm", .data = &pwm_soc[1] }, + { } +}; +MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match); + +static struct platform_driver hibvt_pwm_driver = { + .driver = { + .name = "hibvt-pwm", + .of_match_table = hibvt_pwm_of_match, + }, + .probe = hibvt_pwm_probe, + .remove = hibvt_pwm_remove, +}; +module_platform_driver(hibvt_pwm_driver); + +MODULE_AUTHOR("Jian Yuan"); +MODULE_DESCRIPTION("HiSilicon BVT SoCs PWM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index 381871b2bb46ba3439b4ca4bb7ace7277233b372..045ef9fa6fe392a6986690fac25f3d020b470318 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -474,6 +474,7 @@ static int meson_pwm_probe(struct platform_device *pdev) if (IS_ERR(meson->base)) return PTR_ERR(meson->base); + spin_lock_init(&meson->lock); meson->chip.dev = &pdev->dev; meson->chip.ops = &meson_pwm_ops; meson->chip.base = -1; @@ -523,7 +524,6 @@ static struct platform_driver meson_pwm_driver = { }; module_platform_driver(meson_pwm_driver); -MODULE_ALIAS("platform:meson-pwm"); MODULE_DESCRIPTION("Amlogic Meson PWM Generator driver"); MODULE_AUTHOR("Neil Armstrong "); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 0296d8178ae29d509306670c9bb85147c50f0633..a813239300c3d46bba1320caac77ec7adc8363a8 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -425,6 +425,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) if (test_bit(PWMF_EXPORTED, &pwm->flags)) pwm_unexport_child(parent, pwm); } + + put_device(parent); } static int __init pwm_sysfs_init(void) diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 2142a5d3fc08c15bcfa24400faf03a694fbc851a..14294692beb9d06bdded2eb5ce4c89a0d4a93fd3 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -104,7 +104,7 @@ obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o -obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o +obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c index f7c88ff90c43c4f3867210a4afbf5f6856cebc40..302b57cb89c673a8d8036234641d2c9df5622a9d 100644 --- a/drivers/regulator/arizona-ldo1.c +++ b/drivers/regulator/arizona-ldo1.c @@ -130,6 +130,7 @@ static const struct regulator_desc arizona_ldo1_hc = { .uV_step = 50000, .n_voltages = 8, .enable_time = 1500, + .ramp_delay = 24000, .owner = THIS_MODULE, }; @@ -153,6 +154,7 @@ static const struct regulator_desc arizona_ldo1 = { .uV_step = 25000, 
.n_voltages = 13, .enable_time = 500, + .ramp_delay = 24000, .owner = THIS_MODULE, }; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 54382ef902c67b3df6702ba6034793cf5d2ae059..e6a512ebeae2762812212ac5b9264f92bb8252be 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -337,10 +337,18 @@ static const struct regulator_desc axp809_regulators[] = { AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)), AXP_DESC(AXP809, ELDO3, "eldo3", "eldoin", 700, 3300, 100, AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)), - AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100, + /* + * Note the datasheet only guarantees reliable operation up to + * 3.3V, this needs to be enforced via dts provided constraints + */ + AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100, AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07, AXP22X_IO_ENABLED, AXP22X_IO_DISABLED), - AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100, + /* + * Note the datasheet only guarantees reliable operation up to + * 3.3V, this needs to be enforced via dts provided constraints + */ + AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100, AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07, AXP22X_IO_ENABLED, AXP22X_IO_DISABLED), AXP_DESC_FIXED(AXP809, RTC_LDO, "rtc_ldo", "ips", 1800), diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 5c1519b229e0e5cd718e4694cf0b27cde1c73f6d..04baac9a165bbb56da292a51d0a56055947861e0 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -204,7 +204,7 @@ static struct device_node *of_get_regulator(struct device *dev, const char *supp regnode = of_parse_phandle(dev->of_node, prop_name, 0); if (!regnode) { - dev_dbg(dev, "Looking up %s property in node %s failed", + dev_dbg(dev, "Looking up %s property in node %s failed\n", prop_name, dev->of_node->full_name); return NULL; } @@ -293,7 +293,8 @@ static int regulator_check_current_limit(struct regulator_dev *rdev, } /* operating mode constraint check */ -static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode) +static int regulator_mode_constrain(struct regulator_dev *rdev, + unsigned int *mode) { switch (*mode) { case REGULATOR_MODE_FAST: @@ -3359,6 +3360,39 @@ unsigned int regulator_get_mode(struct regulator *regulator) } EXPORT_SYMBOL_GPL(regulator_get_mode); +static int _regulator_get_error_flags(struct regulator_dev *rdev, + unsigned int *flags) +{ + int ret; + + mutex_lock(&rdev->mutex); + + /* sanity check */ + if (!rdev->desc->ops->get_error_flags) { + ret = -EINVAL; + goto out; + } + + ret = rdev->desc->ops->get_error_flags(rdev, flags); +out: + mutex_unlock(&rdev->mutex); + return ret; +} + +/** + * regulator_get_error_flags - get regulator error information + * @regulator: regulator source + * @flags: pointer to store error flags + * + * Get the current regulator error information. 
+ */ +int regulator_get_error_flags(struct regulator *regulator, + unsigned int *flags) +{ + return _regulator_get_error_flags(regulator->rdev, flags); +} +EXPORT_SYMBOL_GPL(regulator_get_error_flags); + /** * regulator_set_load - set regulator load * @regulator: regulator source diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index 988a7472c2ab568c3d1c03d1092c0713073a6d28..a43b0e8a438d305a959d3d745c7c65bf796ca9f9 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -30,6 +30,9 @@ #include #include #include +#include +#include +#include struct fixed_voltage_data { struct regulator_desc desc; @@ -94,6 +97,44 @@ of_get_fixed_voltage_config(struct device *dev, return config; } +/** + * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info + * @dev: device requesting for fixed_voltage_config + * @desc: regulator description + * + * Populates fixed_voltage_config structure by extracting data through ACPI + * interface, returns a pointer to the populated structure of NULL if memory + * alloc fails. + */ +static struct fixed_voltage_config * +acpi_get_fixed_voltage_config(struct device *dev, + const struct regulator_desc *desc) +{ + struct fixed_voltage_config *config; + const char *supply_name; + struct gpio_desc *gpiod; + int ret; + + config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL); + if (!config) + return ERR_PTR(-ENOMEM); + + ret = device_property_read_string(dev, "supply-name", &supply_name); + if (!ret) + config->supply_name = supply_name; + + gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS); + if (IS_ERR(gpiod)) + return ERR_PTR(-ENODEV); + + config->gpio = desc_to_gpio(gpiod); + config->enable_high = device_property_read_bool(dev, + "enable-active-high"); + gpiod_put(gpiod); + + return config; +} + static struct regulator_ops fixed_voltage_ops = { }; @@ -114,6 +155,11 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) &drvdata->desc); if (IS_ERR(config)) return PTR_ERR(config); + } else if (ACPI_HANDLE(&pdev->dev)) { + config = acpi_get_fixed_voltage_config(&pdev->dev, + &drvdata->desc); + if (IS_ERR(config)) + return PTR_ERR(config); } else { config = dev_get_platdata(&pdev->dev); } diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 83e89e5d47526c17be6d728fcc7a651f45aec4b0..0fce06acfaeccd0d55061be589a206ce01764e96 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -162,8 +162,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np, of_property_read_u32(np, "startup-delay-us", &config->startup_delay); config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0); - if (config->enable_gpio == -EPROBE_DEFER) - return ERR_PTR(-EPROBE_DEFER); + if (config->enable_gpio < 0 && config->enable_gpio != -ENOENT) + return ERR_PTR(config->enable_gpio); /* Fetch GPIOs. 
- optional property*/ ret = of_gpio_count(np); @@ -190,8 +190,11 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np, for (i = 0; i < config->nr_gpios; i++) { gpio = of_get_named_gpio(np, "gpios", i); - if (gpio < 0) + if (gpio < 0) { + if (gpio != -ENOENT) + return ERR_PTR(gpio); break; + } config->gpios[i].gpio = gpio; if (proplen > 0) { of_property_read_u32_index(np, "gpios-states", diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index bcf38fd5106a2324ccd83dace8d9a8e25a46d789..379cdacc05d8ecc76465eba8f2515025c7b9fc16 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -454,13 +454,17 @@ EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap); int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable) { unsigned int val; + unsigned int val_on = rdev->desc->bypass_val_on; int ret; ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val); if (ret != 0) return ret; - *enable = (val & rdev->desc->bypass_mask) == rdev->desc->bypass_val_on; + if (!val_on) + val_on = rdev->desc->bypass_mask; + + *enable = (val & rdev->desc->bypass_mask) == val_on; return 0; } diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c index e504b9148226c3652ef6151e670bbc20e1cf9e10..70e3df653381c36262ee180e3c773ad10eccf1ff 100644 --- a/drivers/regulator/lp873x-regulator.c +++ b/drivers/regulator/lp873x-regulator.c @@ -24,6 +24,7 @@ [_id] = { \ .desc = { \ .name = _name, \ + .supply_name = _of "-in", \ .id = _id, \ .of_match = of_match_ptr(_of), \ .regulators_node = of_match_ptr("regulators"),\ diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c index a1b49a6d538f1493489f2e45092feb6dd0b96219..d088a7c79e60be4e6799337fd1834c28dea166a3 100644 --- a/drivers/regulator/max77620-regulator.c +++ b/drivers/regulator/max77620-regulator.c @@ -73,7 +73,6 @@ struct max77620_regulator_info { }; struct max77620_regulator_pdata { - struct regulator_init_data *reg_idata; int active_fps_src; int active_fps_pd_slot; int active_fps_pu_slot; @@ -81,6 +80,7 @@ struct max77620_regulator_pdata { int suspend_fps_pd_slot; int suspend_fps_pu_slot; int current_mode; + int power_ok; int ramp_rate_setting; }; @@ -351,11 +351,48 @@ static int max77620_set_slew_rate(struct max77620_regulator *pmic, int id, return 0; } +static int max77620_config_power_ok(struct max77620_regulator *pmic, int id) +{ + struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id]; + struct max77620_regulator_info *rinfo = pmic->rinfo[id]; + struct max77620_chip *chip = dev_get_drvdata(pmic->dev->parent); + u8 val, mask; + int ret; + + switch (chip->chip_id) { + case MAX20024: + if (rpdata->power_ok >= 0) { + if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) + mask = MAX20024_SD_CFG1_MPOK_MASK; + else + mask = MAX20024_LDO_CFG2_MPOK_MASK; + + val = rpdata->power_ok ? 
mask : 0; + + ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, + mask, val); + if (ret < 0) { + dev_err(pmic->dev, "Reg 0x%02x update failed %d\n", + rinfo->cfg_addr, ret); + return ret; + } + } + break; + + default: + break; + } + + return 0; +} + static int max77620_init_pmic(struct max77620_regulator *pmic, int id) { struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id]; int ret; + max77620_config_power_ok(pmic, id); + /* Update power mode */ ret = max77620_regulator_get_power_mode(pmic, id); if (ret < 0) @@ -595,6 +632,12 @@ static int max77620_of_parse_cb(struct device_node *np, np, "maxim,suspend-fps-power-down-slot", &pval); rpdata->suspend_fps_pd_slot = (!ret) ? pval : -1; + ret = of_property_read_u32(np, "maxim,power-ok-control", &pval); + if (!ret) + rpdata->power_ok = pval; + else + rpdata->power_ok = -1; + ret = of_property_read_u32(np, "maxim,ramp-rate-setting", &pval); rpdata->ramp_rate_setting = (!ret) ? pval : 0; @@ -807,6 +850,8 @@ static int max77620_regulator_resume(struct device *dev) for (id = 0; id < MAX77620_NUM_REGS; id++) { reg_pdata = &pmic->reg_pdata[id]; + max77620_config_power_ok(pmic, id); + max77620_regulator_set_fps_slots(pmic, id, false); if (reg_pdata->active_fps_src < 0) continue; diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 3314bf299a51f54fcff2fed6f600ae33eaf44a57..fb44d5215e30c8573b8eef9ad751367ef4e23e81 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -120,7 +120,7 @@ static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = { static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev) { struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev); - int id = rdev->desc->id - RK808_ID_DCDC1; + int id = rdev_get_id(rdev); struct gpio_desc *gpio = pdata->dvs_gpio[id]; unsigned int val; int ret; @@ -193,7 +193,7 @@ static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) { struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev); - int id = rdev->desc->id - RK808_ID_DCDC1; + int id = rdev_get_id(rdev); struct gpio_desc *gpio = pdata->dvs_gpio[id]; unsigned int reg = rdev->desc->vsel_reg; unsigned old_sel; @@ -232,7 +232,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int new_selector) { struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev); - int id = rdev->desc->id - RK808_ID_DCDC1; + int id = rdev_get_id(rdev); struct gpio_desc *gpio = pdata->dvs_gpio[id]; /* if there is no dvs1/2 pin, we don't need wait extra time here. */ @@ -245,8 +245,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev, static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) { unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US; - unsigned int reg = rk808_buck_config_regs[rdev->desc->id - - RK808_ID_DCDC1]; + unsigned int reg = rk808_buck_config_regs[rdev_get_id(rdev)]; switch (ramp_delay) { case 1 ... 
2000: diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c index 7d2ae3e9e942f26f302f526450487213da2cc44d..342f5da799758e0d054ef2aaa64c9ff1c566365e 100644 --- a/drivers/regulator/stw481x-vmmc.c +++ b/drivers/regulator/stw481x-vmmc.c @@ -47,7 +47,8 @@ static struct regulator_desc vmmc_regulator = { .volt_table = stw481x_vmmc_voltages, .enable_time = 200, /* FIXME: look this up */ .enable_reg = STW_CONF1, - .enable_mask = STW_CONF1_PDN_VMMC, + .enable_mask = STW_CONF1_PDN_VMMC | STW_CONF1_MMC_LS_STATUS, + .enable_val = STW_CONF1_PDN_VMMC, .vsel_reg = STW_CONF1, .vsel_mask = STW_CONF1_VMMC_MASK, }; diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index dad0bac09ecfd150a4f7c39659e72df8f8a4dcd5..c179a3a221af3347d83fb5cafad45099ba199b18 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c @@ -375,7 +375,7 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data( struct device_node *np = pdev->dev.parent->of_node; struct device_node *regulators; struct of_regulator_match *matches; - static struct regulator_init_data *reg_data; + struct regulator_init_data *reg_data; int idx = 0, count, ret; tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board), diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c index 33f389d583ef115a988a2f2e32c89b7b3add893e..ecb0371780af6d8927a95c5ec61da1849172be40 100644 --- a/drivers/regulator/tps65086-regulator.c +++ b/drivers/regulator/tps65086-regulator.c @@ -71,7 +71,7 @@ struct tps65086_regulator { unsigned int decay_mask; }; -static const struct regulator_linear_range tps65086_buck126_10mv_ranges[] = { +static const struct regulator_linear_range tps65086_10mv_ranges[] = { REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0), REGULATOR_LINEAR_RANGE(410000, 0x1, 0x7F, 10000), }; @@ -82,7 +82,7 @@ static const struct regulator_linear_range tps65086_buck126_25mv_ranges[] = { REGULATOR_LINEAR_RANGE(1025000, 0x19, 0x7F, 25000), }; -static const struct regulator_linear_range tps65086_buck345_ranges[] = { +static const struct regulator_linear_range tps65086_buck345_25mv_ranges[] = { REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0), REGULATOR_LINEAR_RANGE(425000, 0x1, 0x7F, 25000), }; @@ -125,27 +125,27 @@ static int tps65086_of_parse_cb(struct device_node *dev, static struct tps65086_regulator regulators[] = { TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL, BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0), - tps65086_buck126_10mv_ranges, TPS65086_BUCK1CTRL, + tps65086_10mv_ranges, TPS65086_BUCK1CTRL, BIT(0)), TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL, BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1), - tps65086_buck126_10mv_ranges, TPS65086_BUCK2CTRL, + tps65086_10mv_ranges, TPS65086_BUCK2CTRL, BIT(0)), TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID, BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2), - tps65086_buck345_ranges, TPS65086_BUCK3DECAY, + tps65086_10mv_ranges, TPS65086_BUCK3DECAY, BIT(0)), TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID, BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0), - tps65086_buck345_ranges, TPS65086_BUCK4VID, + tps65086_10mv_ranges, TPS65086_BUCK4VID, BIT(0)), TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID, BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0), - tps65086_buck345_ranges, TPS65086_BUCK5CTRL, + tps65086_10mv_ranges, TPS65086_BUCK5CTRL, BIT(0)), TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID, 
BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0), - tps65086_buck126_10mv_ranges, TPS65086_BUCK6CTRL, + tps65086_10mv_ranges, TPS65086_BUCK6CTRL, BIT(0)), TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL, VDOA1_VID_MASK, TPS65086_LDOA1CTRL, BIT(0), @@ -162,18 +162,6 @@ static struct tps65086_regulator regulators[] = { TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)), }; -static inline bool has_25mv_mode(int id) -{ - switch (id) { - case BUCK1: - case BUCK2: - case BUCK6: - return true; - default: - return false; - } -} - static int tps65086_of_parse_cb(struct device_node *dev, const struct regulator_desc *desc, struct regulator_config *config) @@ -181,12 +169,27 @@ static int tps65086_of_parse_cb(struct device_node *dev, int ret; /* Check for 25mV step mode */ - if (has_25mv_mode(desc->id) && - of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) { - regulators[desc->id].desc.linear_ranges = + if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) { + switch (desc->id) { + case BUCK1: + case BUCK2: + case BUCK6: + regulators[desc->id].desc.linear_ranges = tps65086_buck126_25mv_ranges; - regulators[desc->id].desc.n_linear_ranges = + regulators[desc->id].desc.n_linear_ranges = ARRAY_SIZE(tps65086_buck126_25mv_ranges); + break; + case BUCK3: + case BUCK4: + case BUCK5: + regulators[desc->id].desc.linear_ranges = + tps65086_buck345_25mv_ranges; + regulators[desc->id].desc.n_linear_ranges = + ARRAY_SIZE(tps65086_buck345_25mv_ranges); + break; + default: + dev_warn(config->dev, "25mV step mode only valid for BUCK regulators\n"); + } } /* Check for decay mode */ diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c index eb0f5b13841a505ea4157e949cf179d71d26175e..9aafbb03482d735fcc55bdfab3df83249900a024 100644 --- a/drivers/regulator/tps65218-regulator.c +++ b/drivers/regulator/tps65218-regulator.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -30,10 +31,11 @@ enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4, DCDC5, DCDC6, LDO1, LS3 }; -#define TPS65218_REGULATOR(_name, _id, _type, _ops, _n, _vr, _vm, _er, _em, \ - _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \ +#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \ + _em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \ { \ .name = _name, \ + .of_match = _of, \ .id = _id, \ .ops = &_ops, \ .n_voltages = _n, \ @@ -54,14 +56,6 @@ enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4, .bypass_mask = _sm, \ } \ -#define TPS65218_INFO(_id, _nm, _min, _max) \ - [_id] = { \ - .id = _id, \ - .name = _nm, \ - .min_uV = _min, \ - .max_uV = _max, \ - } - static const struct regulator_linear_range dcdc1_dcdc2_ranges[] = { REGULATOR_LINEAR_RANGE(850000, 0x0, 0x32, 10000), REGULATOR_LINEAR_RANGE(1375000, 0x33, 0x3f, 25000), @@ -77,36 +71,6 @@ static const struct regulator_linear_range dcdc4_ranges[] = { REGULATOR_LINEAR_RANGE(1600000, 0x10, 0x34, 50000), }; -static struct tps_info tps65218_pmic_regs[] = { - TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000), - TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000), - TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000), - TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000), - TPS65218_INFO(DCDC5, "DCDC5", 1000000, 1000000), - TPS65218_INFO(DCDC6, "DCDC6", 1800000, 1800000), - TPS65218_INFO(LDO1, "LDO1", 900000, 3400000), - TPS65218_INFO(LS3, "LS3", -1, -1), -}; - -#define TPS65218_OF_MATCH(comp, label) \ - { \ - .compatible = comp, \ - .data = &label, \ - } - -static 
const struct of_device_id tps65218_of_match[] = { - TPS65218_OF_MATCH("ti,tps65218-dcdc1", tps65218_pmic_regs[DCDC1]), - TPS65218_OF_MATCH("ti,tps65218-dcdc2", tps65218_pmic_regs[DCDC2]), - TPS65218_OF_MATCH("ti,tps65218-dcdc3", tps65218_pmic_regs[DCDC3]), - TPS65218_OF_MATCH("ti,tps65218-dcdc4", tps65218_pmic_regs[DCDC4]), - TPS65218_OF_MATCH("ti,tps65218-dcdc5", tps65218_pmic_regs[DCDC5]), - TPS65218_OF_MATCH("ti,tps65218-dcdc6", tps65218_pmic_regs[DCDC6]), - TPS65218_OF_MATCH("ti,tps65218-ldo1", tps65218_pmic_regs[LDO1]), - TPS65218_OF_MATCH("ti,tps65218-ls3", tps65218_pmic_regs[LS3]), - { } -}; -MODULE_DEVICE_TABLE(of, tps65218_of_match); - static int tps65218_pmic_set_voltage_sel(struct regulator_dev *dev, unsigned selector) { @@ -188,7 +152,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev) if (rid == TPS65218_DCDC_3 && tps->rev == TPS65218_REV_2_1) return 0; - if (!tps->info[rid]->strobe) { + if (!tps->strobes[rid]) { if (rid == TPS65218_DCDC_3) tps->info[rid]->strobe = 3; else @@ -197,8 +161,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev) return tps65218_set_bits(tps, dev->desc->bypass_reg, dev->desc->bypass_mask, - tps->info[rid]->strobe, - TPS65218_PROTECT_L1); + tps->strobes[rid], TPS65218_PROTECT_L1); } /* Operations permitted on DCDC1, DCDC2 */ @@ -272,7 +235,7 @@ static int tps65218_pmic_get_current_limit(struct regulator_dev *dev) unsigned int index; struct tps65218 *tps = rdev_get_drvdata(dev); - retval = tps65218_reg_read(tps, dev->desc->csel_reg, &index); + retval = regmap_read(tps->regmap, dev->desc->csel_reg, &index); if (retval < 0) return retval; @@ -300,104 +263,104 @@ static struct regulator_ops tps65218_dcdc56_pmic_ops = { }; static const struct regulator_desc regulators[] = { - TPS65218_REGULATOR("DCDC1", TPS65218_DCDC_1, REGULATOR_VOLTAGE, - tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC1, + TPS65218_REGULATOR("DCDC1", "regulator-dcdc1", TPS65218_DCDC_1, + REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64, + TPS65218_REG_CONTROL_DCDC1, TPS65218_CONTROL_DCDC1_MASK, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC1_EN, 0, 0, dcdc1_dcdc2_ranges, 2, 4000, 0, TPS65218_REG_SEQ3, TPS65218_SEQ3_DC1_SEQ_MASK), - TPS65218_REGULATOR("DCDC2", TPS65218_DCDC_2, REGULATOR_VOLTAGE, - tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC2, + TPS65218_REGULATOR("DCDC2", "regulator-dcdc2", TPS65218_DCDC_2, + REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64, + TPS65218_REG_CONTROL_DCDC2, TPS65218_CONTROL_DCDC2_MASK, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC2_EN, 0, 0, dcdc1_dcdc2_ranges, 2, 4000, 0, TPS65218_REG_SEQ3, TPS65218_SEQ3_DC2_SEQ_MASK), - TPS65218_REGULATOR("DCDC3", TPS65218_DCDC_3, REGULATOR_VOLTAGE, - tps65218_ldo1_dcdc34_ops, 64, + TPS65218_REGULATOR("DCDC3", "regulator-dcdc3", TPS65218_DCDC_3, + REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64, TPS65218_REG_CONTROL_DCDC3, TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC3_EN, 0, 0, ldo1_dcdc3_ranges, 2, 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK), - TPS65218_REGULATOR("DCDC4", TPS65218_DCDC_4, REGULATOR_VOLTAGE, - tps65218_ldo1_dcdc34_ops, 53, + TPS65218_REGULATOR("DCDC4", "regulator-dcdc4", TPS65218_DCDC_4, + REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 53, TPS65218_REG_CONTROL_DCDC4, TPS65218_CONTROL_DCDC4_MASK, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC4_EN, 0, 0, dcdc4_ranges, 2, 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK), - TPS65218_REGULATOR("DCDC5", TPS65218_DCDC_5, REGULATOR_VOLTAGE, - tps65218_dcdc56_pmic_ops, 1, -1, -1, - 
TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0, 0, - NULL, 0, 0, 1000000, TPS65218_REG_SEQ5, + TPS65218_REGULATOR("DCDC5", "regulator-dcdc5", TPS65218_DCDC_5, + REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1, + -1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0, + 0, NULL, 0, 0, 1000000, TPS65218_REG_SEQ5, TPS65218_SEQ5_DC5_SEQ_MASK), - TPS65218_REGULATOR("DCDC6", TPS65218_DCDC_6, REGULATOR_VOLTAGE, - tps65218_dcdc56_pmic_ops, 1, -1, -1, - TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0, 0, - NULL, 0, 0, 1800000, TPS65218_REG_SEQ5, + TPS65218_REGULATOR("DCDC6", "regulator-dcdc6", TPS65218_DCDC_6, + REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1, + -1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0, + 0, NULL, 0, 0, 1800000, TPS65218_REG_SEQ5, TPS65218_SEQ5_DC6_SEQ_MASK), - TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, REGULATOR_VOLTAGE, - tps65218_ldo1_dcdc34_ops, 64, + TPS65218_REGULATOR("LDO1", "regulator-ldo1", TPS65218_LDO_1, + REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64, TPS65218_REG_CONTROL_LDO1, TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges, 2, 0, 0, TPS65218_REG_SEQ6, TPS65218_SEQ6_LDO1_SEQ_MASK), - TPS65218_REGULATOR("LS3", TPS65218_LS_3, REGULATOR_CURRENT, - tps65218_ls3_ops, 0, 0, 0, TPS65218_REG_ENABLE2, - TPS65218_ENABLE2_LS3_EN, TPS65218_REG_CONFIG2, - TPS65218_CONFIG2_LS3ILIM_MASK, NULL, 0, 0, 0, 0, 0), + TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3, + REGULATOR_CURRENT, tps65218_ls3_ops, 0, 0, 0, + TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN, + TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK, + NULL, 0, 0, 0, 0, 0), }; static int tps65218_regulator_probe(struct platform_device *pdev) { struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent); - struct regulator_init_data *init_data; - const struct tps_info *template; struct regulator_dev *rdev; - const struct of_device_id *match; struct regulator_config config = { }; - int id, ret; + int i, ret; unsigned int val; - match = of_match_device(tps65218_of_match, &pdev->dev); - if (!match) - return -ENODEV; - - template = match->data; - id = template->id; - init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node, - ®ulators[id]); - - platform_set_drvdata(pdev, tps); - - tps->info[id] = &tps65218_pmic_regs[id]; config.dev = &pdev->dev; - config.init_data = init_data; + config.dev->of_node = tps->dev->of_node; config.driver_data = tps; config.regmap = tps->regmap; - config.of_node = pdev->dev.of_node; - rdev = devm_regulator_register(&pdev->dev, ®ulators[id], &config); - if (IS_ERR(rdev)) { - dev_err(tps->dev, "failed to register %s regulator\n", - pdev->name); - return PTR_ERR(rdev); - } + /* Allocate memory for strobes */ + tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) * + TPS65218_NUM_REGULATOR, GFP_KERNEL); - ret = tps65218_reg_read(tps, regulators[id].bypass_reg, &val); - if (ret) - return ret; + for (i = 0; i < ARRAY_SIZE(regulators); i++) { + rdev = devm_regulator_register(&pdev->dev, ®ulators[i], + &config); + if (IS_ERR(rdev)) { + dev_err(tps->dev, "failed to register %s regulator\n", + pdev->name); + return PTR_ERR(rdev); + } - tps->info[id]->strobe = val & regulators[id].bypass_mask; + ret = regmap_read(tps->regmap, regulators[i].bypass_reg, &val); + if (ret) + return ret; + + tps->strobes[i] = val & regulators[i].bypass_mask; + } return 0; } +static const struct platform_device_id tps65218_regulator_id_table[] = { + { "tps65218-regulator", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, 
tps65218_regulator_id_table); + static struct platform_driver tps65218_regulator_driver = { .driver = { .name = "tps65218-pmic", - .of_match_table = tps65218_of_match, }, .probe = tps65218_regulator_probe, + .id_table = tps65218_regulator_id_table, }; module_platform_driver(tps65218_regulator_driver); diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 210681d6b7438b71dc2a0d508d24708ac2dd2e36..6c9ec84121bde4fd124bbdfea390e509f53530ad 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -24,7 +24,7 @@ #include /* - * The TWL4030/TW5030/TPS659x0/TWL6030 family chips include power management, a + * The TWL4030/TW5030/TPS659x0 family chips include power management, a * USB OTG transceiver, an RTC, ADC, PWM, and lots more. Some versions * include an audio codec, battery charger, and more voltage regulators. * These chips are often used in OMAP-based systems. @@ -48,25 +48,12 @@ struct twlreg_info { /* State REMAP default configuration */ u8 remap; - /* chip constraints on regulator behavior */ - u16 min_mV; - u16 max_mV; - - u8 flags; - /* used by regulator core */ struct regulator_desc desc; /* chip specific features */ unsigned long features; - /* - * optional override functions for voltage set/get - * these are currently only used for SMPS regulators - */ - int (*get_voltage)(void *data); - int (*set_voltage)(void *data, int target_uV); - /* data passed from board for external get/set voltage */ void *data; }; @@ -88,33 +75,6 @@ struct twlreg_info { #define VREG_STATE 2 #define VREG_VOLTAGE 3 #define VREG_VOLTAGE_SMPS 4 -/* TWL6030 Misc register offsets */ -#define VREG_BC_ALL 1 -#define VREG_BC_REF 2 -#define VREG_BC_PROC 3 -#define VREG_BC_CLK_RST 4 - -/* TWL6030 LDO register values for CFG_STATE */ -#define TWL6030_CFG_STATE_OFF 0x00 -#define TWL6030_CFG_STATE_ON 0x01 -#define TWL6030_CFG_STATE_OFF2 0x02 -#define TWL6030_CFG_STATE_SLEEP 0x03 -#define TWL6030_CFG_STATE_GRP_SHIFT 5 -#define TWL6030_CFG_STATE_APP_SHIFT 2 -#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT) -#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\ - TWL6030_CFG_STATE_APP_SHIFT) - -/* Flags for SMPS Voltage reading */ -#define SMPS_OFFSET_EN BIT(0) -#define SMPS_EXTENDED_EN BIT(1) - -/* twl6032 SMPS EPROM values */ -#define TWL6030_SMPS_OFFSET 0xB0 -#define TWL6030_SMPS_MULT 0xB3 -#define SMPS_MULTOFFSET_SMPS4 BIT(0) -#define SMPS_MULTOFFSET_VIO BIT(1) -#define SMPS_MULTOFFSET_SMPS3 BIT(6) static inline int twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset) @@ -168,26 +128,6 @@ static int twl4030reg_is_enabled(struct regulator_dev *rdev) return state & P1_GRP_4030; } -static int twl6030reg_is_enabled(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int grp = 0, val; - - if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) { - grp = twlreg_grp(rdev); - if (grp < 0) - return grp; - grp &= P1_GRP_6030; - } else { - grp = 1; - } - - val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); - val = TWL6030_CFG_STATE_APP(val); - - return grp && (val == TWL6030_CFG_STATE_ON); -} - #define PB_I2C_BUSY BIT(0) #define PB_I2C_BWEN BIT(1) @@ -273,23 +213,6 @@ static int twl4030reg_enable(struct regulator_dev *rdev) return ret; } -static int twl6030reg_enable(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int grp = 0; - int ret; - - if (!(twl_class_is_6030() && (info->features & 
TWL6032_SUBCLASS))) - grp = twlreg_grp(rdev); - if (grp < 0) - return grp; - - ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, - grp << TWL6030_CFG_STATE_GRP_SHIFT | - TWL6030_CFG_STATE_ON); - return ret; -} - static int twl4030reg_disable(struct regulator_dev *rdev) { struct twlreg_info *info = rdev_get_drvdata(rdev); @@ -307,23 +230,6 @@ static int twl4030reg_disable(struct regulator_dev *rdev) return ret; } -static int twl6030reg_disable(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int grp = 0; - int ret; - - if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) - grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030; - - /* For 6030, set the off state for all grps enabled */ - ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, - (grp) << TWL6030_CFG_STATE_GRP_SHIFT | - TWL6030_CFG_STATE_OFF); - - return ret; -} - static int twl4030reg_get_status(struct regulator_dev *rdev) { int state = twlreg_grp(rdev); @@ -340,33 +246,6 @@ static int twl4030reg_get_status(struct regulator_dev *rdev) : REGULATOR_STATUS_STANDBY; } -static int twl6030reg_get_status(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int val; - - val = twlreg_grp(rdev); - if (val < 0) - return val; - - val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); - - switch (TWL6030_CFG_STATE_APP(val)) { - case TWL6030_CFG_STATE_ON: - return REGULATOR_STATUS_NORMAL; - - case TWL6030_CFG_STATE_SLEEP: - return REGULATOR_STATUS_STANDBY; - - case TWL6030_CFG_STATE_OFF: - case TWL6030_CFG_STATE_OFF2: - default: - break; - } - - return REGULATOR_STATUS_OFF; -} - static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode) { struct twlreg_info *info = rdev_get_drvdata(rdev); @@ -399,36 +278,6 @@ static inline unsigned int twl4030reg_map_mode(unsigned int mode) } } -static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int grp = 0; - int val; - - if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) - grp = twlreg_grp(rdev); - - if (grp < 0) - return grp; - - /* Compose the state register settings */ - val = grp << TWL6030_CFG_STATE_GRP_SHIFT; - /* We can only set the mode through state machine commands... 
*/ - switch (mode) { - case REGULATOR_MODE_NORMAL: - val |= TWL6030_CFG_STATE_ON; - break; - case REGULATOR_MODE_STANDBY: - val |= TWL6030_CFG_STATE_SLEEP; - break; - - default: - return -EINVAL; - } - - return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val); -} - /*----------------------------------------------------------------------*/ /* @@ -565,12 +414,7 @@ twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, struct twlreg_info *info = rdev_get_drvdata(rdev); int vsel = DIV_ROUND_UP(min_uV - 600000, 12500); - if (info->set_voltage) { - return info->set_voltage(info->data, min_uV); - } else { - twlreg_write(info, TWL_MODULE_PM_RECEIVER, - VREG_VOLTAGE_SMPS_4030, vsel); - } + twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030, vsel); return 0; } @@ -580,9 +424,6 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev) struct twlreg_info *info = rdev_get_drvdata(rdev); int vsel; - if (info->get_voltage) - return info->get_voltage(info->data); - vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030); @@ -594,85 +435,6 @@ static struct regulator_ops twl4030smps_ops = { .get_voltage = twl4030smps_get_voltage, }; -static int twl6030coresmps_set_voltage(struct regulator_dev *rdev, int min_uV, - int max_uV, unsigned *selector) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - if (info->set_voltage) - return info->set_voltage(info->data, min_uV); - - return -ENODEV; -} - -static int twl6030coresmps_get_voltage(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - if (info->get_voltage) - return info->get_voltage(info->data); - - return -ENODEV; -} - -static struct regulator_ops twl6030coresmps_ops = { - .set_voltage = twl6030coresmps_set_voltage, - .get_voltage = twl6030coresmps_get_voltage, -}; - -static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - switch (sel) { - case 0: - return 0; - case 1 ... 24: - /* Linear mapping from 00000001 to 00011000: - * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001) - */ - return (info->min_mV + 100 * (sel - 1)) * 1000; - case 25 ... 
30: - return -EINVAL; - case 31: - return 2750000; - default: - return -EINVAL; - } -} - -static int -twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, - selector); -} - -static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE); - - return vsel; -} - -static struct regulator_ops twl6030ldo_ops = { - .list_voltage = twl6030ldo_list_voltage, - - .set_voltage_sel = twl6030ldo_set_voltage_sel, - .get_voltage_sel = twl6030ldo_get_voltage_sel, - - .enable = twl6030reg_enable, - .disable = twl6030reg_disable, - .is_enabled = twl6030reg_is_enabled, - - .set_mode = twl6030reg_set_mode, - - .get_status = twl6030reg_get_status, -}; - /*----------------------------------------------------------------------*/ static struct regulator_ops twl4030fixed_ops = { @@ -687,226 +449,8 @@ static struct regulator_ops twl4030fixed_ops = { .get_status = twl4030reg_get_status, }; -static struct regulator_ops twl6030fixed_ops = { - .list_voltage = regulator_list_voltage_linear, - - .enable = twl6030reg_enable, - .disable = twl6030reg_disable, - .is_enabled = twl6030reg_is_enabled, - - .set_mode = twl6030reg_set_mode, - - .get_status = twl6030reg_get_status, -}; - -/* - * SMPS status and control - */ - -static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - int voltage = 0; - - switch (info->flags) { - case SMPS_OFFSET_EN: - voltage = 100000; - /* fall through */ - case 0: - switch (index) { - case 0: - voltage = 0; - break; - case 58: - voltage = 1350 * 1000; - break; - case 59: - voltage = 1500 * 1000; - break; - case 60: - voltage = 1800 * 1000; - break; - case 61: - voltage = 1900 * 1000; - break; - case 62: - voltage = 2100 * 1000; - break; - default: - voltage += (600000 + (12500 * (index - 1))); - } - break; - case SMPS_EXTENDED_EN: - switch (index) { - case 0: - voltage = 0; - break; - case 58: - voltage = 2084 * 1000; - break; - case 59: - voltage = 2315 * 1000; - break; - case 60: - voltage = 2778 * 1000; - break; - case 61: - voltage = 2932 * 1000; - break; - case 62: - voltage = 3241 * 1000; - break; - default: - voltage = (1852000 + (38600 * (index - 1))); - } - break; - case SMPS_OFFSET_EN | SMPS_EXTENDED_EN: - switch (index) { - case 0: - voltage = 0; - break; - case 58: - voltage = 4167 * 1000; - break; - case 59: - voltage = 2315 * 1000; - break; - case 60: - voltage = 2778 * 1000; - break; - case 61: - voltage = 2932 * 1000; - break; - case 62: - voltage = 3241 * 1000; - break; - default: - voltage = (2161000 + (38600 * (index - 1))); - } - break; - } - - return voltage; -} - -static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV, - int max_uV) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int vsel = 0; - - switch (info->flags) { - case 0: - if (min_uV == 0) - vsel = 0; - else if ((min_uV >= 600000) && (min_uV <= 1300000)) { - vsel = DIV_ROUND_UP(min_uV - 600000, 12500); - vsel++; - } - /* Values 1..57 for vsel are linear and can be calculated - * values 58..62 are non linear. 
- */ - else if ((min_uV > 1900000) && (min_uV <= 2100000)) - vsel = 62; - else if ((min_uV > 1800000) && (min_uV <= 1900000)) - vsel = 61; - else if ((min_uV > 1500000) && (min_uV <= 1800000)) - vsel = 60; - else if ((min_uV > 1350000) && (min_uV <= 1500000)) - vsel = 59; - else if ((min_uV > 1300000) && (min_uV <= 1350000)) - vsel = 58; - else - return -EINVAL; - break; - case SMPS_OFFSET_EN: - if (min_uV == 0) - vsel = 0; - else if ((min_uV >= 700000) && (min_uV <= 1420000)) { - vsel = DIV_ROUND_UP(min_uV - 700000, 12500); - vsel++; - } - /* Values 1..57 for vsel are linear and can be calculated - * values 58..62 are non linear. - */ - else if ((min_uV > 1900000) && (min_uV <= 2100000)) - vsel = 62; - else if ((min_uV > 1800000) && (min_uV <= 1900000)) - vsel = 61; - else if ((min_uV > 1350000) && (min_uV <= 1800000)) - vsel = 60; - else if ((min_uV > 1350000) && (min_uV <= 1500000)) - vsel = 59; - else if ((min_uV > 1300000) && (min_uV <= 1350000)) - vsel = 58; - else - return -EINVAL; - break; - case SMPS_EXTENDED_EN: - if (min_uV == 0) { - vsel = 0; - } else if ((min_uV >= 1852000) && (max_uV <= 4013600)) { - vsel = DIV_ROUND_UP(min_uV - 1852000, 38600); - vsel++; - } - break; - case SMPS_OFFSET_EN|SMPS_EXTENDED_EN: - if (min_uV == 0) { - vsel = 0; - } else if ((min_uV >= 2161000) && (min_uV <= 4321000)) { - vsel = DIV_ROUND_UP(min_uV - 2161000, 38600); - vsel++; - } - break; - } - - return vsel; -} - -static int twl6030smps_set_voltage_sel(struct regulator_dev *rdev, - unsigned int selector) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS, - selector); -} - -static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS); -} - -static struct regulator_ops twlsmps_ops = { - .list_voltage = twl6030smps_list_voltage, - .map_voltage = twl6030smps_map_voltage, - - .set_voltage_sel = twl6030smps_set_voltage_sel, - .get_voltage_sel = twl6030smps_get_voltage_sel, - - .enable = twl6030reg_enable, - .disable = twl6030reg_disable, - .is_enabled = twl6030reg_is_enabled, - - .set_mode = twl6030reg_set_mode, - - .get_status = twl6030reg_get_status, -}; - /*----------------------------------------------------------------------*/ -#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ - remap_conf) \ - TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ - remap_conf, TWL4030, twl4030fixed_ops, \ - twl4030reg_map_mode) -#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \ - TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \ - 0x0, TWL6030, twl6030fixed_ops, NULL) - #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \ static const struct twlreg_info TWL4030_INFO_##label = { \ .base = offset, \ @@ -942,79 +486,22 @@ static const struct twlreg_info TWL4030_INFO_##label = { \ }, \ } -#define TWL6030_ADJUSTABLE_SMPS(label) \ -static const struct twlreg_info TWL6030_INFO_##label = { \ - .desc = { \ - .name = #label, \ - .id = TWL6030_REG_##label, \ - .ops = &twl6030coresmps_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - }, \ - } - -#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) \ -static const struct twlreg_info TWL6030_INFO_##label = { \ - .base = offset, \ - .min_mV = min_mVolts, \ - .max_mV = max_mVolts, \ - .desc = { \ - .name = #label, \ - .id = TWL6030_REG_##label, \ - .n_voltages = 32, 
\ - .ops = &twl6030ldo_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - }, \ - } - -#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) \ -static const struct twlreg_info TWL6032_INFO_##label = { \ - .base = offset, \ - .min_mV = min_mVolts, \ - .max_mV = max_mVolts, \ - .desc = { \ - .name = #label, \ - .id = TWL6032_REG_##label, \ - .n_voltages = 32, \ - .ops = &twl6030ldo_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - }, \ - } - -#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \ - family, operations, map_mode) \ +#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ + remap_conf) \ static const struct twlreg_info TWLFIXED_INFO_##label = { \ .base = offset, \ .id = num, \ - .min_mV = mVolts, \ .remap = remap_conf, \ .desc = { \ .name = #label, \ - .id = family##_REG_##label, \ + .id = TWL4030##_REG_##label, \ .n_voltages = 1, \ - .ops = &operations, \ + .ops = &twl4030fixed_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ .min_uV = mVolts * 1000, \ .enable_time = turnon_delay, \ - .of_map_mode = map_mode, \ - }, \ - } - -#define TWL6032_ADJUSTABLE_SMPS(label, offset) \ -static const struct twlreg_info TWLSMPS_INFO_##label = { \ - .base = offset, \ - .min_mV = 600, \ - .max_mV = 2100, \ - .desc = { \ - .name = #label, \ - .id = TWL6032_REG_##label, \ - .n_voltages = 63, \ - .ops = &twlsmps_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ + .of_map_mode = twl4030reg_map_mode, \ }, \ } @@ -1038,60 +525,11 @@ TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08); TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08); TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08); /* VUSBCP is managed *only* by the USB subchip */ -/* 6030 REG with base as PMC Slave Misc : 0x0030 */ -/* Turnon-delay and remap configuration values for 6030 are not - verified since the specification is not public */ -TWL6030_ADJUSTABLE_SMPS(VDD1); -TWL6030_ADJUSTABLE_SMPS(VDD2); -TWL6030_ADJUSTABLE_SMPS(VDD3); -TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300); -TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300); -TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300); -TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300); -TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300); -TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300); -/* 6025 are renamed compared to 6030 versions */ -TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300); -TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300); -TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300); -TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300); -TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300); -TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300); -TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300); -TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300); -TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300); TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08); TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08); TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08); -TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0); -TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0); -TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0); -TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0); -TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0); -TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0); -TWL6032_ADJUSTABLE_SMPS(SMPS3, 0x34); -TWL6032_ADJUSTABLE_SMPS(SMPS4, 0x10); -TWL6032_ADJUSTABLE_SMPS(VIO, 0x16); - -static u8 twl_get_smps_offset(void) -{ - u8 value; - - 
twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value, - TWL6030_SMPS_OFFSET); - return value; -} - -static u8 twl_get_smps_mult(void) -{ - u8 value; - - twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value, - TWL6030_SMPS_MULT); - return value; -} #define TWL_OF_MATCH(comp, family, label) \ { \ @@ -1121,81 +559,37 @@ static const struct of_device_id twl_of_match[] = { TWL4030_OF_MATCH("ti,twl4030-vio", VIO), TWL4030_OF_MATCH("ti,twl4030-vdd1", VDD1), TWL4030_OF_MATCH("ti,twl4030-vdd2", VDD2), - TWL6030_OF_MATCH("ti,twl6030-vdd1", VDD1), - TWL6030_OF_MATCH("ti,twl6030-vdd2", VDD2), - TWL6030_OF_MATCH("ti,twl6030-vdd3", VDD3), - TWL6030_OF_MATCH("ti,twl6030-vaux1", VAUX1_6030), - TWL6030_OF_MATCH("ti,twl6030-vaux2", VAUX2_6030), - TWL6030_OF_MATCH("ti,twl6030-vaux3", VAUX3_6030), - TWL6030_OF_MATCH("ti,twl6030-vmmc", VMMC), - TWL6030_OF_MATCH("ti,twl6030-vpp", VPP), - TWL6030_OF_MATCH("ti,twl6030-vusim", VUSIM), - TWL6032_OF_MATCH("ti,twl6032-ldo2", LDO2), - TWL6032_OF_MATCH("ti,twl6032-ldo4", LDO4), - TWL6032_OF_MATCH("ti,twl6032-ldo3", LDO3), - TWL6032_OF_MATCH("ti,twl6032-ldo5", LDO5), - TWL6032_OF_MATCH("ti,twl6032-ldo1", LDO1), - TWL6032_OF_MATCH("ti,twl6032-ldo7", LDO7), - TWL6032_OF_MATCH("ti,twl6032-ldo6", LDO6), - TWL6032_OF_MATCH("ti,twl6032-ldoln", LDOLN), - TWL6032_OF_MATCH("ti,twl6032-ldousb", LDOUSB), TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1), TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8), TWLFIXED_OF_MATCH("ti,twl4030-vusb3v1", VUSB3V1), - TWLFIXED_OF_MATCH("ti,twl6030-vana", VANA), - TWLFIXED_OF_MATCH("ti,twl6030-vcxio", VCXIO), - TWLFIXED_OF_MATCH("ti,twl6030-vdac", VDAC), - TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB), - TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8), - TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1), - TWLSMPS_OF_MATCH("ti,twl6032-smps3", SMPS3), - TWLSMPS_OF_MATCH("ti,twl6032-smps4", SMPS4), - TWLSMPS_OF_MATCH("ti,twl6032-vio", VIO), {}, }; MODULE_DEVICE_TABLE(of, twl_of_match); static int twlreg_probe(struct platform_device *pdev) { - int i, id; + int id; struct twlreg_info *info; const struct twlreg_info *template; struct regulator_init_data *initdata; struct regulation_constraints *c; struct regulator_dev *rdev; - struct twl_regulator_driver_data *drvdata; const struct of_device_id *match; struct regulator_config config = { }; match = of_match_device(twl_of_match, &pdev->dev); - if (match) { - template = match->data; - id = template->desc.id; - initdata = of_get_regulator_init_data(&pdev->dev, - pdev->dev.of_node, - &template->desc); - drvdata = NULL; - } else { - id = pdev->id; - initdata = dev_get_platdata(&pdev->dev); - for (i = 0, template = NULL; i < ARRAY_SIZE(twl_of_match); i++) { - template = twl_of_match[i].data; - if (template && template->desc.id == id) - break; - } - if (i == ARRAY_SIZE(twl_of_match)) - return -ENODEV; - - drvdata = initdata->driver_data; - if (!drvdata) - return -EINVAL; - } + if (!match) + return -ENODEV; + template = match->data; if (!template) return -ENODEV; + id = template->desc.id; + initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node, + &template->desc); if (!initdata) return -EINVAL; @@ -1203,14 +597,6 @@ static int twlreg_probe(struct platform_device *pdev) if (!info) return -ENOMEM; - if (drvdata) { - /* copy the driver data into regulator data */ - info->features = drvdata->features; - info->data = drvdata->data; - info->set_voltage = drvdata->set_voltage; - info->get_voltage = drvdata->get_voltage; - } - /* 
Constrain board-specific capabilities according to what * this driver and the chip itself can actually do. */ @@ -1233,27 +619,6 @@ static int twlreg_probe(struct platform_device *pdev) break; } - switch (id) { - case TWL6032_REG_SMPS3: - if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3) - info->flags |= SMPS_EXTENDED_EN; - if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3) - info->flags |= SMPS_OFFSET_EN; - break; - case TWL6032_REG_SMPS4: - if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4) - info->flags |= SMPS_EXTENDED_EN; - if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4) - info->flags |= SMPS_OFFSET_EN; - break; - case TWL6032_REG_VIO: - if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO) - info->flags |= SMPS_EXTENDED_EN; - if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO) - info->flags |= SMPS_OFFSET_EN; - break; - } - config.dev = &pdev->dev; config.init_data = initdata; config.driver_data = info; @@ -1267,9 +632,7 @@ static int twlreg_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, rdev); - if (twl_class_is_4030()) - twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP, - info->remap); + twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP, info->remap); /* NOTE: many regulators support short-circuit IRQs (presentable * as REGULATOR_OVER_CURRENT notifications?) configured via: @@ -1282,7 +645,7 @@ static int twlreg_probe(struct platform_device *pdev) return 0; } -MODULE_ALIAS("platform:twl_reg"); +MODULE_ALIAS("platform:twl4030_reg"); static struct platform_driver twlreg_driver = { .probe = twlreg_probe, @@ -1290,7 +653,7 @@ static struct platform_driver twlreg_driver = { * "twl_regulator.12" (and friends) to "twl_regulator.1". */ .driver = { - .name = "twl_reg", + .name = "twl4030_reg", .of_match_table = of_match_ptr(twl_of_match), }, }; @@ -1307,5 +670,5 @@ static void __exit twlreg_exit(void) } module_exit(twlreg_exit) -MODULE_DESCRIPTION("TWL regulator driver"); +MODULE_DESCRIPTION("TWL4030 regulator driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c new file mode 100644 index 0000000000000000000000000000000000000000..4864b9d742c0f7915cc792aaacd692c2a7f305b0 --- /dev/null +++ b/drivers/regulator/twl6030-regulator.c @@ -0,0 +1,793 @@ +/* + * Split TWL6030 logic from twl-regulator.c: + * Copyright (C) 2008 David Brownell + * + * Copyright (C) 2016 Nicolae Rosia + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct twlreg_info { + /* start of regulator's PM_RECEIVER control register bank */ + u8 base; + + /* twl resource ID, for resource control state machine */ + u8 id; + + /* chip constraints on regulator behavior */ + u16 min_mV; + + u8 flags; + + /* used by regulator core */ + struct regulator_desc desc; + + /* chip specific features */ + unsigned long features; + + /* data passed from board for external get/set voltage */ + void *data; +}; + + +/* LDO control registers ... offset is from the base of its register bank. + * The first three registers of all power resource banks help hardware to + * manage the various resource groups. 
+ */ +/* Common offset in TWL4030/6030 */ +#define VREG_GRP 0 +/* TWL6030 register offsets */ +#define VREG_TRANS 1 +#define VREG_STATE 2 +#define VREG_VOLTAGE 3 +#define VREG_VOLTAGE_SMPS 4 +/* TWL6030 Misc register offsets */ +#define VREG_BC_ALL 1 +#define VREG_BC_REF 2 +#define VREG_BC_PROC 3 +#define VREG_BC_CLK_RST 4 + +/* TWL6030 LDO register values for CFG_STATE */ +#define TWL6030_CFG_STATE_OFF 0x00 +#define TWL6030_CFG_STATE_ON 0x01 +#define TWL6030_CFG_STATE_OFF2 0x02 +#define TWL6030_CFG_STATE_SLEEP 0x03 +#define TWL6030_CFG_STATE_GRP_SHIFT 5 +#define TWL6030_CFG_STATE_APP_SHIFT 2 +#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT) +#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\ + TWL6030_CFG_STATE_APP_SHIFT) + +/* Flags for SMPS Voltage reading */ +#define SMPS_OFFSET_EN BIT(0) +#define SMPS_EXTENDED_EN BIT(1) + +/* twl6032 SMPS EPROM values */ +#define TWL6030_SMPS_OFFSET 0xB0 +#define TWL6030_SMPS_MULT 0xB3 +#define SMPS_MULTOFFSET_SMPS4 BIT(0) +#define SMPS_MULTOFFSET_VIO BIT(1) +#define SMPS_MULTOFFSET_SMPS3 BIT(6) + +static inline int +twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset) +{ + u8 value; + int status; + + status = twl_i2c_read_u8(slave_subgp, + &value, info->base + offset); + return (status < 0) ? status : value; +} + +static inline int +twlreg_write(struct twlreg_info *info, unsigned slave_subgp, unsigned offset, + u8 value) +{ + return twl_i2c_write_u8(slave_subgp, + value, info->base + offset); +} + +/* generic power resource operations, which work on all regulators */ +static int twlreg_grp(struct regulator_dev *rdev) +{ + return twlreg_read(rdev_get_drvdata(rdev), TWL_MODULE_PM_RECEIVER, + VREG_GRP); +} + +/* + * Enable/disable regulators by joining/leaving the P1 (processor) group. + * We assume nobody else is updating the DEV_GRP registers. 
+ */ +/* definition for 6030 family */ +#define P3_GRP_6030 BIT(2) /* secondary processor, modem, etc */ +#define P2_GRP_6030 BIT(1) /* "peripherals" */ +#define P1_GRP_6030 BIT(0) /* CPU/Linux */ + +static int twl6030reg_is_enabled(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int grp = 0, val; + + if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) { + grp = twlreg_grp(rdev); + if (grp < 0) + return grp; + grp &= P1_GRP_6030; + } else { + grp = 1; + } + + val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); + val = TWL6030_CFG_STATE_APP(val); + + return grp && (val == TWL6030_CFG_STATE_ON); +} + +#define PB_I2C_BUSY BIT(0) +#define PB_I2C_BWEN BIT(1) + + +static int twl6030reg_enable(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int grp = 0; + int ret; + + if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) + grp = twlreg_grp(rdev); + if (grp < 0) + return grp; + + ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, + grp << TWL6030_CFG_STATE_GRP_SHIFT | + TWL6030_CFG_STATE_ON); + return ret; +} + +static int twl6030reg_disable(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int grp = 0; + int ret; + + if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) + grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030; + + /* For 6030, set the off state for all grps enabled */ + ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, + (grp) << TWL6030_CFG_STATE_GRP_SHIFT | + TWL6030_CFG_STATE_OFF); + + return ret; +} + +static int twl6030reg_get_status(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int val; + + val = twlreg_grp(rdev); + if (val < 0) + return val; + + val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); + + switch (TWL6030_CFG_STATE_APP(val)) { + case TWL6030_CFG_STATE_ON: + return REGULATOR_STATUS_NORMAL; + + case TWL6030_CFG_STATE_SLEEP: + return REGULATOR_STATUS_STANDBY; + + case TWL6030_CFG_STATE_OFF: + case TWL6030_CFG_STATE_OFF2: + default: + break; + } + + return REGULATOR_STATUS_OFF; +} + +static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int grp = 0; + int val; + + if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) + grp = twlreg_grp(rdev); + + if (grp < 0) + return grp; + + /* Compose the state register settings */ + val = grp << TWL6030_CFG_STATE_GRP_SHIFT; + /* We can only set the mode through state machine commands... */ + switch (mode) { + case REGULATOR_MODE_NORMAL: + val |= TWL6030_CFG_STATE_ON; + break; + case REGULATOR_MODE_STANDBY: + val |= TWL6030_CFG_STATE_SLEEP; + break; + + default: + return -EINVAL; + } + + return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val); +} + +static int twl6030coresmps_set_voltage(struct regulator_dev *rdev, int min_uV, + int max_uV, unsigned *selector) +{ + return -ENODEV; +} + +static int twl6030coresmps_get_voltage(struct regulator_dev *rdev) +{ + return -ENODEV; +} + +static struct regulator_ops twl6030coresmps_ops = { + .set_voltage = twl6030coresmps_set_voltage, + .get_voltage = twl6030coresmps_get_voltage, +}; + +static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + switch (sel) { + case 0: + return 0; + case 1 ... 
24: + /* Linear mapping from 00000001 to 00011000: + * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001) + */ + return (info->min_mV + 100 * (sel - 1)) * 1000; + case 25 ... 30: + return -EINVAL; + case 31: + return 2750000; + default: + return -EINVAL; + } +} + +static int +twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, + selector); +} + +static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE); + + return vsel; +} + +static struct regulator_ops twl6030ldo_ops = { + .list_voltage = twl6030ldo_list_voltage, + + .set_voltage_sel = twl6030ldo_set_voltage_sel, + .get_voltage_sel = twl6030ldo_get_voltage_sel, + + .enable = twl6030reg_enable, + .disable = twl6030reg_disable, + .is_enabled = twl6030reg_is_enabled, + + .set_mode = twl6030reg_set_mode, + + .get_status = twl6030reg_get_status, +}; + +static struct regulator_ops twl6030fixed_ops = { + .list_voltage = regulator_list_voltage_linear, + + .enable = twl6030reg_enable, + .disable = twl6030reg_disable, + .is_enabled = twl6030reg_is_enabled, + + .set_mode = twl6030reg_set_mode, + + .get_status = twl6030reg_get_status, +}; + +/* + * SMPS status and control + */ + +static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + int voltage = 0; + + switch (info->flags) { + case SMPS_OFFSET_EN: + voltage = 100000; + /* fall through */ + case 0: + switch (index) { + case 0: + voltage = 0; + break; + case 58: + voltage = 1350 * 1000; + break; + case 59: + voltage = 1500 * 1000; + break; + case 60: + voltage = 1800 * 1000; + break; + case 61: + voltage = 1900 * 1000; + break; + case 62: + voltage = 2100 * 1000; + break; + default: + voltage += (600000 + (12500 * (index - 1))); + } + break; + case SMPS_EXTENDED_EN: + switch (index) { + case 0: + voltage = 0; + break; + case 58: + voltage = 2084 * 1000; + break; + case 59: + voltage = 2315 * 1000; + break; + case 60: + voltage = 2778 * 1000; + break; + case 61: + voltage = 2932 * 1000; + break; + case 62: + voltage = 3241 * 1000; + break; + default: + voltage = (1852000 + (38600 * (index - 1))); + } + break; + case SMPS_OFFSET_EN | SMPS_EXTENDED_EN: + switch (index) { + case 0: + voltage = 0; + break; + case 58: + voltage = 4167 * 1000; + break; + case 59: + voltage = 2315 * 1000; + break; + case 60: + voltage = 2778 * 1000; + break; + case 61: + voltage = 2932 * 1000; + break; + case 62: + voltage = 3241 * 1000; + break; + default: + voltage = (2161000 + (38600 * (index - 1))); + } + break; + } + + return voltage; +} + +static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV, + int max_uV) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel = 0; + + switch (info->flags) { + case 0: + if (min_uV == 0) + vsel = 0; + else if ((min_uV >= 600000) && (min_uV <= 1300000)) { + vsel = DIV_ROUND_UP(min_uV - 600000, 12500); + vsel++; + } + /* Values 1..57 for vsel are linear and can be calculated + * values 58..62 are non linear. 
+ */ + else if ((min_uV > 1900000) && (min_uV <= 2100000)) + vsel = 62; + else if ((min_uV > 1800000) && (min_uV <= 1900000)) + vsel = 61; + else if ((min_uV > 1500000) && (min_uV <= 1800000)) + vsel = 60; + else if ((min_uV > 1350000) && (min_uV <= 1500000)) + vsel = 59; + else if ((min_uV > 1300000) && (min_uV <= 1350000)) + vsel = 58; + else + return -EINVAL; + break; + case SMPS_OFFSET_EN: + if (min_uV == 0) + vsel = 0; + else if ((min_uV >= 700000) && (min_uV <= 1420000)) { + vsel = DIV_ROUND_UP(min_uV - 700000, 12500); + vsel++; + } + /* Values 1..57 for vsel are linear and can be calculated + * values 58..62 are non linear. + */ + else if ((min_uV > 1900000) && (min_uV <= 2100000)) + vsel = 62; + else if ((min_uV > 1800000) && (min_uV <= 1900000)) + vsel = 61; + else if ((min_uV > 1350000) && (min_uV <= 1800000)) + vsel = 60; + else if ((min_uV > 1350000) && (min_uV <= 1500000)) + vsel = 59; + else if ((min_uV > 1300000) && (min_uV <= 1350000)) + vsel = 58; + else + return -EINVAL; + break; + case SMPS_EXTENDED_EN: + if (min_uV == 0) { + vsel = 0; + } else if ((min_uV >= 1852000) && (max_uV <= 4013600)) { + vsel = DIV_ROUND_UP(min_uV - 1852000, 38600); + vsel++; + } + break; + case SMPS_OFFSET_EN|SMPS_EXTENDED_EN: + if (min_uV == 0) { + vsel = 0; + } else if ((min_uV >= 2161000) && (min_uV <= 4321000)) { + vsel = DIV_ROUND_UP(min_uV - 2161000, 38600); + vsel++; + } + break; + } + + return vsel; +} + +static int twl6030smps_set_voltage_sel(struct regulator_dev *rdev, + unsigned int selector) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS, + selector); +} + +static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS); +} + +static struct regulator_ops twlsmps_ops = { + .list_voltage = twl6030smps_list_voltage, + .map_voltage = twl6030smps_map_voltage, + + .set_voltage_sel = twl6030smps_set_voltage_sel, + .get_voltage_sel = twl6030smps_get_voltage_sel, + + .enable = twl6030reg_enable, + .disable = twl6030reg_disable, + .is_enabled = twl6030reg_is_enabled, + + .set_mode = twl6030reg_set_mode, + + .get_status = twl6030reg_get_status, +}; + +/*----------------------------------------------------------------------*/ + +#define TWL6030_ADJUSTABLE_SMPS(label) \ +static const struct twlreg_info TWL6030_INFO_##label = { \ + .desc = { \ + .name = #label, \ + .id = TWL6030_REG_##label, \ + .ops = &twl6030coresmps_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + } + +#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts) \ +static const struct twlreg_info TWL6030_INFO_##label = { \ + .base = offset, \ + .min_mV = min_mVolts, \ + .desc = { \ + .name = #label, \ + .id = TWL6030_REG_##label, \ + .n_voltages = 32, \ + .ops = &twl6030ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + } + +#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts) \ +static const struct twlreg_info TWL6032_INFO_##label = { \ + .base = offset, \ + .min_mV = min_mVolts, \ + .desc = { \ + .name = #label, \ + .id = TWL6032_REG_##label, \ + .n_voltages = 32, \ + .ops = &twl6030ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + } + +#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \ +static const struct twlreg_info TWLFIXED_INFO_##label = { \ + .base = offset, \ + .id = 0, \ + .min_mV = mVolts, \ + .desc = { \ + .name = 
#label, \ + .id = TWL6030##_REG_##label, \ + .n_voltages = 1, \ + .ops = &twl6030fixed_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = mVolts * 1000, \ + .enable_time = turnon_delay, \ + .of_map_mode = NULL, \ + }, \ + } + +#define TWL6032_ADJUSTABLE_SMPS(label, offset) \ +static const struct twlreg_info TWLSMPS_INFO_##label = { \ + .base = offset, \ + .min_mV = 600, \ + .desc = { \ + .name = #label, \ + .id = TWL6032_REG_##label, \ + .n_voltages = 63, \ + .ops = &twlsmps_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + } + +/* VUSBCP is managed *only* by the USB subchip */ +/* 6030 REG with base as PMC Slave Misc : 0x0030 */ +/* Turnon-delay and remap configuration values for 6030 are not + verified since the specification is not public */ +TWL6030_ADJUSTABLE_SMPS(VDD1); +TWL6030_ADJUSTABLE_SMPS(VDD2); +TWL6030_ADJUSTABLE_SMPS(VDD3); +TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000); +TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000); +TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000); +TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000); +TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000); +TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000); +/* 6025 are renamed compared to 6030 versions */ +TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000); +TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000); +TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000); +TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000); +TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000); +TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000); +TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000); +TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000); +TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000); +TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0); +TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0); +TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0); +TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0); +TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0); +TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0); +TWL6032_ADJUSTABLE_SMPS(SMPS3, 0x34); +TWL6032_ADJUSTABLE_SMPS(SMPS4, 0x10); +TWL6032_ADJUSTABLE_SMPS(VIO, 0x16); + +static u8 twl_get_smps_offset(void) +{ + u8 value; + + twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value, + TWL6030_SMPS_OFFSET); + return value; +} + +static u8 twl_get_smps_mult(void) +{ + u8 value; + + twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value, + TWL6030_SMPS_MULT); + return value; +} + +#define TWL_OF_MATCH(comp, family, label) \ + { \ + .compatible = comp, \ + .data = &family##_INFO_##label, \ + } + +#define TWL6030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6030, label) +#define TWL6032_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6032, label) +#define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label) +#define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label) + +static const struct of_device_id twl_of_match[] = { + TWL6030_OF_MATCH("ti,twl6030-vdd1", VDD1), + TWL6030_OF_MATCH("ti,twl6030-vdd2", VDD2), + TWL6030_OF_MATCH("ti,twl6030-vdd3", VDD3), + TWL6030_OF_MATCH("ti,twl6030-vaux1", VAUX1_6030), + TWL6030_OF_MATCH("ti,twl6030-vaux2", VAUX2_6030), + TWL6030_OF_MATCH("ti,twl6030-vaux3", VAUX3_6030), + TWL6030_OF_MATCH("ti,twl6030-vmmc", VMMC), + TWL6030_OF_MATCH("ti,twl6030-vpp", VPP), + TWL6030_OF_MATCH("ti,twl6030-vusim", VUSIM), + TWL6032_OF_MATCH("ti,twl6032-ldo2", LDO2), + TWL6032_OF_MATCH("ti,twl6032-ldo4", LDO4), + TWL6032_OF_MATCH("ti,twl6032-ldo3", LDO3), + TWL6032_OF_MATCH("ti,twl6032-ldo5", LDO5), + TWL6032_OF_MATCH("ti,twl6032-ldo1", LDO1), + TWL6032_OF_MATCH("ti,twl6032-ldo7", LDO7), + TWL6032_OF_MATCH("ti,twl6032-ldo6", LDO6), + TWL6032_OF_MATCH("ti,twl6032-ldoln", LDOLN), + 
TWL6032_OF_MATCH("ti,twl6032-ldousb", LDOUSB), + TWLFIXED_OF_MATCH("ti,twl6030-vana", VANA), + TWLFIXED_OF_MATCH("ti,twl6030-vcxio", VCXIO), + TWLFIXED_OF_MATCH("ti,twl6030-vdac", VDAC), + TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB), + TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8), + TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1), + TWLSMPS_OF_MATCH("ti,twl6032-smps3", SMPS3), + TWLSMPS_OF_MATCH("ti,twl6032-smps4", SMPS4), + TWLSMPS_OF_MATCH("ti,twl6032-vio", VIO), + {}, +}; +MODULE_DEVICE_TABLE(of, twl_of_match); + +static int twlreg_probe(struct platform_device *pdev) +{ + int id; + struct twlreg_info *info; + const struct twlreg_info *template; + struct regulator_init_data *initdata; + struct regulation_constraints *c; + struct regulator_dev *rdev; + const struct of_device_id *match; + struct regulator_config config = { }; + + match = of_match_device(twl_of_match, &pdev->dev); + if (!match) + return -ENODEV; + + template = match->data; + if (!template) + return -ENODEV; + + id = template->desc.id; + initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node, + &template->desc); + if (!initdata) + return -EINVAL; + + info = devm_kmemdup(&pdev->dev, template, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + /* Constrain board-specific capabilities according to what + * this driver and the chip itself can actually do. + */ + c = &initdata->constraints; + c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY; + c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE + | REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS; + + switch (id) { + case TWL6032_REG_SMPS3: + if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3) + info->flags |= SMPS_EXTENDED_EN; + if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3) + info->flags |= SMPS_OFFSET_EN; + break; + case TWL6032_REG_SMPS4: + if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4) + info->flags |= SMPS_EXTENDED_EN; + if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4) + info->flags |= SMPS_OFFSET_EN; + break; + case TWL6032_REG_VIO: + if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO) + info->flags |= SMPS_EXTENDED_EN; + if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO) + info->flags |= SMPS_OFFSET_EN; + break; + } + + config.dev = &pdev->dev; + config.init_data = initdata; + config.driver_data = info; + config.of_node = pdev->dev.of_node; + + rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, "can't register %s, %ld\n", + info->desc.name, PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + platform_set_drvdata(pdev, rdev); + + /* NOTE: many regulators support short-circuit IRQs (presentable + * as REGULATOR_OVER_CURRENT notifications?) configured via: + * - SC_CONFIG + * - SC_DETECT1 (vintana2, vmmc1/2, vaux1/2/3/4) + * - SC_DETECT2 (vusb, vdac, vio, vdd1/2, vpll2) + * - IT_CONFIG + */ + + return 0; +} + +MODULE_ALIAS("platform:twl6030_reg"); + +static struct platform_driver twlreg_driver = { + .probe = twlreg_probe, + /* NOTE: short name, to work around driver model truncation of + * "twl_regulator.12" (and friends) to "twl_regulator.1". 
+ */ + .driver = { + .name = "twl6030_reg", + .of_match_table = of_match_ptr(twl_of_match), + }, +}; + +static int __init twlreg_init(void) +{ + return platform_driver_register(&twlreg_driver); +} +subsys_initcall(twlreg_init); + +static void __exit twlreg_exit(void) +{ + platform_driver_unregister(&twlreg_driver); +} +module_exit(twlreg_exit) + +MODULE_DESCRIPTION("TWL6030 regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index f396bfef5d42863b4f3b9609c9798b4df4ce6059..8f9cf0bc571ccb94ff67729d440be665f90a58ab 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -1,20 +1,21 @@ menu "Remoteproc drivers" -# REMOTEPROC gets selected by whoever wants it config REMOTEPROC - tristate + tristate "Support for Remote Processor subsystem" depends on HAS_DMA select CRC32 select FW_LOADER select VIRTIO select VIRTUALIZATION +if REMOTEPROC + config OMAP_REMOTEPROC tristate "OMAP remoteproc support" depends on HAS_DMA depends on ARCH_OMAP4 || SOC_OMAP5 depends on OMAP_IOMMU - select REMOTEPROC + depends on REMOTEPROC select MAILBOX select OMAP2PLUS_MBOX select RPMSG_VIRTIO @@ -31,20 +32,10 @@ config OMAP_REMOTEPROC It's safe to say n here if you're not interested in multimedia offloading or just want a bare minimum kernel. -config STE_MODEM_RPROC - tristate "STE-Modem remoteproc support" - depends on HAS_DMA - select REMOTEPROC - default n - help - Say y or m here to support STE-Modem shared memory driver. - This can be either built-in or a loadable module. - If unsure say N. - config WKUP_M3_RPROC tristate "AMx3xx Wakeup M3 remoteproc support" depends on SOC_AM33XX || SOC_AM43XX - select REMOTEPROC + depends on REMOTEPROC help Say y here to support Wakeup M3 remote processor on TI AM33xx and AM43xx family of SoCs. @@ -57,8 +48,8 @@ config WKUP_M3_RPROC config DA8XX_REMOTEPROC tristate "DA8xx/OMAP-L13x remoteproc support" depends on ARCH_DAVINCI_DA8XX + depends on REMOTEPROC select CMA if MMU - select REMOTEPROC select RPMSG_VIRTIO help Say y here to support DA8xx/OMAP-L13x remote processors via the @@ -77,6 +68,18 @@ config DA8XX_REMOTEPROC It's safe to say n here if you're not interested in multimedia offloading. +config QCOM_ADSP_PIL + tristate "Qualcomm ADSP Peripheral Image Loader" + depends on OF && ARCH_QCOM + depends on REMOTEPROC + depends on QCOM_SMEM + select MFD_SYSCON + select QCOM_MDT_LOADER + select QCOM_SCM + help + Say y here to support the TrustZone based Peripherial Image Loader + for the Qualcomm ADSP remote processors. + config QCOM_MDT_LOADER tristate @@ -84,25 +87,22 @@ config QCOM_Q6V5_PIL tristate "Qualcomm Hexagon V5 Peripherial Image Loader" depends on OF && ARCH_QCOM depends on QCOM_SMEM + depends on REMOTEPROC select MFD_SYSCON select QCOM_MDT_LOADER - select REMOTEPROC + select QCOM_SCM help Say y here to support the Qualcomm Peripherial Image Loader for the Hexagon V5 based remote processors. -config QCOM_WCNSS_IRIS - tristate - depends on OF && ARCH_QCOM - config QCOM_WCNSS_PIL tristate "Qualcomm WCNSS Peripheral Image Loader" depends on OF && ARCH_QCOM + depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) depends on QCOM_SMEM + depends on REMOTEPROC select QCOM_MDT_LOADER select QCOM_SCM - select QCOM_WCNSS_IRIS - select REMOTEPROC help Say y here to support the Peripheral Image Loader for the Qualcomm Wireless Connectivity Subsystem. 
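The drivers/remoteproc/Kconfig hunks in this patch turn REMOTEPROC into a user-visible tristate and switch the individual drivers from "select REMOTEPROC" to "depends on REMOTEPROC" inside an "if REMOTEPROC" ... "endif" block, so the core framework is chosen explicitly rather than being pulled in implicitly by whichever driver is enabled. A minimal sketch of the resulting dependency pattern follows; EXAMPLE_RPROC_DRIVER is a hypothetical symbol used only for illustration and is not part of this patch.

    config REMOTEPROC
            tristate "Support for Remote Processor subsystem"
            # now user-selectable instead of being select-ed by drivers

    if REMOTEPROC

    config EXAMPLE_RPROC_DRIVER
            tristate "Example remote processor driver"
            depends on REMOTEPROC
            # was: select REMOTEPROC

    endif # REMOTEPROC

With this arrangement, disabling REMOTEPROC hides all of the dependent driver options at once, which matches the conversions applied to OMAP_REMOTEPROC, WKUP_M3_RPROC, DA8XX_REMOTEPROC, the Qualcomm PIL drivers, and ST_REMOTEPROC in the hunks above and below.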
@@ -110,10 +110,16 @@ config QCOM_WCNSS_PIL config ST_REMOTEPROC tristate "ST remoteproc support" depends on ARCH_STI - select REMOTEPROC + depends on REMOTEPROC help Say y here to support ST's adjunct processors via the remote processor framework. This can be either built-in or a loadable module. +config ST_SLIM_REMOTEPROC + tristate + depends on REMOTEPROC + +endif # REMOTEPROC + endmenu diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 6dfb62ed643f8ea46db285b4fc77a60ca4f8f3ee..0938ea3c41ba617f638e3a4cb4f6834866a2b088 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -5,14 +5,17 @@ obj-$(CONFIG_REMOTEPROC) += remoteproc.o remoteproc-y := remoteproc_core.o remoteproc-y += remoteproc_debugfs.o +remoteproc-y += remoteproc_sysfs.o remoteproc-y += remoteproc_virtio.o remoteproc-y += remoteproc_elf_loader.o obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o -obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o +obj-$(CONFIG_QCOM_ADSP_PIL) += qcom_adsp_pil.o obj-$(CONFIG_QCOM_MDT_LOADER) += qcom_mdt_loader.o obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o -obj-$(CONFIG_QCOM_WCNSS_IRIS) += qcom_wcnss_iris.o -obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss.o +obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o +qcom_wcnss_pil-y += qcom_wcnss.o +qcom_wcnss_pil-y += qcom_wcnss_iris.o obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o +obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o diff --git a/drivers/remoteproc/qcom_adsp_pil.c b/drivers/remoteproc/qcom_adsp_pil.c new file mode 100644 index 0000000000000000000000000000000000000000..43a4ed2f346cf6370eab882fcbe07d4b4deda39e --- /dev/null +++ b/drivers/remoteproc/qcom_adsp_pil.c @@ -0,0 +1,428 @@ +/* + * Qualcomm ADSP Peripheral Image Loader for MSM8974 and MSM8996 + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qcom_mdt_loader.h" +#include "remoteproc_internal.h" + +#define ADSP_CRASH_REASON_SMEM 423 +#define ADSP_FIRMWARE_NAME "adsp.mdt" +#define ADSP_PAS_ID 1 + +struct qcom_adsp { + struct device *dev; + struct rproc *rproc; + + int wdog_irq; + int fatal_irq; + int ready_irq; + int handover_irq; + int stop_ack_irq; + + struct qcom_smem_state *state; + unsigned stop_bit; + + struct clk *xo; + + struct regulator *cx_supply; + + struct completion start_done; + struct completion stop_done; + + phys_addr_t mem_phys; + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; +}; + +static int adsp_load(struct rproc *rproc, const struct firmware *fw) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + phys_addr_t fw_addr; + size_t fw_size; + bool relocate; + int ret; + + ret = qcom_scm_pas_init_image(ADSP_PAS_ID, fw->data, fw->size); + if (ret) { + dev_err(&rproc->dev, "invalid firmware metadata\n"); + return ret; + } + + ret = qcom_mdt_parse(fw, &fw_addr, &fw_size, &relocate); + if (ret) { + dev_err(&rproc->dev, "failed to parse mdt header\n"); + return ret; + } + + if (relocate) { + adsp->mem_reloc = fw_addr; + + ret = qcom_scm_pas_mem_setup(ADSP_PAS_ID, adsp->mem_phys, fw_size); + if (ret) { + dev_err(&rproc->dev, "unable to setup memory for image\n"); + return ret; + } + } + + return qcom_mdt_load(rproc, fw, rproc->firmware); +} + +static const struct rproc_fw_ops adsp_fw_ops = { + .find_rsc_table = qcom_mdt_find_rsc_table, + .load = adsp_load, +}; + +static int adsp_start(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + + ret = clk_prepare_enable(adsp->xo); + if (ret) + return ret; + + ret = regulator_enable(adsp->cx_supply); + if (ret) + goto disable_clocks; + + ret = qcom_scm_pas_auth_and_reset(ADSP_PAS_ID); + if (ret) { + dev_err(adsp->dev, + "failed to authenticate image and release reset\n"); + goto disable_regulators; + } + + ret = wait_for_completion_timeout(&adsp->start_done, + msecs_to_jiffies(5000)); + if (!ret) { + dev_err(adsp->dev, "start timed out\n"); + qcom_scm_pas_shutdown(ADSP_PAS_ID); + ret = -ETIMEDOUT; + goto disable_regulators; + } + + ret = 0; + +disable_regulators: + regulator_disable(adsp->cx_supply); +disable_clocks: + clk_disable_unprepare(adsp->xo); + + return ret; +} + +static int adsp_stop(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + + qcom_smem_state_update_bits(adsp->state, + BIT(adsp->stop_bit), + BIT(adsp->stop_bit)); + + ret = wait_for_completion_timeout(&adsp->stop_done, + msecs_to_jiffies(5000)); + if (ret == 0) + dev_err(adsp->dev, "timed out on wait\n"); + + qcom_smem_state_update_bits(adsp->state, + BIT(adsp->stop_bit), + 0); + + ret = qcom_scm_pas_shutdown(ADSP_PAS_ID); + if (ret) + dev_err(adsp->dev, "failed to shutdown: %d\n", ret); + + return ret; +} + +static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int offset; + + offset = da - adsp->mem_reloc; + if (offset < 0 || offset + len > adsp->mem_size) + return NULL; + + return adsp->mem_region + offset; +} + +static const struct rproc_ops adsp_ops = { + .start = adsp_start, + .stop = adsp_stop, + .da_to_va = adsp_da_to_va, +}; + +static irqreturn_t adsp_wdog_interrupt(int irq, void *dev) +{ + struct qcom_adsp *adsp = dev; + + rproc_report_crash(adsp->rproc, 
RPROC_WATCHDOG); + + return IRQ_HANDLED; +} + +static irqreturn_t adsp_fatal_interrupt(int irq, void *dev) +{ + struct qcom_adsp *adsp = dev; + size_t len; + char *msg; + + msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, ADSP_CRASH_REASON_SMEM, &len); + if (!IS_ERR(msg) && len > 0 && msg[0]) + dev_err(adsp->dev, "fatal error received: %s\n", msg); + + rproc_report_crash(adsp->rproc, RPROC_FATAL_ERROR); + + if (!IS_ERR(msg)) + msg[0] = '\0'; + + return IRQ_HANDLED; +} + +static irqreturn_t adsp_ready_interrupt(int irq, void *dev) +{ + return IRQ_HANDLED; +} + +static irqreturn_t adsp_handover_interrupt(int irq, void *dev) +{ + struct qcom_adsp *adsp = dev; + + complete(&adsp->start_done); + + return IRQ_HANDLED; +} + +static irqreturn_t adsp_stop_ack_interrupt(int irq, void *dev) +{ + struct qcom_adsp *adsp = dev; + + complete(&adsp->stop_done); + + return IRQ_HANDLED; +} + +static int adsp_init_clock(struct qcom_adsp *adsp) +{ + int ret; + + adsp->xo = devm_clk_get(adsp->dev, "xo"); + if (IS_ERR(adsp->xo)) { + ret = PTR_ERR(adsp->xo); + if (ret != -EPROBE_DEFER) + dev_err(adsp->dev, "failed to get xo clock"); + return ret; + } + + return 0; +} + +static int adsp_init_regulator(struct qcom_adsp *adsp) +{ + adsp->cx_supply = devm_regulator_get(adsp->dev, "cx"); + if (IS_ERR(adsp->cx_supply)) + return PTR_ERR(adsp->cx_supply); + + regulator_set_load(adsp->cx_supply, 100000); + + return 0; +} + +static int adsp_request_irq(struct qcom_adsp *adsp, + struct platform_device *pdev, + const char *name, + irq_handler_t thread_fn) +{ + int ret; + + ret = platform_get_irq_byname(pdev, name); + if (ret < 0) { + dev_err(&pdev->dev, "no %s IRQ defined\n", name); + return ret; + } + + ret = devm_request_threaded_irq(&pdev->dev, ret, + NULL, thread_fn, + IRQF_ONESHOT, + "adsp", adsp); + if (ret) + dev_err(&pdev->dev, "request %s IRQ failed\n", name); + + return ret; +} + +static int adsp_alloc_memory_region(struct qcom_adsp *adsp) +{ + struct device_node *node; + struct resource r; + int ret; + + node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0); + if (!node) { + dev_err(adsp->dev, "no memory-region specified\n"); + return -EINVAL; + } + + ret = of_address_to_resource(node, 0, &r); + if (ret) + return ret; + + adsp->mem_phys = adsp->mem_reloc = r.start; + adsp->mem_size = resource_size(&r); + adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size); + if (!adsp->mem_region) { + dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n", + &r.start, adsp->mem_size); + return -EBUSY; + } + + return 0; +} + +static int adsp_probe(struct platform_device *pdev) +{ + struct qcom_adsp *adsp; + struct rproc *rproc; + int ret; + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + if (!qcom_scm_pas_supported(ADSP_PAS_ID)) { + dev_err(&pdev->dev, "PAS is not available for ADSP\n"); + return -ENXIO; + } + + rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops, + ADSP_FIRMWARE_NAME, sizeof(*adsp)); + if (!rproc) { + dev_err(&pdev->dev, "unable to allocate remoteproc\n"); + return -ENOMEM; + } + + rproc->fw_ops = &adsp_fw_ops; + + adsp = (struct qcom_adsp *)rproc->priv; + adsp->dev = &pdev->dev; + adsp->rproc = rproc; + platform_set_drvdata(pdev, adsp); + + init_completion(&adsp->start_done); + init_completion(&adsp->stop_done); + + ret = adsp_alloc_memory_region(adsp); + if (ret) + goto free_rproc; + + ret = adsp_init_clock(adsp); + if (ret) + goto free_rproc; + + ret = adsp_init_regulator(adsp); + if (ret) + goto free_rproc; + + ret = adsp_request_irq(adsp, pdev, "wdog", 
adsp_wdog_interrupt); + if (ret < 0) + goto free_rproc; + adsp->wdog_irq = ret; + + ret = adsp_request_irq(adsp, pdev, "fatal", adsp_fatal_interrupt); + if (ret < 0) + goto free_rproc; + adsp->fatal_irq = ret; + + ret = adsp_request_irq(adsp, pdev, "ready", adsp_ready_interrupt); + if (ret < 0) + goto free_rproc; + adsp->ready_irq = ret; + + ret = adsp_request_irq(adsp, pdev, "handover", adsp_handover_interrupt); + if (ret < 0) + goto free_rproc; + adsp->handover_irq = ret; + + ret = adsp_request_irq(adsp, pdev, "stop-ack", adsp_stop_ack_interrupt); + if (ret < 0) + goto free_rproc; + adsp->stop_ack_irq = ret; + + adsp->state = qcom_smem_state_get(&pdev->dev, "stop", + &adsp->stop_bit); + if (IS_ERR(adsp->state)) { + ret = PTR_ERR(adsp->state); + goto free_rproc; + } + + ret = rproc_add(rproc); + if (ret) + goto free_rproc; + + return 0; + +free_rproc: + rproc_free(rproc); + + return ret; +} + +static int adsp_remove(struct platform_device *pdev) +{ + struct qcom_adsp *adsp = platform_get_drvdata(pdev); + + qcom_smem_state_put(adsp->state); + rproc_del(adsp->rproc); + rproc_free(adsp->rproc); + + return 0; +} + +static const struct of_device_id adsp_of_match[] = { + { .compatible = "qcom,msm8974-adsp-pil" }, + { .compatible = "qcom,msm8996-adsp-pil" }, + { }, +}; +MODULE_DEVICE_TABLE(of, adsp_of_match); + +static struct platform_driver adsp_driver = { + .probe = adsp_probe, + .remove = adsp_remove, + .driver = { + .name = "qcom_adsp_pil", + .of_match_table = adsp_of_match, + }, +}; + +module_platform_driver(adsp_driver); +MODULE_DESCRIPTION("Qualcomm MSM8974/MSM8996 ADSP Peripherial Image Loader"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_mdt_loader.c b/drivers/remoteproc/qcom_mdt_loader.c index 114e8e4cef67adabc9bc7dd9fddde23bedd214df..2ff18cd6c0964fc34b2c1b9f88ce351f3019971d 100644 --- a/drivers/remoteproc/qcom_mdt_loader.c +++ b/drivers/remoteproc/qcom_mdt_loader.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include "remoteproc_internal.h" diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 2e0caaaa766a38939b8278da20ef6c17c8c70598..b08989b48df7bad5b4e9fb0ef11f81ee369363b9 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -894,6 +894,7 @@ static const struct of_device_id q6v5_of_match[] = { { .compatible = "qcom,q6v5-pil", }, { }, }; +MODULE_DEVICE_TABLE(of, q6v5_of_match); static struct platform_driver q6v5_driver = { .probe = q6v5_probe, diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index f5cedeaafba18946757227dd646d393a2bc3f5a0..ebd61f5d18bb05562f8a3a687c83d5024a8e13b3 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "qcom_mdt_loader.h" #include "remoteproc_internal.h" @@ -94,6 +95,10 @@ struct qcom_wcnss { phys_addr_t mem_reloc; void *mem_region; size_t mem_size; + + struct device_node *smd_node; + struct qcom_smd_edge *smd_edge; + struct rproc_subdev smd_subdev; }; static const struct wcnss_data riva_data = { @@ -143,7 +148,6 @@ void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, mutex_unlock(&wcnss->iris_lock); } -EXPORT_SYMBOL_GPL(qcom_wcnss_assign_iris); static int wcnss_load(struct rproc *rproc, const struct firmware *fw) { @@ -396,6 +400,23 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev) return IRQ_HANDLED; } +static int wcnss_smd_probe(struct rproc_subdev *subdev) +{ + struct qcom_wcnss *wcnss = 
container_of(subdev, struct qcom_wcnss, smd_subdev); + + wcnss->smd_edge = qcom_smd_register_edge(wcnss->dev, wcnss->smd_node); + + return IS_ERR(wcnss->smd_edge) ? PTR_ERR(wcnss->smd_edge) : 0; +} + +static void wcnss_smd_remove(struct rproc_subdev *subdev) +{ + struct qcom_wcnss *wcnss = container_of(subdev, struct qcom_wcnss, smd_subdev); + + qcom_smd_unregister_edge(wcnss->smd_edge); + wcnss->smd_edge = NULL; +} + static int wcnss_init_regulators(struct qcom_wcnss *wcnss, const struct wcnss_vreg_info *info, int num_vregs) @@ -578,6 +599,10 @@ static int wcnss_probe(struct platform_device *pdev) } } + wcnss->smd_node = of_get_child_by_name(pdev->dev.of_node, "smd-edge"); + if (wcnss->smd_node) + rproc_add_subdev(rproc, &wcnss->smd_subdev, wcnss_smd_probe, wcnss_smd_remove); + ret = rproc_add(rproc); if (ret) goto free_rproc; @@ -596,6 +621,7 @@ static int wcnss_remove(struct platform_device *pdev) of_platform_depopulate(&pdev->dev); + of_node_put(wcnss->smd_node); qcom_smem_state_put(wcnss->state); rproc_del(wcnss->rproc); rproc_free(wcnss->rproc); @@ -609,6 +635,7 @@ static const struct of_device_id wcnss_of_match[] = { { .compatible = "qcom,pronto-v2-pil", &pronto_v2_data }, { }, }; +MODULE_DEVICE_TABLE(of, wcnss_of_match); static struct platform_driver wcnss_driver = { .probe = wcnss_probe, @@ -619,6 +646,28 @@ static struct platform_driver wcnss_driver = { }, }; -module_platform_driver(wcnss_driver); +static int __init wcnss_init(void) +{ + int ret; + + ret = platform_driver_register(&wcnss_driver); + if (ret) + return ret; + + ret = platform_driver_register(&qcom_iris_driver); + if (ret) + platform_driver_unregister(&wcnss_driver); + + return ret; +} +module_init(wcnss_init); + +static void __exit wcnss_exit(void) +{ + platform_driver_unregister(&qcom_iris_driver); + platform_driver_unregister(&wcnss_driver); +} +module_exit(wcnss_exit); + MODULE_DESCRIPTION("Qualcomm Peripherial Image Loader for Wireless Subsystem"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h index 9dc4a9fe41e19af6e637285b5bbce1664ac78b8d..25fb7f62a4576ddda9cea53247246ae26c84b9de 100644 --- a/drivers/remoteproc/qcom_wcnss.h +++ b/drivers/remoteproc/qcom_wcnss.h @@ -4,6 +4,8 @@ struct qcom_iris; struct qcom_wcnss; +extern struct platform_driver qcom_iris_driver; + struct wcnss_vreg_info { const char * const name; int min_voltage; diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c index f0ca24a8dd0b77aef8103a8302d59af6fe658b8a..e842be58e8c7799a59b4d36b3f186f0c8631a664 100644 --- a/drivers/remoteproc/qcom_wcnss_iris.c +++ b/drivers/remoteproc/qcom_wcnss_iris.c @@ -94,14 +94,12 @@ int qcom_iris_enable(struct qcom_iris *iris) return ret; } -EXPORT_SYMBOL_GPL(qcom_iris_enable); void qcom_iris_disable(struct qcom_iris *iris) { clk_disable_unprepare(iris->xo_clk); regulator_bulk_disable(iris->num_vregs, iris->vregs); } -EXPORT_SYMBOL_GPL(qcom_iris_disable); static int qcom_iris_probe(struct platform_device *pdev) { @@ -173,8 +171,9 @@ static const struct of_device_id iris_of_match[] = { { .compatible = "qcom,wcn3680", .data = &wcn3680_data }, {} }; +MODULE_DEVICE_TABLE(of, iris_of_match); -static struct platform_driver wcnss_driver = { +struct platform_driver qcom_iris_driver = { .probe = qcom_iris_probe, .remove = qcom_iris_remove, .driver = { @@ -182,7 +181,3 @@ static struct platform_driver wcnss_driver = { .of_match_table = iris_of_match, }, }; - -module_platform_driver(wcnss_driver); -MODULE_DESCRIPTION("Qualcomm 
Wireless Subsystem Iris driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index c6bfb3496684efde7dc55d777e392445490db585..9a507e77eced18cc433792f651761dd137b75d62 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -236,6 +236,10 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) } notifyid = ret; + /* Potentially bump max_notifyid */ + if (notifyid > rproc->max_notifyid) + rproc->max_notifyid = notifyid; + dev_dbg(dev, "vring%d: va %p dma %pad size 0x%x idr %d\n", i, va, &dma, size, notifyid); @@ -296,6 +300,20 @@ void rproc_free_vring(struct rproc_vring *rvring) rsc->vring[idx].notifyid = -1; } +static int rproc_vdev_do_probe(struct rproc_subdev *subdev) +{ + struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); + + return rproc_add_virtio_dev(rvdev, rvdev->id); +} + +static void rproc_vdev_do_remove(struct rproc_subdev *subdev) +{ + struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); + + rproc_remove_virtio_dev(rvdev); +} + /** * rproc_handle_vdev() - handle a vdev fw resource * @rproc: the remote processor @@ -356,6 +374,9 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, if (!rvdev) return -ENOMEM; + kref_init(&rvdev->refcount); + + rvdev->id = rsc->id; rvdev->rproc = rproc; /* parse the vrings */ @@ -368,22 +389,51 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, /* remember the resource offset*/ rvdev->rsc_offset = offset; + /* allocate the vring resources */ + for (i = 0; i < rsc->num_of_vrings; i++) { + ret = rproc_alloc_vring(rvdev, i); + if (ret) + goto unwind_vring_allocations; + } + + /* track the rvdevs list reference */ + kref_get(&rvdev->refcount); + list_add_tail(&rvdev->node, &rproc->rvdevs); - /* it is now safe to add the virtio device */ - ret = rproc_add_virtio_dev(rvdev, rsc->id); - if (ret) - goto remove_rvdev; + rproc_add_subdev(rproc, &rvdev->subdev, + rproc_vdev_do_probe, rproc_vdev_do_remove); return 0; -remove_rvdev: - list_del(&rvdev->node); +unwind_vring_allocations: + for (i--; i >= 0; i--) + rproc_free_vring(&rvdev->vring[i]); free_rvdev: kfree(rvdev); return ret; } +void rproc_vdev_release(struct kref *ref) +{ + struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount); + struct rproc_vring *rvring; + struct rproc *rproc = rvdev->rproc; + int id; + + for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) { + rvring = &rvdev->vring[id]; + if (!rvring->va) + continue; + + rproc_free_vring(rvring); + } + + rproc_remove_subdev(rproc, &rvdev->subdev); + list_del(&rvdev->node); + kfree(rvdev); +} + /** * rproc_handle_trace() - handle a shared trace buffer resource * @rproc: the remote processor @@ -673,15 +723,6 @@ static int rproc_handle_carveout(struct rproc *rproc, return ret; } -static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc, - int offset, int avail) -{ - /* Summarize the number of notification IDs */ - rproc->max_notifyid += rsc->num_of_vrings; - - return 0; -} - /* * A lookup table for resource handlers. The indices are defined in * enum fw_resource_type. 
@@ -690,10 +731,6 @@ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = { [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, - [RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings, -}; - -static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = { [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev, }; @@ -736,6 +773,34 @@ static int rproc_handle_resources(struct rproc *rproc, int len, return ret; } +static int rproc_probe_subdevices(struct rproc *rproc) +{ + struct rproc_subdev *subdev; + int ret; + + list_for_each_entry(subdev, &rproc->subdevs, node) { + ret = subdev->probe(subdev); + if (ret) + goto unroll_registration; + } + + return 0; + +unroll_registration: + list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) + subdev->remove(subdev); + + return ret; +} + +static void rproc_remove_subdevices(struct rproc *rproc) +{ + struct rproc_subdev *subdev; + + list_for_each_entry(subdev, &rproc->subdevs, node) + subdev->remove(subdev); +} + /** * rproc_resource_cleanup() - clean up and free all acquired resources * @rproc: rproc handle @@ -782,7 +847,7 @@ static void rproc_resource_cleanup(struct rproc *rproc) /* clean up remote vdev entries */ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) - rproc_remove_virtio_dev(rvdev); + kref_put(&rvdev->refcount, rproc_vdev_release); } /* @@ -824,25 +889,16 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) /* * Create a copy of the resource table. When a virtio device starts * and calls vring_new_virtqueue() the address of the allocated vring - * will be stored in the cached_table. Before the device is started, - * cached_table will be copied into device memory. + * will be stored in the table_ptr. Before the device is started, + * table_ptr will be copied into device memory. */ - rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL); - if (!rproc->cached_table) + rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL); + if (!rproc->table_ptr) goto clean_up; - rproc->table_ptr = rproc->cached_table; - /* reset max_notifyid */ rproc->max_notifyid = -1; - /* look for virtio devices and register them */ - ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler); - if (ret) { - dev_err(dev, "Failed to handle vdev resources: %d\n", ret); - goto clean_up; - } - /* handle fw resources which are required to boot rproc */ ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers); if (ret) { @@ -858,18 +914,16 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) } /* - * The starting device has been given the rproc->cached_table as the + * The starting device has been given the rproc->table_ptr as the * resource table. The address of the vring along with the other - * allocated resources (carveouts etc) is stored in cached_table. + * allocated resources (carveouts etc) is stored in table_ptr. * In order to pass this information to the remote device we must copy * this information to device memory. We also update the table_ptr so * that any subsequent changes will be applied to the loaded version. 
*/ loaded_table = rproc_find_loaded_rsc_table(rproc, fw); - if (loaded_table) { - memcpy(loaded_table, rproc->cached_table, tablesz); - rproc->table_ptr = loaded_table; - } + if (loaded_table) + memcpy(loaded_table, rproc->table_ptr, tablesz); /* power up the remote processor */ ret = rproc->ops->start(rproc); @@ -878,17 +932,26 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) goto clean_up_resources; } + /* probe any subdevices for the remote processor */ + ret = rproc_probe_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to probe subdevices for %s: %d\n", + rproc->name, ret); + goto stop_rproc; + } + rproc->state = RPROC_RUNNING; dev_info(dev, "remote processor %s is now up\n", rproc->name); return 0; +stop_rproc: + rproc->ops->stop(rproc); clean_up_resources: rproc_resource_cleanup(rproc); clean_up: - kfree(rproc->cached_table); - rproc->cached_table = NULL; + kfree(rproc->table_ptr); rproc->table_ptr = NULL; rproc_disable_iommu(rproc); @@ -909,7 +972,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context) /* if rproc is marked always-on, request it to boot */ if (rproc->auto_boot) - rproc_boot_nowait(rproc); + rproc_boot(rproc); release_firmware(fw); /* allow rproc_del() contexts, if any, to proceed */ @@ -1007,7 +1070,6 @@ static void rproc_crash_handler_work(struct work_struct *work) /** * __rproc_boot() - boot a remote processor * @rproc: handle of a remote processor - * @wait: wait for rproc registration completion * * Boot a remote processor (i.e. load its firmware, power it on, ...). * @@ -1016,7 +1078,7 @@ static void rproc_crash_handler_work(struct work_struct *work) * * Returns 0 on success, and an appropriate error value otherwise. */ -static int __rproc_boot(struct rproc *rproc, bool wait) +static int __rproc_boot(struct rproc *rproc) { const struct firmware *firmware_p; struct device *dev; @@ -1050,10 +1112,6 @@ static int __rproc_boot(struct rproc *rproc, bool wait) goto downref_rproc; } - /* if rproc virtio is not yet configured, wait */ - if (wait) - wait_for_completion(&rproc->firmware_loading_complete); - ret = rproc_fw_boot(rproc, firmware_p); release_firmware(firmware_p); @@ -1072,21 +1130,10 @@ static int __rproc_boot(struct rproc *rproc, bool wait) */ int rproc_boot(struct rproc *rproc) { - return __rproc_boot(rproc, true); + return __rproc_boot(rproc); } EXPORT_SYMBOL(rproc_boot); -/** - * rproc_boot_nowait() - boot a remote processor - * @rproc: handle of a remote processor - * - * Same as rproc_boot() but don't wait for rproc registration completion - */ -int rproc_boot_nowait(struct rproc *rproc) -{ - return __rproc_boot(rproc, false); -} - /** * rproc_shutdown() - power off the remote processor * @rproc: the remote processor @@ -1121,6 +1168,9 @@ void rproc_shutdown(struct rproc *rproc) if (!atomic_dec_and_test(&rproc->power)) goto out; + /* remove any subdevices for the remote processor */ + rproc_remove_subdevices(rproc); + /* power off the remote processor */ ret = rproc->ops->stop(rproc); if (ret) { @@ -1135,8 +1185,7 @@ void rproc_shutdown(struct rproc *rproc) rproc_disable_iommu(rproc); /* Free the copy of the resource table */ - kfree(rproc->cached_table); - rproc->cached_table = NULL; + kfree(rproc->table_ptr); rproc->table_ptr = NULL; /* if in crash state, unlock crash handler */ @@ -1233,9 +1282,6 @@ int rproc_add(struct rproc *rproc) dev_info(dev, "%s is available\n", rproc->name); - dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n"); - dev_info(dev, 
"THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n"); - /* create debugfs entries */ rproc_create_debug_dir(rproc); ret = rproc_add_virtio_devices(rproc); @@ -1273,6 +1319,7 @@ static void rproc_type_release(struct device *dev) if (rproc->index >= 0) ida_simple_remove(&rproc_dev_index, rproc->index); + kfree(rproc->firmware); kfree(rproc); } @@ -1310,31 +1357,31 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, { struct rproc *rproc; char *p, *template = "rproc-%s-fw"; - int name_len = 0; + int name_len; if (!dev || !name || !ops) return NULL; - if (!firmware) + if (!firmware) { /* - * Make room for default firmware name (minus %s plus '\0'). * If the caller didn't pass in a firmware name then - * construct a default name. We're already glomming 'len' - * bytes onto the end of the struct rproc allocation, so do - * a few more for the default firmware name (but only if - * the caller doesn't pass one). + * construct a default name. */ name_len = strlen(name) + strlen(template) - 2 + 1; - - rproc = kzalloc(sizeof(*rproc) + len + name_len, GFP_KERNEL); - if (!rproc) - return NULL; - - if (!firmware) { - p = (char *)rproc + sizeof(struct rproc) + len; + p = kmalloc(name_len, GFP_KERNEL); + if (!p) + return NULL; snprintf(p, name_len, template, name); } else { - p = (char *)firmware; + p = kstrdup(firmware, GFP_KERNEL); + if (!p) + return NULL; + } + + rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL); + if (!rproc) { + kfree(p); + return NULL; } rproc->firmware = p; @@ -1346,6 +1393,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, device_initialize(&rproc->dev); rproc->dev.parent = dev; rproc->dev.type = &rproc_type; + rproc->dev.class = &rproc_class; /* Assign a unique device index and name */ rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); @@ -1370,6 +1418,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, INIT_LIST_HEAD(&rproc->mappings); INIT_LIST_HEAD(&rproc->traces); INIT_LIST_HEAD(&rproc->rvdevs); + INIT_LIST_HEAD(&rproc->subdevs); INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work); init_completion(&rproc->crash_comp); @@ -1428,8 +1477,6 @@ EXPORT_SYMBOL(rproc_put); */ int rproc_del(struct rproc *rproc) { - struct rproc_vdev *rvdev, *tmp; - if (!rproc) return -EINVAL; @@ -1441,10 +1488,6 @@ int rproc_del(struct rproc *rproc) if (rproc->auto_boot) rproc_shutdown(rproc); - /* clean up remote vdev entries */ - list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node) - rproc_remove_virtio_dev(rvdev); - /* the rproc is downref'ed as soon as it's removed from the klist */ mutex_lock(&rproc_list_mutex); list_del(&rproc->node); @@ -1456,6 +1499,36 @@ int rproc_del(struct rproc *rproc) } EXPORT_SYMBOL(rproc_del); +/** + * rproc_add_subdev() - add a subdevice to a remoteproc + * @rproc: rproc handle to add the subdevice to + * @subdev: subdev handle to register + * @probe: function to call when the rproc boots + * @remove: function to call when the rproc shuts down + */ +void rproc_add_subdev(struct rproc *rproc, + struct rproc_subdev *subdev, + int (*probe)(struct rproc_subdev *subdev), + void (*remove)(struct rproc_subdev *subdev)) +{ + subdev->probe = probe; + subdev->remove = remove; + + list_add_tail(&subdev->node, &rproc->subdevs); +} +EXPORT_SYMBOL(rproc_add_subdev); + +/** + * rproc_remove_subdev() - remove a subdevice from a remoteproc + * @rproc: rproc handle to remove the subdevice from + * @subdev: subdev handle, previously registered with 
rproc_add_subdev() + */ +void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev) +{ + list_del(&subdev->node); +} +EXPORT_SYMBOL(rproc_remove_subdev); + /** * rproc_report_crash() - rproc crash reporter function * @rproc: remote processor @@ -1484,6 +1557,7 @@ EXPORT_SYMBOL(rproc_report_crash); static int __init remoteproc_init(void) { + rproc_init_sysfs(); rproc_init_debugfs(); return 0; @@ -1495,6 +1569,7 @@ static void __exit remoteproc_exit(void) ida_destroy(&rproc_dev_index); rproc_exit_debugfs(); + rproc_exit_sysfs(); } module_exit(remoteproc_exit); diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 374797206c79032ee854219a7fbb6fee5fe87066..1c122e230cec5189fdec1a72897b7a74f0309b50 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -59,75 +59,6 @@ static const struct file_operations trace_rproc_ops = { .llseek = generic_file_llseek, }; -/* - * A state-to-string lookup table, for exposing a human readable state - * via debugfs. Always keep in sync with enum rproc_state - */ -static const char * const rproc_state_string[] = { - "offline", - "suspended", - "running", - "crashed", - "invalid", -}; - -/* expose the state of the remote processor via debugfs */ -static ssize_t rproc_state_read(struct file *filp, char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct rproc *rproc = filp->private_data; - unsigned int state; - char buf[30]; - int i; - - state = rproc->state > RPROC_LAST ? RPROC_LAST : rproc->state; - - i = scnprintf(buf, 30, "%.28s (%d)\n", rproc_state_string[state], - rproc->state); - - return simple_read_from_buffer(userbuf, count, ppos, buf, i); -} - -static ssize_t rproc_state_write(struct file *filp, const char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct rproc *rproc = filp->private_data; - char buf[10]; - int ret; - - if (count > sizeof(buf) || count <= 0) - return -EINVAL; - - ret = copy_from_user(buf, userbuf, count); - if (ret) - return -EFAULT; - - if (buf[count - 1] == '\n') - buf[count - 1] = '\0'; - - if (!strncmp(buf, "start", count)) { - ret = rproc_boot(rproc); - if (ret) { - dev_err(&rproc->dev, "Boot failed: %d\n", ret); - return ret; - } - } else if (!strncmp(buf, "stop", count)) { - rproc_shutdown(rproc); - } else { - dev_err(&rproc->dev, "Unrecognised option: %s\n", buf); - return -EINVAL; - } - - return count; -} - -static const struct file_operations rproc_state_ops = { - .read = rproc_state_read, - .write = rproc_state_write, - .open = simple_open, - .llseek = generic_file_llseek, -}; - /* expose the name of the remote processor via debugfs */ static ssize_t rproc_name_read(struct file *filp, char __user *userbuf, size_t count, loff_t *ppos) @@ -265,8 +196,6 @@ void rproc_create_debug_dir(struct rproc *rproc) debugfs_create_file("name", 0400, rproc->dbg_dir, rproc, &rproc_name_ops); - debugfs_create_file("state", 0400, rproc->dbg_dir, - rproc, &rproc_state_ops); debugfs_create_file("recovery", 0400, rproc->dbg_dir, rproc, &rproc_recovery_ops); } diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 4cf93ca2816e29b8ac7a87bf6904020f05a8f079..1e9e5b3f021c0728fb8906f8a3d3d627f40d2ac4 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -49,6 +49,7 @@ struct rproc_fw_ops { void rproc_release(struct kref *kref); irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id); int rproc_boot_nowait(struct rproc *rproc); 
+void rproc_vdev_release(struct kref *ref); /* from remoteproc_virtio.c */ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id); @@ -63,6 +64,11 @@ void rproc_create_debug_dir(struct rproc *rproc); void rproc_init_debugfs(void); void rproc_exit_debugfs(void); +/* from remoteproc_sysfs.c */ +extern struct class rproc_class; +int rproc_init_sysfs(void); +void rproc_exit_sysfs(void); + void rproc_free_vring(struct rproc_vring *rvring); int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..bc5b0e00efb15422bcbf57a32798a4ea28a42646 --- /dev/null +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -0,0 +1,151 @@ +/* + * Remote Processor Framework + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +#include "remoteproc_internal.h" + +#define to_rproc(d) container_of(d, struct rproc, dev) + +/* Expose the loaded / running firmware name via sysfs */ +static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rproc *rproc = to_rproc(dev); + + return sprintf(buf, "%s\n", rproc->firmware); +} + +/* Change firmware name via sysfs */ +static ssize_t firmware_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rproc *rproc = to_rproc(dev); + char *p; + int err, len = count; + + err = mutex_lock_interruptible(&rproc->lock); + if (err) { + dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, err); + return -EINVAL; + } + + if (rproc->state != RPROC_OFFLINE) { + dev_err(dev, "can't change firmware while running\n"); + err = -EBUSY; + goto out; + } + + len = strcspn(buf, "\n"); + + p = kstrndup(buf, len, GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto out; + } + + kfree(rproc->firmware); + rproc->firmware = p; +out: + mutex_unlock(&rproc->lock); + + return err ? err : count; +} +static DEVICE_ATTR_RW(firmware); + +/* + * A state-to-string lookup table, for exposing a human readable state + * via sysfs. Always keep in sync with enum rproc_state + */ +static const char * const rproc_state_string[] = { + [RPROC_OFFLINE] = "offline", + [RPROC_SUSPENDED] = "suspended", + [RPROC_RUNNING] = "running", + [RPROC_CRASHED] = "crashed", + [RPROC_LAST] = "invalid", +}; + +/* Expose the state of the remote processor via sysfs */ +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rproc *rproc = to_rproc(dev); + unsigned int state; + + state = rproc->state > RPROC_LAST ? 
RPROC_LAST : rproc->state; + return sprintf(buf, "%s\n", rproc_state_string[state]); +} + +/* Change remote processor state via sysfs */ +static ssize_t state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rproc *rproc = to_rproc(dev); + int ret = 0; + + if (sysfs_streq(buf, "start")) { + if (rproc->state == RPROC_RUNNING) + return -EBUSY; + + ret = rproc_boot(rproc); + if (ret) + dev_err(&rproc->dev, "Boot failed: %d\n", ret); + } else if (sysfs_streq(buf, "stop")) { + if (rproc->state != RPROC_RUNNING) + return -EINVAL; + + rproc_shutdown(rproc); + } else { + dev_err(&rproc->dev, "Unrecognised option: %s\n", buf); + ret = -EINVAL; + } + return ret ? ret : count; +} +static DEVICE_ATTR_RW(state); + +static struct attribute *rproc_attrs[] = { + &dev_attr_firmware.attr, + &dev_attr_state.attr, + NULL +}; + +static const struct attribute_group rproc_devgroup = { + .attrs = rproc_attrs +}; + +static const struct attribute_group *rproc_devgroups[] = { + &rproc_devgroup, + NULL +}; + +struct class rproc_class = { + .name = "remoteproc", + .dev_groups = rproc_devgroups, +}; + +int __init rproc_init_sysfs(void) +{ + /* create remoteproc device class for sysfs */ + int err = class_register(&rproc_class); + + if (err) + pr_err("remoteproc: unable to register class\n"); + return err; +} + +void __exit rproc_exit_sysfs(void) +{ + class_unregister(&rproc_class); +} diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 01870a16d6d23e7023d511da3d27a1695899597a..364411fb77343f5c043bb9e681ecde8a5ca4cec7 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -79,7 +79,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, struct rproc_vring *rvring; struct virtqueue *vq; void *addr; - int len, size, ret; + int len, size; /* we're temporarily limited to two virtqueues per rvdev */ if (id >= ARRAY_SIZE(rvdev->vring)) @@ -88,10 +88,6 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, if (!name) return NULL; - ret = rproc_alloc_vring(rvdev, id); - if (ret) - return ERR_PTR(ret); - rvring = &rvdev->vring[id]; addr = rvring->va; len = rvring->len; @@ -130,7 +126,6 @@ static void __rproc_virtio_del_vqs(struct virtio_device *vdev) rvring = vq->priv; rvring->vq = NULL; vring_del_virtqueue(vq); - rproc_free_vring(rvring); } } @@ -282,14 +277,13 @@ static const struct virtio_config_ops rproc_virtio_config_ops = { * Never call this function directly; it will be called by the driver * core when needed. 
*/ -static void rproc_vdev_release(struct device *dev) +static void rproc_virtio_dev_release(struct device *dev) { struct virtio_device *vdev = dev_to_virtio(dev); struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); struct rproc *rproc = vdev_to_rproc(vdev); - list_del(&rvdev->node); - kfree(rvdev); + kref_put(&rvdev->refcount, rproc_vdev_release); put_device(&rproc->dev); } @@ -313,7 +307,7 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) vdev->id.device = id, vdev->config = &rproc_virtio_config_ops, vdev->dev.parent = dev; - vdev->dev.release = rproc_vdev_release; + vdev->dev.release = rproc_virtio_dev_release; /* * We're indirectly making a non-temporary copy of the rproc pointer @@ -325,6 +319,9 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) */ get_device(&rproc->dev); + /* Reference the vdev and vring allocations */ + kref_get(&rvdev->refcount); + ret = register_virtio_device(vdev); if (ret) { put_device(&rproc->dev); diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c index ae8963fcc8c8f8f2b0ccb3cedc43f2419eddb6f9..da4e152e97331fbed605887430dddbe22947c9ff 100644 --- a/drivers/remoteproc/st_remoteproc.c +++ b/drivers/remoteproc/st_remoteproc.c @@ -245,8 +245,10 @@ static int st_rproc_probe(struct platform_device *pdev) goto free_rproc; enabled = st_rproc_state(pdev); - if (enabled < 0) + if (enabled < 0) { + ret = enabled; goto free_rproc; + } if (enabled) { atomic_inc(&rproc->power); diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c new file mode 100644 index 0000000000000000000000000000000000000000..507716c8721fd306f056e1a738e68edff6fdc546 --- /dev/null +++ b/drivers/remoteproc/st_slim_rproc.c @@ -0,0 +1,364 @@ +/* + * SLIM core rproc driver + * + * Copyright (C) 2016 STMicroelectronics + * + * Author: Peter Griffin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "remoteproc_internal.h" + +/* SLIM core registers */ +#define SLIM_ID_OFST 0x0 +#define SLIM_VER_OFST 0x4 + +#define SLIM_EN_OFST 0x8 +#define SLIM_EN_RUN BIT(0) + +#define SLIM_CLK_GATE_OFST 0xC +#define SLIM_CLK_GATE_DIS BIT(0) +#define SLIM_CLK_GATE_RESET BIT(2) + +#define SLIM_SLIM_PC_OFST 0x20 + +/* DMEM registers */ +#define SLIM_REV_ID_OFST 0x0 +#define SLIM_REV_ID_MIN_MASK GENMASK(15, 8) +#define SLIM_REV_ID_MIN(id) ((id & SLIM_REV_ID_MIN_MASK) >> 8) +#define SLIM_REV_ID_MAJ_MASK GENMASK(23, 16) +#define SLIM_REV_ID_MAJ(id) ((id & SLIM_REV_ID_MAJ_MASK) >> 16) + + +/* peripherals registers */ +#define SLIM_STBUS_SYNC_OFST 0xF88 +#define SLIM_STBUS_SYNC_DIS BIT(0) + +#define SLIM_INT_SET_OFST 0xFD4 +#define SLIM_INT_CLR_OFST 0xFD8 +#define SLIM_INT_MASK_OFST 0xFDC + +#define SLIM_CMD_CLR_OFST 0xFC8 +#define SLIM_CMD_MASK_OFST 0xFCC + +static const char *mem_names[ST_SLIM_MEM_MAX] = { + [ST_SLIM_DMEM] = "dmem", + [ST_SLIM_IMEM] = "imem", +}; + +static int slim_clk_get(struct st_slim_rproc *slim_rproc, struct device *dev) +{ + int clk, err; + + for (clk = 0; clk < ST_SLIM_MAX_CLK; clk++) { + slim_rproc->clks[clk] = of_clk_get(dev->of_node, clk); + if (IS_ERR(slim_rproc->clks[clk])) { + err = PTR_ERR(slim_rproc->clks[clk]); + if (err == -EPROBE_DEFER) + goto err_put_clks; + slim_rproc->clks[clk] = NULL; + break; + } + } + + return 0; + +err_put_clks: + while (--clk >= 0) + clk_put(slim_rproc->clks[clk]); + + return err; +} + +static void slim_clk_disable(struct st_slim_rproc *slim_rproc) +{ + int clk; + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) + clk_disable_unprepare(slim_rproc->clks[clk]); +} + +static int slim_clk_enable(struct st_slim_rproc *slim_rproc) +{ + int clk, ret; + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) { + ret = clk_prepare_enable(slim_rproc->clks[clk]); + if (ret) + goto err_disable_clks; + } + + return 0; + +err_disable_clks: + while (--clk >= 0) + clk_disable_unprepare(slim_rproc->clks[clk]); + + return ret; +} + +/* + * Remoteproc slim specific device handlers + */ +static int slim_rproc_start(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + struct st_slim_rproc *slim_rproc = rproc->priv; + unsigned long hw_id, hw_ver, fw_rev; + u32 val; + + /* disable CPU pipeline clock & reset CPU pipeline */ + val = SLIM_CLK_GATE_DIS | SLIM_CLK_GATE_RESET; + writel(val, slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + /* disable SLIM core STBus sync */ + writel(SLIM_STBUS_SYNC_DIS, slim_rproc->peri + SLIM_STBUS_SYNC_OFST); + + /* enable cpu pipeline clock */ + writel(!SLIM_CLK_GATE_DIS, + slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + /* clear int & cmd mailbox */ + writel(~0U, slim_rproc->peri + SLIM_INT_CLR_OFST); + writel(~0U, slim_rproc->peri + SLIM_CMD_CLR_OFST); + + /* enable all channels cmd & int */ + writel(~0U, slim_rproc->peri + SLIM_INT_MASK_OFST); + writel(~0U, slim_rproc->peri + SLIM_CMD_MASK_OFST); + + /* enable cpu */ + writel(SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST); + + hw_id = readl_relaxed(slim_rproc->slimcore + SLIM_ID_OFST); + hw_ver = readl_relaxed(slim_rproc->slimcore + SLIM_VER_OFST); + + fw_rev = readl(slim_rproc->mem[ST_SLIM_DMEM].cpu_addr + + SLIM_REV_ID_OFST); + + dev_info(dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n", + SLIM_REV_ID_MAJ(fw_rev), SLIM_REV_ID_MIN(fw_rev), + hw_id, hw_ver); + + return 0; +} + +static int slim_rproc_stop(struct rproc *rproc) +{ + struct 
st_slim_rproc *slim_rproc = rproc->priv; + u32 val; + + /* mask all (cmd & int) channels */ + writel(0UL, slim_rproc->peri + SLIM_INT_MASK_OFST); + writel(0UL, slim_rproc->peri + SLIM_CMD_MASK_OFST); + + /* disable cpu pipeline clock */ + writel(SLIM_CLK_GATE_DIS, slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + writel(!SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST); + + val = readl(slim_rproc->slimcore + SLIM_EN_OFST); + if (val & SLIM_EN_RUN) + dev_warn(&rproc->dev, "Failed to disable SLIM"); + + dev_dbg(&rproc->dev, "slim stopped\n"); + + return 0; +} + +static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len) +{ + struct st_slim_rproc *slim_rproc = rproc->priv; + void *va = NULL; + int i; + + for (i = 0; i < ST_SLIM_MEM_MAX; i++) { + if (da != slim_rproc->mem[i].bus_addr) + continue; + + if (len <= slim_rproc->mem[i].size) { + /* __force to make sparse happy with type conversion */ + va = (__force void *)slim_rproc->mem[i].cpu_addr; + break; + } + } + + dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va); + + return va; +} + +static struct rproc_ops slim_rproc_ops = { + .start = slim_rproc_start, + .stop = slim_rproc_stop, + .da_to_va = slim_rproc_da_to_va, +}; + +/* + * Firmware handler operations: sanity, boot address, load ... + */ + +static struct resource_table empty_rsc_tbl = { + .ver = 1, + .num = 0, +}; + +static struct resource_table *slim_rproc_find_rsc_table(struct rproc *rproc, + const struct firmware *fw, + int *tablesz) +{ + *tablesz = sizeof(empty_rsc_tbl); + return &empty_rsc_tbl; +} + +static struct rproc_fw_ops slim_rproc_fw_ops = { + .find_rsc_table = slim_rproc_find_rsc_table, +}; + +/** + * st_slim_rproc_alloc() - allocate and initialise slim rproc + * @pdev: Pointer to the platform_device struct + * @fw_name: Name of firmware for rproc to use + * + * Function for allocating and initialising a slim rproc for use by + * device drivers whose IP is based around the SLIM core. It + * obtains and enables any clocks required by the SLIM core and also + * ioremaps the various IO. + * + * Returns st_slim_rproc pointer or PTR_ERR() on error. 
+ */ + +struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, + char *fw_name) +{ + struct device *dev = &pdev->dev; + struct st_slim_rproc *slim_rproc; + struct device_node *np = dev->of_node; + struct rproc *rproc; + struct resource *res; + int err, i; + const struct rproc_fw_ops *elf_ops; + + if (!fw_name) + return ERR_PTR(-EINVAL); + + if (!of_device_is_compatible(np, "st,slim-rproc")) + return ERR_PTR(-EINVAL); + + rproc = rproc_alloc(dev, np->name, &slim_rproc_ops, + fw_name, sizeof(*slim_rproc)); + if (!rproc) + return ERR_PTR(-ENOMEM); + + rproc->has_iommu = false; + + slim_rproc = rproc->priv; + slim_rproc->rproc = rproc; + + elf_ops = rproc->fw_ops; + /* Use some generic elf ops */ + slim_rproc_fw_ops.load = elf_ops->load; + slim_rproc_fw_ops.sanity_check = elf_ops->sanity_check; + + rproc->fw_ops = &slim_rproc_fw_ops; + + /* get imem and dmem */ + for (i = 0; i < ARRAY_SIZE(mem_names); i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mem_names[i]); + + slim_rproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->mem[i].cpu_addr)) { + dev_err(&pdev->dev, "devm_ioremap_resource failed\n"); + err = PTR_ERR(slim_rproc->mem[i].cpu_addr); + goto err; + } + slim_rproc->mem[i].bus_addr = res->start; + slim_rproc->mem[i].size = resource_size(res); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore"); + slim_rproc->slimcore = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->slimcore)) { + dev_err(&pdev->dev, "failed to ioremap slimcore IO\n"); + err = PTR_ERR(slim_rproc->slimcore); + goto err; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals"); + slim_rproc->peri = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->peri)) { + dev_err(&pdev->dev, "failed to ioremap peripherals IO\n"); + err = PTR_ERR(slim_rproc->peri); + goto err; + } + + err = slim_clk_get(slim_rproc, dev); + if (err) + goto err; + + err = slim_clk_enable(slim_rproc); + if (err) { + dev_err(dev, "Failed to enable clocks\n"); + goto err_clk_put; + } + + /* Register as a remoteproc device */ + err = rproc_add(rproc); + if (err) { + dev_err(dev, "registration of slim remoteproc failed\n"); + goto err_clk_dis; + } + + return slim_rproc; + +err_clk_dis: + slim_clk_disable(slim_rproc); +err_clk_put: + for (i = 0; i < ST_SLIM_MAX_CLK && slim_rproc->clks[i]; i++) + clk_put(slim_rproc->clks[i]); +err: + rproc_free(rproc); + return ERR_PTR(err); +} +EXPORT_SYMBOL(st_slim_rproc_alloc); + +/** + * st_slim_rproc_put() - put slim rproc resources + * @slim_rproc: Pointer to the st_slim_rproc struct + * + * Function for calling respective _put() functions on slim_rproc resources. 
+ * + */ +void st_slim_rproc_put(struct st_slim_rproc *slim_rproc) +{ + int clk; + + if (!slim_rproc) + return; + + slim_clk_disable(slim_rproc); + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) + clk_put(slim_rproc->clks[clk]); + + rproc_del(slim_rproc->rproc); + rproc_free(slim_rproc->rproc); +} +EXPORT_SYMBOL(st_slim_rproc_put); + +MODULE_AUTHOR("Peter Griffin "); +MODULE_DESCRIPTION("STMicroelectronics SLIM core rproc driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c deleted file mode 100644 index 03d69a9a3c5b733ee78d7504c8d3ba8f9db38851..0000000000000000000000000000000000000000 --- a/drivers/remoteproc/ste_modem_rproc.c +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Copyright (C) ST-Ericsson AB 2012 - * Author: Sjur Brændeland - * License terms: GNU General Public License (GPL), version 2 - */ - -#include -#include -#include -#include -#include "remoteproc_internal.h" - -#define SPROC_FW_SIZE (50 * 4096) -#define SPROC_MAX_TOC_ENTRIES 32 -#define SPROC_MAX_NOTIFY_ID 14 -#define SPROC_RESOURCE_NAME "rsc-table" -#define SPROC_MODEM_NAME "ste-modem" -#define SPROC_MODEM_FIRMWARE SPROC_MODEM_NAME "-fw.bin" - -#define sproc_dbg(sproc, fmt, ...) \ - dev_dbg(&sproc->mdev->pdev.dev, fmt, ##__VA_ARGS__) -#define sproc_err(sproc, fmt, ...) \ - dev_err(&sproc->mdev->pdev.dev, fmt, ##__VA_ARGS__) - -/* STE-modem control structure */ -struct sproc { - struct rproc *rproc; - struct ste_modem_device *mdev; - int error; - void *fw_addr; - size_t fw_size; - dma_addr_t fw_dma_addr; -}; - -/* STE-Modem firmware entry */ -struct ste_toc_entry { - __le32 start; - __le32 size; - __le32 flags; - __le32 entry_point; - __le32 load_addr; - char name[12]; -}; - -/* - * The Table Of Content is located at the start of the firmware image and - * at offset zero in the shared memory region. The resource table typically - * contains the initial boot image (boot strap) and other information elements - * such as remoteproc resource table. Each entry is identified by a unique - * name. - */ -struct ste_toc { - struct ste_toc_entry table[SPROC_MAX_TOC_ENTRIES]; -}; - -/* Loads the firmware to shared memory. */ -static int sproc_load_segments(struct rproc *rproc, const struct firmware *fw) -{ - struct sproc *sproc = rproc->priv; - - memcpy(sproc->fw_addr, fw->data, fw->size); - - return 0; -} - -/* Find the entry for resource table in the Table of Content */ -static const struct ste_toc_entry *sproc_find_rsc_entry(const void *data) -{ - int i; - const struct ste_toc *toc = data; - - /* Search the table for the resource table */ - for (i = 0; i < SPROC_MAX_TOC_ENTRIES && - toc->table[i].start != 0xffffffff; i++) { - if (!strncmp(toc->table[i].name, SPROC_RESOURCE_NAME, - sizeof(toc->table[i].name))) - return &toc->table[i]; - } - - return NULL; -} - -/* Find the resource table inside the remote processor's firmware. 
*/ -static struct resource_table * -sproc_find_rsc_table(struct rproc *rproc, const struct firmware *fw, - int *tablesz) -{ - struct sproc *sproc = rproc->priv; - struct resource_table *table; - const struct ste_toc_entry *entry; - - if (!fw) - return NULL; - - entry = sproc_find_rsc_entry(fw->data); - if (!entry) { - sproc_err(sproc, "resource table not found in fw\n"); - return NULL; - } - - table = (void *)(fw->data + entry->start); - - /* sanity check size and offset of resource table */ - if (entry->start > SPROC_FW_SIZE || - entry->size > SPROC_FW_SIZE || - fw->size > SPROC_FW_SIZE || - entry->start + entry->size > fw->size || - sizeof(struct resource_table) > entry->size) { - sproc_err(sproc, "bad size of fw or resource table\n"); - return NULL; - } - - /* we don't support any version beyond the first */ - if (table->ver != 1) { - sproc_err(sproc, "unsupported fw ver: %d\n", table->ver); - return NULL; - } - - /* make sure reserved bytes are zeroes */ - if (table->reserved[0] || table->reserved[1]) { - sproc_err(sproc, "non zero reserved bytes\n"); - return NULL; - } - - /* make sure the offsets array isn't truncated */ - if (table->num > SPROC_MAX_TOC_ENTRIES || - table->num * sizeof(table->offset[0]) + - sizeof(struct resource_table) > entry->size) { - sproc_err(sproc, "resource table incomplete\n"); - return NULL; - } - - /* If the fw size has grown, release the previous fw allocation */ - if (SPROC_FW_SIZE < fw->size) { - sproc_err(sproc, "Insufficient space for fw (%d < %zd)\n", - SPROC_FW_SIZE, fw->size); - return NULL; - } - - sproc->fw_size = fw->size; - *tablesz = entry->size; - - return table; -} - -/* Find the resource table inside the remote processor's firmware. */ -static struct resource_table * -sproc_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw) -{ - struct sproc *sproc = rproc->priv; - const struct ste_toc_entry *entry; - - if (!fw || !sproc->fw_addr) - return NULL; - - entry = sproc_find_rsc_entry(sproc->fw_addr); - if (!entry) { - sproc_err(sproc, "resource table not found in fw\n"); - return NULL; - } - - return sproc->fw_addr + entry->start; -} - -/* STE modem firmware handler operations */ -static const struct rproc_fw_ops sproc_fw_ops = { - .load = sproc_load_segments, - .find_rsc_table = sproc_find_rsc_table, - .find_loaded_rsc_table = sproc_find_loaded_rsc_table, -}; - -/* Kick the modem with specified notification id */ -static void sproc_kick(struct rproc *rproc, int vqid) -{ - struct sproc *sproc = rproc->priv; - - sproc_dbg(sproc, "kick vqid:%d\n", vqid); - - /* - * We need different notification IDs for RX and TX so add - * an offset on TX notification IDs. 
- */ - sproc->mdev->ops.kick(sproc->mdev, vqid + SPROC_MAX_NOTIFY_ID); -} - -/* Received a kick from a modem, kick the virtqueue */ -static void sproc_kick_callback(struct ste_modem_device *mdev, int vqid) -{ - struct sproc *sproc = mdev->drv_data; - - if (rproc_vq_interrupt(sproc->rproc, vqid) == IRQ_NONE) - sproc_dbg(sproc, "no message was found in vqid %d\n", vqid); -} - -static struct ste_modem_dev_cb sproc_dev_cb = { - .kick = sproc_kick_callback, -}; - -/* Start the STE modem */ -static int sproc_start(struct rproc *rproc) -{ - struct sproc *sproc = rproc->priv; - int i, err; - - sproc_dbg(sproc, "start ste-modem\n"); - - /* Sanity test the max_notifyid */ - if (rproc->max_notifyid > SPROC_MAX_NOTIFY_ID) { - sproc_err(sproc, "Notification IDs too high:%d\n", - rproc->max_notifyid); - return -EINVAL; - } - - /* Subscribe to notifications */ - for (i = 0; i <= rproc->max_notifyid; i++) { - err = sproc->mdev->ops.kick_subscribe(sproc->mdev, i); - if (err) { - sproc_err(sproc, - "subscription of kicks failed:%d\n", err); - return err; - } - } - - /* Request modem start-up*/ - return sproc->mdev->ops.power(sproc->mdev, true); -} - -/* Stop the STE modem */ -static int sproc_stop(struct rproc *rproc) -{ - struct sproc *sproc = rproc->priv; - - sproc_dbg(sproc, "stop ste-modem\n"); - - return sproc->mdev->ops.power(sproc->mdev, false); -} - -static struct rproc_ops sproc_ops = { - .start = sproc_start, - .stop = sproc_stop, - .kick = sproc_kick, -}; - -/* STE modem device is unregistered */ -static int sproc_drv_remove(struct platform_device *pdev) -{ - struct ste_modem_device *mdev = - container_of(pdev, struct ste_modem_device, pdev); - struct sproc *sproc = mdev->drv_data; - - sproc_dbg(sproc, "remove ste-modem\n"); - - /* Reset device callback functions */ - sproc->mdev->ops.setup(sproc->mdev, NULL); - - /* Unregister as remoteproc device */ - rproc_del(sproc->rproc); - dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, - sproc->fw_addr, sproc->fw_dma_addr); - rproc_free(sproc->rproc); - - mdev->drv_data = NULL; - - return 0; -} - -/* Handle probe of a modem device */ -static int sproc_probe(struct platform_device *pdev) -{ - struct ste_modem_device *mdev = - container_of(pdev, struct ste_modem_device, pdev); - struct sproc *sproc; - struct rproc *rproc; - int err; - - dev_dbg(&mdev->pdev.dev, "probe ste-modem\n"); - - if (!mdev->ops.setup || !mdev->ops.kick || !mdev->ops.kick_subscribe || - !mdev->ops.power) { - dev_err(&mdev->pdev.dev, "invalid mdev ops\n"); - return -EINVAL; - } - - rproc = rproc_alloc(&mdev->pdev.dev, mdev->pdev.name, &sproc_ops, - SPROC_MODEM_FIRMWARE, sizeof(*sproc)); - if (!rproc) - return -ENOMEM; - - sproc = rproc->priv; - sproc->mdev = mdev; - sproc->rproc = rproc; - rproc->has_iommu = false; - mdev->drv_data = sproc; - - /* Provide callback functions to modem device */ - sproc->mdev->ops.setup(sproc->mdev, &sproc_dev_cb); - - /* Set the STE-modem specific firmware handler */ - rproc->fw_ops = &sproc_fw_ops; - - /* - * STE-modem requires the firmware to be located - * at the start of the shared memory region. So we need to - * reserve space for firmware at the start. 
- */ - sproc->fw_addr = dma_alloc_coherent(rproc->dev.parent, SPROC_FW_SIZE, - &sproc->fw_dma_addr, - GFP_KERNEL); - if (!sproc->fw_addr) { - sproc_err(sproc, "Cannot allocate memory for fw\n"); - err = -ENOMEM; - goto free_rproc; - } - - /* Register as a remoteproc device */ - err = rproc_add(rproc); - if (err) - goto free_mem; - - return 0; - -free_mem: - dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, - sproc->fw_addr, sproc->fw_dma_addr); -free_rproc: - /* Reset device data upon error */ - mdev->drv_data = NULL; - rproc_free(rproc); - return err; -} - -static struct platform_driver sproc_driver = { - .driver = { - .name = SPROC_MODEM_NAME, - }, - .probe = sproc_probe, - .remove = sproc_drv_remove, -}; - -module_platform_driver(sproc_driver); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("STE Modem driver using the Remote Processor Framework"); diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 06d9fa2f3bc099efccabc3bb5d7c6b847e19fd0c..172dc966a01fb087c87d014e8f6f7d7299c09aff 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -94,5 +94,6 @@ config RESET_ZYNQ source "drivers/reset/sti/Kconfig" source "drivers/reset/hisilicon/Kconfig" +source "drivers/reset/tegra/Kconfig" endif diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index bbe7026617fc77f0ed5766f73186d6dd36f8d0f3..13b346e03d84b5b924100e96ad99f5f9f0c7a4b8 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -1,6 +1,7 @@ obj-y += core.o obj-y += hisilicon/ obj-$(CONFIG_ARCH_STI) += sti/ +obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_RESET_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o diff --git a/drivers/reset/core.c b/drivers/reset/core.c index b8ae1dbd4c17dac9aaf36b7f119cced2f7b5500b..10368ed8fd136230d1d9a13492bf557bd063a669 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -32,6 +32,9 @@ static LIST_HEAD(reset_controller_list); * @refcnt: Number of gets of this reset_control * @shared: Is this a shared (1), or an exclusive (0) reset_control? * @deassert_cnt: Number of times this reset line has been deasserted + * @triggered_count: Number of times this reset line has been reset. Currently + * only used for shared resets, which means that the value + * will be either 0 or 1. */ struct reset_control { struct reset_controller_dev *rcdev; @@ -40,6 +43,7 @@ struct reset_control { unsigned int refcnt; int shared; atomic_t deassert_count; + atomic_t triggered_count; }; /** @@ -134,18 +138,35 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register); * reset_control_reset - reset the controlled device * @rstc: reset controller * - * Calling this on a shared reset controller is an error. + * On a shared reset line the actual reset pulse is only triggered once for the + * lifetime of the reset_control instance: for all but the first caller this is + * a no-op. + * Consumers must not use reset_control_(de)assert on shared reset lines when + * reset_control_reset has been used. 
*/ int reset_control_reset(struct reset_control *rstc) { - if (WARN_ON(IS_ERR_OR_NULL(rstc)) || - WARN_ON(rstc->shared)) + int ret; + + if (WARN_ON(IS_ERR_OR_NULL(rstc))) return -EINVAL; - if (rstc->rcdev->ops->reset) - return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id); + if (!rstc->rcdev->ops->reset) + return -ENOTSUPP; - return -ENOTSUPP; + if (rstc->shared) { + if (WARN_ON(atomic_read(&rstc->deassert_count) != 0)) + return -EINVAL; + + if (atomic_inc_return(&rstc->triggered_count) != 1) + return 0; + } + + ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id); + if (rstc->shared && !ret) + atomic_dec(&rstc->triggered_count); + + return ret; } EXPORT_SYMBOL_GPL(reset_control_reset); @@ -159,6 +180,8 @@ EXPORT_SYMBOL_GPL(reset_control_reset); * * For shared reset controls a driver cannot expect the hw's registers and * internal state to be reset, but must be prepared for this to happen. + * Consumers must not use reset_control_reset on shared reset lines when + * reset_control_(de)assert has been used. */ int reset_control_assert(struct reset_control *rstc) { @@ -169,6 +192,9 @@ int reset_control_assert(struct reset_control *rstc) return -ENOTSUPP; if (rstc->shared) { + if (WARN_ON(atomic_read(&rstc->triggered_count) != 0)) + return -EINVAL; + if (WARN_ON(atomic_read(&rstc->deassert_count) == 0)) return -EINVAL; @@ -185,6 +211,8 @@ EXPORT_SYMBOL_GPL(reset_control_assert); * @rstc: reset controller * * After calling this function, the reset is guaranteed to be deasserted. + * Consumers must not use reset_control_reset on shared reset lines when + * reset_control_(de)assert has been used. */ int reset_control_deassert(struct reset_control *rstc) { @@ -195,6 +223,9 @@ int reset_control_deassert(struct reset_control *rstc) return -ENOTSUPP; if (rstc->shared) { + if (WARN_ON(atomic_read(&rstc->triggered_count) != 0)) + return -EINVAL; + if (atomic_inc_return(&rstc->deassert_count) != 1) return 0; } diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c index 369f3917fd8e384baf798d6a1f143e7ae59df737..371197bbd0556f036ed5af9da6affa47e2171b3a 100644 --- a/drivers/reset/reset-berlin.c +++ b/drivers/reset/reset-berlin.c @@ -1,6 +1,8 @@ /* * Copyright (C) 2014 Marvell Technology Group Ltd. 
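Editor's sketch (not part of the patch): the consumer-side view of the shared-reset semantics introduced above. Only the first of the sharing consumers actually pulses the line, and all of them must agree to use either reset_control_reset() or the assert/deassert pair, never both. The device and function names below are hypothetical.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int foo_hw_init(struct device *dev)
{
	struct reset_control *rstc;
	int err;

	/* several peripherals sit behind this line, so request it shared */
	rstc = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/*
	 * The first caller bumps triggered_count and pulses the line;
	 * later sharing consumers get 0 back without touching the HW.
	 * Calling reset_control_assert()/deassert() on this rstc from
	 * now on would be rejected with -EINVAL by the core.
	 */
	err = reset_control_reset(rstc);
	if (err)
		dev_err(dev, "shared reset failed: %d\n", err);

	return err;
}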
* + * Marvell Berlin reset driver + * * Antoine Tenart * Sebastian Hesselbarth * @@ -12,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -91,7 +93,6 @@ static const struct of_device_id berlin_reset_dt_match[] = { { .compatible = "marvell,berlin2-reset" }, { }, }; -MODULE_DEVICE_TABLE(of, berlin_reset_dt_match); static struct platform_driver berlin_reset_driver = { .probe = berlin2_reset_probe, @@ -100,9 +101,4 @@ static struct platform_driver berlin_reset_driver = { .of_match_table = berlin_reset_dt_match, }, }; -module_platform_driver(berlin_reset_driver); - -MODULE_AUTHOR("Antoine Tenart "); -MODULE_AUTHOR("Sebastian Hesselbarth "); -MODULE_DESCRIPTION("Marvell Berlin reset driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver(berlin_reset_driver); diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c index 54cca005517105ba203144c7fbb68db170cf1f14..a62ad52e262bec42e7c9ff55493ad3be05ed3d0a 100644 --- a/drivers/reset/reset-lpc18xx.c +++ b/drivers/reset/reset-lpc18xx.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -218,39 +218,17 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev) return ret; } -static int lpc18xx_rgu_remove(struct platform_device *pdev) -{ - struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev); - int ret; - - ret = unregister_restart_handler(&rc->restart_nb); - if (ret) - dev_warn(&pdev->dev, "failed to unregister restart handler\n"); - - reset_controller_unregister(&rc->rcdev); - - clk_disable_unprepare(rc->clk_delay); - clk_disable_unprepare(rc->clk_reg); - - return 0; -} - static const struct of_device_id lpc18xx_rgu_match[] = { { .compatible = "nxp,lpc1850-rgu" }, { } }; -MODULE_DEVICE_TABLE(of, lpc18xx_rgu_match); static struct platform_driver lpc18xx_rgu_driver = { .probe = lpc18xx_rgu_probe, - .remove = lpc18xx_rgu_remove, .driver = { - .name = "lpc18xx-reset", - .of_match_table = lpc18xx_rgu_match, + .name = "lpc18xx-reset", + .of_match_table = lpc18xx_rgu_match, + .suppress_bind_attrs = true, }, }; -module_platform_driver(lpc18xx_rgu_driver); - -MODULE_AUTHOR("Joachim Eastwood "); -MODULE_DESCRIPTION("Reset driver for LPC18xx/43xx RGU"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(lpc18xx_rgu_driver); diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c index 944980572f79b1b190f7c5aabef1fdbc38048bda..0d9036dea010d1274e4a79b536368adad543f0a0 100644 --- a/drivers/reset/reset-oxnas.c +++ b/drivers/reset/reset-oxnas.c @@ -80,6 +80,7 @@ static const struct reset_control_ops oxnas_reset_ops = { static const struct of_device_id oxnas_reset_dt_ids[] = { { .compatible = "oxsemi,ox810se-reset", }, + { .compatible = "oxsemi,ox820-reset", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids); diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index 78ebf8424375ca1a63b35b7e031e64a6f652f440..43e4a9f39b9b8f9be75a52695c9adec73a981667 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c @@ -1,4 +1,6 @@ /* + * Socfpga Reset Controller Driver + * * Copyright 2014 Steffen Trumtrar * * based on @@ -16,7 +18,7 @@ #include #include -#include +#include #include #include #include @@ -148,8 +150,4 @@ static struct platform_driver socfpga_reset_driver = { .of_match_table = socfpga_reset_dt_ids, }, }; -module_platform_driver(socfpga_reset_driver); - -MODULE_AUTHOR("Steffen Trumtrar #include -#include +#include #include #include #include @@ -142,7 +142,6 @@ static const struct 
of_device_id sunxi_reset_dt_ids[] = { { .compatible = "allwinner,sun6i-a31-clock-reset", }, { /* sentinel */ }, }; -MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids); static int sunxi_reset_probe(struct platform_device *pdev) { @@ -175,8 +174,4 @@ static struct platform_driver sunxi_reset_driver = { .of_match_table = sunxi_reset_dt_ids, }, }; -module_platform_driver(sunxi_reset_driver); - -MODULE_AUTHOR("Maxime Ripard + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. @@ -15,7 +17,7 @@ #include #include -#include +#include #include #include #include @@ -137,8 +139,4 @@ static struct platform_driver zynq_reset_driver = { .of_match_table = zynq_reset_dt_ids, }, }; -module_platform_driver(zynq_reset_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Moritz Fischer "); -MODULE_DESCRIPTION("Zynq Reset Controller Driver"); +builtin_platform_driver(zynq_reset_driver); diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig index 613178553612b0ad6450c53b7f1fa7c58a3e9dda..71592b5bfd14f800be26feb97c7bcb3ebf444989 100644 --- a/drivers/reset/sti/Kconfig +++ b/drivers/reset/sti/Kconfig @@ -3,14 +3,6 @@ if ARCH_STI config STI_RESET_SYSCFG bool -config STIH415_RESET - bool - select STI_RESET_SYSCFG - -config STIH416_RESET - bool - select STI_RESET_SYSCFG - config STIH407_RESET bool select STI_RESET_SYSCFG diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile index dc85dfbe56a91746185f495cceb967a0addce8b3..f9d82411f29e14ce49a5322571a3ad1bd7c7c307 100644 --- a/drivers/reset/sti/Makefile +++ b/drivers/reset/sti/Makefile @@ -1,5 +1,3 @@ obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o -obj-$(CONFIG_STIH415_RESET) += reset-stih415.o -obj-$(CONFIG_STIH416_RESET) += reset-stih416.o obj-$(CONFIG_STIH407_RESET) += reset-stih407.o diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c deleted file mode 100644 index 6f220cdbef46ce5246164a9c434a4c976dfc5823..0000000000000000000000000000000000000000 --- a/drivers/reset/sti/reset-stih415.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (C) 2013 STMicroelectronics (R&D) Limited - * Author: Stephen Gallimore - * Author: Srinivas Kandagatla - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ -#include -#include -#include -#include - -#include - -#include "reset-syscfg.h" - -/* - * STiH415 Peripheral powerdown definitions. 
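Editor's sketch (not part of the patch): the berlin, lpc18xx, socfpga, sunxi and zynq hunks above all apply the same conversion - a driver that can only ever be built in drops its MODULE_*() boilerplate (and, for lpc18xx, the remove path) and registers through builtin_platform_driver(). The resulting skeleton, with hypothetical names:

#include <linux/init.h>
#include <linux/platform_device.h>

static int foo_reset_probe(struct platform_device *pdev)
{
	/* map registers and register the reset_controller_dev here */
	return 0;
}

static struct platform_driver foo_reset_driver = {
	.probe	= foo_reset_probe,
	.driver	= {
		.name			= "foo-reset",
		/* no unbind path, as done for lpc18xx above */
		.suppress_bind_attrs	= true,
	},
};
builtin_platform_driver(foo_reset_driver);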
- */ -static const char stih415_front[] = "st,stih415-front-syscfg"; -static const char stih415_rear[] = "st,stih415-rear-syscfg"; -static const char stih415_sbc[] = "st,stih415-sbc-syscfg"; -static const char stih415_lpm[] = "st,stih415-lpm-syscfg"; - -#define STIH415_PDN_FRONT(_bit) \ - _SYSCFG_RST_CH(stih415_front, SYSCFG_114, _bit, SYSSTAT_187, _bit) - -#define STIH415_PDN_REAR(_cntl, _stat) \ - _SYSCFG_RST_CH(stih415_rear, SYSCFG_336, _cntl, SYSSTAT_384, _stat) - -#define STIH415_SRST_REAR(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih415_rear, _reg, _bit) - -#define STIH415_SRST_SBC(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih415_sbc, _reg, _bit) - -#define STIH415_SRST_FRONT(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih415_front, _reg, _bit) - -#define STIH415_SRST_LPM(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih415_lpm, _reg, _bit) - -#define SYSCFG_114 0x38 /* Powerdown request EMI/NAND/Keyscan */ -#define SYSSTAT_187 0x15c /* Powerdown status EMI/NAND/Keyscan */ - -#define SYSCFG_336 0x90 /* Powerdown request USB/SATA/PCIe */ -#define SYSSTAT_384 0x150 /* Powerdown status USB/SATA/PCIe */ - -#define SYSCFG_376 0x130 /* Reset generator 0 control 0 */ -#define SYSCFG_166 0x108 /* Softreset Ethernet 0 */ -#define SYSCFG_31 0x7c /* Softreset Ethernet 1 */ -#define LPM_SYSCFG_1 0x4 /* Softreset IRB */ - -static const struct syscfg_reset_channel_data stih415_powerdowns[] = { - [STIH415_EMISS_POWERDOWN] = STIH415_PDN_FRONT(0), - [STIH415_NAND_POWERDOWN] = STIH415_PDN_FRONT(1), - [STIH415_KEYSCAN_POWERDOWN] = STIH415_PDN_FRONT(2), - [STIH415_USB0_POWERDOWN] = STIH415_PDN_REAR(0, 0), - [STIH415_USB1_POWERDOWN] = STIH415_PDN_REAR(1, 1), - [STIH415_USB2_POWERDOWN] = STIH415_PDN_REAR(2, 2), - [STIH415_SATA0_POWERDOWN] = STIH415_PDN_REAR(3, 3), - [STIH415_SATA1_POWERDOWN] = STIH415_PDN_REAR(4, 4), - [STIH415_PCIE_POWERDOWN] = STIH415_PDN_REAR(5, 8), -}; - -static const struct syscfg_reset_channel_data stih415_softresets[] = { - [STIH415_ETH0_SOFTRESET] = STIH415_SRST_FRONT(SYSCFG_166, 0), - [STIH415_ETH1_SOFTRESET] = STIH415_SRST_SBC(SYSCFG_31, 0), - [STIH415_IRB_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 6), - [STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9), - [STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10), - [STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11), - [STIH415_KEYSCAN_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 8), -}; - -static struct syscfg_reset_controller_data stih415_powerdown_controller = { - .wait_for_ack = true, - .nr_channels = ARRAY_SIZE(stih415_powerdowns), - .channels = stih415_powerdowns, -}; - -static struct syscfg_reset_controller_data stih415_softreset_controller = { - .wait_for_ack = false, - .active_low = true, - .nr_channels = ARRAY_SIZE(stih415_softresets), - .channels = stih415_softresets, -}; - -static const struct of_device_id stih415_reset_match[] = { - { .compatible = "st,stih415-powerdown", - .data = &stih415_powerdown_controller, }, - { .compatible = "st,stih415-softreset", - .data = &stih415_softreset_controller, }, - {}, -}; - -static struct platform_driver stih415_reset_driver = { - .probe = syscfg_reset_probe, - .driver = { - .name = "reset-stih415", - .of_match_table = stih415_reset_match, - }, -}; - -static int __init stih415_reset_init(void) -{ - return platform_driver_register(&stih415_reset_driver); -} -arch_initcall(stih415_reset_init); diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c deleted file mode 100644 index 
c581d606ef0f76f482610fd1933fd3563a853f2b..0000000000000000000000000000000000000000 --- a/drivers/reset/sti/reset-stih416.c +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (C) 2013 STMicroelectronics (R&D) Limited - * Author: Stephen Gallimore - * Author: Srinivas Kandagatla - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ -#include -#include -#include -#include - -#include - -#include "reset-syscfg.h" - -/* - * STiH416 Peripheral powerdown definitions. - */ -static const char stih416_front[] = "st,stih416-front-syscfg"; -static const char stih416_rear[] = "st,stih416-rear-syscfg"; -static const char stih416_sbc[] = "st,stih416-sbc-syscfg"; -static const char stih416_lpm[] = "st,stih416-lpm-syscfg"; -static const char stih416_cpu[] = "st,stih416-cpu-syscfg"; - -#define STIH416_PDN_FRONT(_bit) \ - _SYSCFG_RST_CH(stih416_front, SYSCFG_1500, _bit, SYSSTAT_1578, _bit) - -#define STIH416_PDN_REAR(_cntl, _stat) \ - _SYSCFG_RST_CH(stih416_rear, SYSCFG_2525, _cntl, SYSSTAT_2583, _stat) - -#define SYSCFG_1500 0x7d0 /* Powerdown request EMI/NAND/Keyscan */ -#define SYSSTAT_1578 0x908 /* Powerdown status EMI/NAND/Keyscan */ - -#define SYSCFG_2525 0x834 /* Powerdown request USB/SATA/PCIe */ -#define SYSSTAT_2583 0x91c /* Powerdown status USB/SATA/PCIe */ - -#define SYSCFG_2552 0x8A0 /* Reset Generator control 0 */ -#define SYSCFG_1539 0x86c /* Softreset Ethernet 0 */ -#define SYSCFG_510 0x7f8 /* Softreset Ethernet 1 */ -#define LPM_SYSCFG_1 0x4 /* Softreset IRB */ -#define SYSCFG_2553 0x8a4 /* Softreset SATA0/1, PCIE0/1 */ -#define SYSCFG_7563 0x8cc /* MPE softresets 0 */ -#define SYSCFG_7564 0x8d0 /* MPE softresets 1 */ - -#define STIH416_SRST_CPU(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih416_cpu, _reg, _bit) - -#define STIH416_SRST_FRONT(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih416_front, _reg, _bit) - -#define STIH416_SRST_REAR(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih416_rear, _reg, _bit) - -#define STIH416_SRST_LPM(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih416_lpm, _reg, _bit) - -#define STIH416_SRST_SBC(_reg, _bit) \ - _SYSCFG_RST_CH_NO_ACK(stih416_sbc, _reg, _bit) - -static const struct syscfg_reset_channel_data stih416_powerdowns[] = { - [STIH416_EMISS_POWERDOWN] = STIH416_PDN_FRONT(0), - [STIH416_NAND_POWERDOWN] = STIH416_PDN_FRONT(1), - [STIH416_KEYSCAN_POWERDOWN] = STIH416_PDN_FRONT(2), - [STIH416_USB0_POWERDOWN] = STIH416_PDN_REAR(0, 0), - [STIH416_USB1_POWERDOWN] = STIH416_PDN_REAR(1, 1), - [STIH416_USB2_POWERDOWN] = STIH416_PDN_REAR(2, 2), - [STIH416_USB3_POWERDOWN] = STIH416_PDN_REAR(6, 5), - [STIH416_SATA0_POWERDOWN] = STIH416_PDN_REAR(3, 3), - [STIH416_SATA1_POWERDOWN] = STIH416_PDN_REAR(4, 4), - [STIH416_PCIE0_POWERDOWN] = STIH416_PDN_REAR(7, 9), - [STIH416_PCIE1_POWERDOWN] = STIH416_PDN_REAR(5, 8), -}; - -static const struct syscfg_reset_channel_data stih416_softresets[] = { - [STIH416_ETH0_SOFTRESET] = STIH416_SRST_FRONT(SYSCFG_1539, 0), - [STIH416_ETH1_SOFTRESET] = STIH416_SRST_SBC(SYSCFG_510, 0), - [STIH416_IRB_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 6), - [STIH416_USB0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 9), - [STIH416_USB1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 10), - [STIH416_USB2_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 11), - [STIH416_USB3_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 28), - [STIH416_SATA0_SOFTRESET] = 
STIH416_SRST_REAR(SYSCFG_2553, 7), - [STIH416_SATA1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 3), - [STIH416_PCIE0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 15), - [STIH416_PCIE1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 2), - [STIH416_AUD_DAC_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 14), - [STIH416_HDTVOUT_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 5), - [STIH416_VTAC_M_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 25), - [STIH416_VTAC_A_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 26), - [STIH416_SYNC_HD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 5), - [STIH416_SYNC_SD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 6), - [STIH416_BLITTER_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 10), - [STIH416_GPU_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 11), - [STIH416_VTAC_M_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 18), - [STIH416_VTAC_A_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 19), - [STIH416_VTG_AUX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 21), - [STIH416_JPEG_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 23), - [STIH416_HVA_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 2), - [STIH416_COMPO_M_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 3), - [STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4), - [STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10), - [STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16), - [STIH416_KEYSCAN_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 8), -}; - -static struct syscfg_reset_controller_data stih416_powerdown_controller = { - .wait_for_ack = true, - .nr_channels = ARRAY_SIZE(stih416_powerdowns), - .channels = stih416_powerdowns, -}; - -static struct syscfg_reset_controller_data stih416_softreset_controller = { - .wait_for_ack = false, - .active_low = true, - .nr_channels = ARRAY_SIZE(stih416_softresets), - .channels = stih416_softresets, -}; - -static const struct of_device_id stih416_reset_match[] = { - { .compatible = "st,stih416-powerdown", - .data = &stih416_powerdown_controller, }, - { .compatible = "st,stih416-softreset", - .data = &stih416_softreset_controller, }, - {}, -}; - -static struct platform_driver stih416_reset_driver = { - .probe = syscfg_reset_probe, - .driver = { - .name = "reset-stih416", - .of_match_table = stih416_reset_match, - }, -}; - -static int __init stih416_reset_init(void) -{ - return platform_driver_register(&stih416_reset_driver); -} -arch_initcall(stih416_reset_init); diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..d2afa293df7d02cb95c16e90990e65a1a7b19714 --- /dev/null +++ b/drivers/reset/tegra/Kconfig @@ -0,0 +1,2 @@ +config RESET_TEGRA_BPMP + def_bool TEGRA_BPMP diff --git a/drivers/reset/tegra/Makefile b/drivers/reset/tegra/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..775243ab7383f5e494e346611823075ec49e4bf3 --- /dev/null +++ b/drivers/reset/tegra/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_RESET_TEGRA_BPMP) += reset-bpmp.o diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c new file mode 100644 index 0000000000000000000000000000000000000000..5daf2ee1a396b453248be33b0183e7092bccb457 --- /dev/null +++ b/drivers/reset/tegra/reset-bpmp.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2016 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include + +#include +#include + +static struct tegra_bpmp *to_tegra_bpmp(struct reset_controller_dev *rstc) +{ + return container_of(rstc, struct tegra_bpmp, rstc); +} + +static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc, + enum mrq_reset_commands command, + unsigned int id) +{ + struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc); + struct mrq_reset_request request; + struct tegra_bpmp_message msg; + + memset(&request, 0, sizeof(request)); + request.cmd = command; + request.reset_id = id; + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_RESET; + msg.tx.data = &request; + msg.tx.size = sizeof(request); + + return tegra_bpmp_transfer(bpmp, &msg); +} + +static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc, + unsigned long id) +{ + return tegra_bpmp_reset_common(rstc, CMD_RESET_MODULE, id); +} + +static int tegra_bpmp_reset_assert(struct reset_controller_dev *rstc, + unsigned long id) +{ + return tegra_bpmp_reset_common(rstc, CMD_RESET_ASSERT, id); +} + +static int tegra_bpmp_reset_deassert(struct reset_controller_dev *rstc, + unsigned long id) +{ + return tegra_bpmp_reset_common(rstc, CMD_RESET_DEASSERT, id); +} + +static const struct reset_control_ops tegra_bpmp_reset_ops = { + .reset = tegra_bpmp_reset_module, + .assert = tegra_bpmp_reset_assert, + .deassert = tegra_bpmp_reset_deassert, +}; + +int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp) +{ + bpmp->rstc.ops = &tegra_bpmp_reset_ops; + bpmp->rstc.owner = THIS_MODULE; + bpmp->rstc.of_node = bpmp->dev->of_node; + bpmp->rstc.nr_resets = bpmp->soc->num_resets; + + return devm_reset_controller_register(bpmp->dev, &bpmp->rstc); +} diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 06fef2b4c81408f3e4e1bb1c91949f82e6afa58b..0fae48116a0d51d8d78518b48ad871caa2645344 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "rpmsg_internal.h" @@ -739,7 +740,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data, while (qcom_smd_get_tx_avail(channel) < tlen) { if (!wait) { - ret = -ENOMEM; + ret = -EAGAIN; goto out; } @@ -820,20 +821,13 @@ qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) struct qcom_smd_channel *channel; struct qcom_smd_channel *ret = NULL; unsigned long flags; - unsigned state; spin_lock_irqsave(&edge->channels_lock, flags); list_for_each_entry(channel, &edge->channels, list) { - if (strcmp(channel->name, name)) - continue; - - state = GET_RX_CHANNEL_INFO(channel, state); - if (state != SMD_CHANNEL_OPENING && - state != SMD_CHANNEL_OPENED) - continue; - - ret = channel; - break; + if (!strcmp(channel->name, name)) { + ret = channel; + break; + } } spin_unlock_irqrestore(&edge->channels_lock, flags); diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index b6ea9ffa73816d2bbf3cada1033a5058ccb14394..a79cb5a9e5f22963ed56214421abc099c5949c37 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -71,6 +71,9 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv, struct rpmsg_channel_info chinfo) { + if (WARN_ON(!rpdev)) + return ERR_PTR(-EINVAL); + return rpdev->ops->create_ept(rpdev, cb, priv, chinfo); } EXPORT_SYMBOL(rpmsg_create_ept); @@ -80,11 +83,13 @@ EXPORT_SYMBOL(rpmsg_create_ept); * @ept: endpoing to destroy * * Should be used by drivers to destroy an rpmsg endpoint previously - * created with rpmsg_create_ept(). + * created with rpmsg_create_ept(). 
As with other types of "free" NULL + * is a valid parameter. */ void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) { - ept->ops->destroy_ept(ept); + if (ept) + ept->ops->destroy_ept(ept); } EXPORT_SYMBOL(rpmsg_destroy_ept); @@ -108,6 +113,11 @@ EXPORT_SYMBOL(rpmsg_destroy_ept); */ int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->send) + return -ENXIO; + return ept->ops->send(ept, data, len); } EXPORT_SYMBOL(rpmsg_send); @@ -132,6 +142,11 @@ EXPORT_SYMBOL(rpmsg_send); */ int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->sendto) + return -ENXIO; + return ept->ops->sendto(ept, data, len, dst); } EXPORT_SYMBOL(rpmsg_sendto); @@ -159,6 +174,11 @@ EXPORT_SYMBOL(rpmsg_sendto); int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->send_offchannel) + return -ENXIO; + return ept->ops->send_offchannel(ept, src, dst, data, len); } EXPORT_SYMBOL(rpmsg_send_offchannel); @@ -182,6 +202,11 @@ EXPORT_SYMBOL(rpmsg_send_offchannel); */ int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->trysend) + return -ENXIO; + return ept->ops->trysend(ept, data, len); } EXPORT_SYMBOL(rpmsg_trysend); @@ -205,6 +230,11 @@ EXPORT_SYMBOL(rpmsg_trysend); */ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->trysendto) + return -ENXIO; + return ept->ops->trysendto(ept, data, len, dst); } EXPORT_SYMBOL(rpmsg_trysendto); @@ -231,6 +261,11 @@ EXPORT_SYMBOL(rpmsg_trysendto); int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->trysend_offchannel) + return -ENXIO; + return ept->ops->trysend_offchannel(ept, src, dst, data, len); } EXPORT_SYMBOL(rpmsg_trysend_offchannel); @@ -315,6 +350,9 @@ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv) const struct rpmsg_device_id *ids = rpdrv->id_table; unsigned int i; + if (rpdev->driver_override) + return !strcmp(rpdev->driver_override, drv->name); + if (ids) for (i = 0; ids[i].name[0]; i++) if (rpmsg_id_match(rpdev, &ids[i])) @@ -344,27 +382,30 @@ static int rpmsg_dev_probe(struct device *dev) struct rpmsg_device *rpdev = to_rpmsg_device(dev); struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver); struct rpmsg_channel_info chinfo = {}; - struct rpmsg_endpoint *ept; + struct rpmsg_endpoint *ept = NULL; int err; - strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE); - chinfo.src = rpdev->src; - chinfo.dst = RPMSG_ADDR_ANY; + if (rpdrv->callback) { + strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE); + chinfo.src = rpdev->src; + chinfo.dst = RPMSG_ADDR_ANY; - ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, chinfo); - if (!ept) { - dev_err(dev, "failed to create endpoint\n"); - err = -ENOMEM; - goto out; - } + ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, chinfo); + if (!ept) { + dev_err(dev, "failed to create endpoint\n"); + err = -ENOMEM; + goto out; + } - rpdev->ept = ept; - rpdev->src = ept->addr; + rpdev->ept = ept; + rpdev->src = ept->addr; + } err = rpdrv->probe(rpdev); if (err) { dev_err(dev, "%s: failed: %d\n", __func__, err); - rpmsg_destroy_ept(ept); + if (ept) + rpmsg_destroy_ept(ept); goto out; } @@ -385,7 +426,8 @@ static int 
rpmsg_dev_remove(struct device *dev) rpdrv->remove(rpdev); - rpmsg_destroy_ept(rpdev->ept); + if (rpdev->ept) + rpmsg_destroy_ept(rpdev->ept); return err; } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index e859d148aba9ecdbcecc47e745209b94ed83dd67..c93c5a8fba32925584dbc28c60610786328d09ff 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -303,7 +303,7 @@ config RTC_DRV_MAX6900 config RTC_DRV_MAX8907 tristate "Maxim MAX8907" - depends on MFD_MAX8907 + depends on MFD_MAX8907 || COMPILE_TEST help If you say yes here you will get support for the RTC of Maxim MAX8907 PMIC. @@ -343,7 +343,7 @@ config RTC_DRV_MAX8997 config RTC_DRV_MAX77686 tristate "Maxim MAX77686" - depends on MFD_MAX77686 || MFD_MAX77620 + depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST help If you say yes here you will get support for the RTC of Maxim MAX77686/MAX77620/MAX77802 PMIC. @@ -481,6 +481,7 @@ config RTC_DRV_TWL92330 config RTC_DRV_TWL4030 tristate "TI TWL4030/TWL5030/TWL6030/TPS659x0" depends on TWL4030_CORE + depends on OF help If you say yes here you get support for the RTC on the TWL4030/TWL5030/TWL6030 family chips, used mostly with OMAP3 platforms. @@ -602,7 +603,8 @@ config RTC_DRV_RV8803 config RTC_DRV_S5M tristate "Samsung S2M/S5M series" - depends on MFD_SEC_CORE + depends on MFD_SEC_CORE || COMPILE_TEST + select REGMAP_IRQ help If you say yes here you will get support for the RTC of Samsung S2MPS14 and S5M PMIC series. @@ -820,8 +822,8 @@ config RTC_DRV_RV3029_HWMON comment "Platform RTC drivers" -# this 'CMOS' RTC driver is arch dependent because -# requires defining CMOS_READ/CMOS_WRITE, and a +# this 'CMOS' RTC driver is arch dependent because it requires +# defining CMOS_READ/CMOS_WRITE, and a # global rtc_lock ... it's not yet just another platform_device. config RTC_DRV_CMOS @@ -1549,14 +1551,11 @@ config RTC_DRV_MPC5121 will be called rtc-mpc5121. config RTC_DRV_JZ4740 - tristate "Ingenic JZ4740 SoC" - depends on MACH_JZ4740 || COMPILE_TEST + bool "Ingenic JZ4740 SoC" + depends on MACH_INGENIC || COMPILE_TEST help - If you say yes here you get support for the Ingenic JZ4740 SoC RTC - controller. - - This driver can also be buillt as a module. If so, the module - will be called rtc-jz4740. + If you say yes here you get support for the Ingenic JZ47xx SoCs RTC + controllers. config RTC_DRV_LPC24XX tristate "NXP RTC for LPC178x/18xx/408x/43xx" @@ -1567,7 +1566,7 @@ config RTC_DRV_LPC24XX NXP LPC178x/18xx/408x/43xx devices. If you have one of the devices above enable this driver to use - the hardware RTC. This driver can also be buillt as a module. If + the hardware RTC. This driver can also be built as a module. If so, the module will be called rtc-lpc24xx. config RTC_DRV_LPC32XX @@ -1576,7 +1575,7 @@ config RTC_DRV_LPC32XX help This enables support for the NXP RTC in the LPC32XX - This driver can also be buillt as a module. If so, the module + This driver can also be built as a module. If so, the module will be called rtc-lpc32xx. config RTC_DRV_PM8XXX @@ -1706,6 +1705,17 @@ config RTC_DRV_PIC32 This driver can also be built as a module. If so, the module will be called rtc-pic32 +config RTC_DRV_R7301 + tristate "EPSON TOYOCOM RTC-7301SF/DG" + select REGMAP_MMIO + depends on OF && HAS_IOMEM + help + If you say yes here you get support for the EPSON TOYOCOM + RTC-7301SF/DG chips. + + This driver can also be built as a module. If so, the module + will be called rtc-r7301. 
+ comment "HID Sensor RTC drivers" config RTC_DRV_HID_SENSOR_TIME diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 1ac694a330c8dadc0cbe78a4f6cd8d7359de2ecc..f13ab1c5c222c269e711786e74f5a99a258150a5 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -120,6 +120,7 @@ obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o +obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o obj-$(CONFIG_RTC_DRV_RC5T583) += rtc-rc5t583.o obj-$(CONFIG_RTC_DRV_RK808) += rtc-rk808.o diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 84a52db9b05f905bae3c294b88d116e77327f8c5..fc0fa7577636dab1c02c3a34ea9e2c18b3255bfd 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -363,7 +363,7 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) rtc_timer_remove(rtc, &rtc->aie_timer); rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); - rtc->aie_timer.period = ktime_set(0, 0); + rtc->aie_timer.period = 0; if (alarm->enabled) err = rtc_timer_enqueue(rtc, &rtc->aie_timer); @@ -391,11 +391,11 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) return err; rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); - rtc->aie_timer.period = ktime_set(0, 0); + rtc->aie_timer.period = 0; /* Alarm has to be enabled & in the future for us to enqueue it */ - if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 < - rtc->aie_timer.node.expires.tv64)) { + if (alarm->enabled && (rtc_tm_to_ktime(now) < + rtc->aie_timer.node.expires)) { rtc->aie_timer.enabled = 1; timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); @@ -554,7 +554,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) int count; rtc = container_of(timer, struct rtc_device, pie_timer); - period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); + period = NSEC_PER_SEC / rtc->irq_freq; count = hrtimer_forward_now(timer, period); rtc_handle_legacy_irq(rtc, count, RTC_PF); @@ -665,7 +665,7 @@ static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) return -1; if (enabled) { - ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq); + ktime_t period = NSEC_PER_SEC / rtc->irq_freq; hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); } @@ -766,7 +766,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) /* Skip over expired timers */ while (next) { - if (next->expires.tv64 >= now.tv64) + if (next->expires >= now) break; next = timerqueue_iterate_next(next); } @@ -858,7 +858,7 @@ void rtc_timer_do_work(struct work_struct *work) __rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); while ((next = timerqueue_getnext(&rtc->timerqueue))) { - if (next->expires.tv64 > now.tv64) + if (next->expires > now) break; /* expire timer */ diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c index 18a93d3e3f9317286c99f9718ea1d42ea95ebd50..d36534965635ca48292fd6d570bbe3a60bde75df 100644 --- a/drivers/rtc/rtc-asm9260.c +++ b/drivers/rtc/rtc-asm9260.c @@ -327,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = { { .compatible = "alphascale,asm9260-rtc", }, {} }; +MODULE_DEVICE_TABLE(of, asm9260_dt_ids); static struct platform_driver asm9260_rtc_driver = { .probe = asm9260_rtc_probe, diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index dd3d59806ffa02ef955660390533881d9f6cee62..f4a96dbdabf21ec4bdf8017e524ea42b9b0ce5c7 100644 --- a/drivers/rtc/rtc-cmos.c +++ 
b/drivers/rtc/rtc-cmos.c @@ -191,6 +191,13 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr) static int cmos_read_time(struct device *dev, struct rtc_time *t) { + /* + * If pm_trace abused the RTC for storage, set the timespec to 0, + * which tells the caller that this RTC value is unusable. + */ + if (!pm_trace_rtc_valid()) + return -EIO; + /* REVISIT: if the clock has a "century" register, use * that instead of the heuristic in mc146818_get_time(). * That'll make Y3K compatility (year > 2070) easy! @@ -325,14 +332,86 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask) cmos_checkintr(cmos, rtc_control); } +static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t) +{ + struct cmos_rtc *cmos = dev_get_drvdata(dev); + struct rtc_time now; + + cmos_read_time(dev, &now); + + if (!cmos->day_alrm) { + time64_t t_max_date; + time64_t t_alrm; + + t_max_date = rtc_tm_to_time64(&now); + t_max_date += 24 * 60 * 60 - 1; + t_alrm = rtc_tm_to_time64(&t->time); + if (t_alrm > t_max_date) { + dev_err(dev, + "Alarms can be up to one day in the future\n"); + return -EINVAL; + } + } else if (!cmos->mon_alrm) { + struct rtc_time max_date = now; + time64_t t_max_date; + time64_t t_alrm; + int max_mday; + + if (max_date.tm_mon == 11) { + max_date.tm_mon = 0; + max_date.tm_year += 1; + } else { + max_date.tm_mon += 1; + } + max_mday = rtc_month_days(max_date.tm_mon, max_date.tm_year); + if (max_date.tm_mday > max_mday) + max_date.tm_mday = max_mday; + + t_max_date = rtc_tm_to_time64(&max_date); + t_max_date -= 1; + t_alrm = rtc_tm_to_time64(&t->time); + if (t_alrm > t_max_date) { + dev_err(dev, + "Alarms can be up to one month in the future\n"); + return -EINVAL; + } + } else { + struct rtc_time max_date = now; + time64_t t_max_date; + time64_t t_alrm; + int max_mday; + + max_date.tm_year += 1; + max_mday = rtc_month_days(max_date.tm_mon, max_date.tm_year); + if (max_date.tm_mday > max_mday) + max_date.tm_mday = max_mday; + + t_max_date = rtc_tm_to_time64(&max_date); + t_max_date -= 1; + t_alrm = rtc_tm_to_time64(&t->time); + if (t_alrm > t_max_date) { + dev_err(dev, + "Alarms can be up to one year in the future\n"); + return -EINVAL; + } + } + + return 0; +} + static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char mon, mday, hrs, min, sec, rtc_control; + int ret; if (!is_valid_irq(cmos->irq)) return -EIO; + ret = cmos_validate_alarm(dev, t); + if (ret < 0) + return ret; + mon = t->time.tm_mon + 1; mday = t->time.tm_mday; hrs = t->time.tm_hour; @@ -700,9 +779,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) spin_unlock_irq(&rtc_lock); - /* FIXME: - * doesn't know 12-hour mode either. 
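Editor's sketch (not part of the patch): cmos_validate_alarm() above enforces a tiered limit that depends on which alarm registers the chip actually has - seconds/minutes/hours only allows alarms up to one day ahead, an extra day-of-month register one month, and a month register one year. Once both times go through rtc_tm_to_time64(), the one-day branch reduces to a plain seconds comparison, as in this hypothetical stand-alone check:

#include <linux/errno.h>
#include <linux/rtc.h>

/* mirrors the "no day alarm register" case: alarm must be < now + 1 day */
static int foo_alarm_fits_one_day(struct rtc_time *now, struct rtc_time *alarm)
{
	time64_t now_s  = rtc_tm_to_time64(now);
	time64_t max_s  = now_s + 24 * 60 * 60 - 1;	/* latest acceptable */
	time64_t alrm_s = rtc_tm_to_time64(alarm);

	return alrm_s > max_s ? -EINVAL : 0;
}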
- */ if (is_valid_irq(rtc_irq) && !(rtc_control & RTC_24H)) { dev_warn(dev, "only 24-hr supported\n"); retval = -ENXIO; @@ -776,7 +852,7 @@ static void cmos_do_shutdown(int rtc_irq) spin_unlock_irq(&rtc_lock); } -static void __exit cmos_do_remove(struct device *dev) +static void cmos_do_remove(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); struct resource *ports; @@ -996,8 +1072,9 @@ static u32 rtc_handler(void *context) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char rtc_control = 0; unsigned char rtc_intr; + unsigned long flags; - spin_lock_irq(&rtc_lock); + spin_lock_irqsave(&rtc_lock, flags); if (cmos_rtc.suspend_ctrl) rtc_control = CMOS_READ(RTC_CONTROL); if (rtc_control & RTC_AIE) { @@ -1006,7 +1083,7 @@ static u32 rtc_handler(void *context) rtc_intr = CMOS_READ(RTC_INTR_FLAGS); rtc_update_irq(cmos->rtc, 1, rtc_intr); } - spin_unlock_irq(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); pm_wakeup_event(dev, 0); acpi_clear_event(ACPI_EVENT_RTC); @@ -1129,7 +1206,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) pnp_irq(pnp, 0)); } -static void __exit cmos_pnp_remove(struct pnp_dev *pnp) +static void cmos_pnp_remove(struct pnp_dev *pnp) { cmos_do_remove(&pnp->dev); } @@ -1161,7 +1238,7 @@ static struct pnp_driver cmos_pnp_driver = { .name = (char *) driver_name, .id_table = rtc_ids, .probe = cmos_pnp_probe, - .remove = __exit_p(cmos_pnp_remove), + .remove = cmos_pnp_remove, .shutdown = cmos_pnp_shutdown, /* flag ensures resume() gets called, and stops syslog spam */ @@ -1238,7 +1315,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev) return cmos_do_probe(&pdev->dev, resource, irq); } -static int __exit cmos_platform_remove(struct platform_device *pdev) +static int cmos_platform_remove(struct platform_device *pdev) { cmos_do_remove(&pdev->dev); return 0; @@ -1263,7 +1340,7 @@ static void cmos_platform_shutdown(struct platform_device *pdev) MODULE_ALIAS("platform:rtc_cmos"); static struct platform_driver cmos_platform_driver = { - .remove = __exit_p(cmos_platform_remove), + .remove = cmos_platform_remove, .shutdown = cmos_platform_shutdown, .driver = { .name = driver_name, diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4e31036ee2596dec93accd26f627c5b95591ae9f..4ad97be480430babc3321075f2739114eaad8f04 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -11,6 +11,7 @@ * published by the Free Software Foundation. 
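Editor's note on the rtc_handler() hunk above: switching to spin_lock_irqsave()/spin_unlock_irqrestore() keeps the handler from unconditionally re-enabling interrupts on unlock, which matters if the ACPI event handler is invoked with interrupts already disabled. The general pattern, as a minimal sketch with a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);

/* safe from any context: restores whatever IRQ state the caller had */
static void foo_touch_registers(void)
{
	unsigned long flags;

	spin_lock_irqsave(&foo_lock, flags);
	/* ... poke the shared hardware ... */
	spin_unlock_irqrestore(&foo_lock, flags);
}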
*/ +#include #include #include #include @@ -191,6 +192,26 @@ static const struct i2c_device_id ds1307_id[] = { }; MODULE_DEVICE_TABLE(i2c, ds1307_id); +#ifdef CONFIG_ACPI +static const struct acpi_device_id ds1307_acpi_ids[] = { + { .id = "DS1307", .driver_data = ds_1307 }, + { .id = "DS1337", .driver_data = ds_1337 }, + { .id = "DS1338", .driver_data = ds_1338 }, + { .id = "DS1339", .driver_data = ds_1339 }, + { .id = "DS1388", .driver_data = ds_1388 }, + { .id = "DS1340", .driver_data = ds_1340 }, + { .id = "DS3231", .driver_data = ds_3231 }, + { .id = "M41T00", .driver_data = m41t00 }, + { .id = "MCP7940X", .driver_data = mcp794xx }, + { .id = "MCP7941X", .driver_data = mcp794xx }, + { .id = "PT7C4338", .driver_data = ds_1307 }, + { .id = "RX8025", .driver_data = rx_8025 }, + { .id = "ISL12057", .driver_data = ds_1337 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids); +#endif + /*----------------------------------------------------------------------*/ #define BLOCK_DATA_MAX_TRIES 10 @@ -874,17 +895,17 @@ static u8 do_trickle_setup_ds1339(struct i2c_client *client, return setup; } -static void ds1307_trickle_of_init(struct i2c_client *client, - struct chip_desc *chip) +static void ds1307_trickle_init(struct i2c_client *client, + struct chip_desc *chip) { uint32_t ohms = 0; bool diode = true; if (!chip->do_trickle_setup) goto out; - if (of_property_read_u32(client->dev.of_node, "trickle-resistor-ohms" , &ohms)) + if (device_property_read_u32(&client->dev, "trickle-resistor-ohms", &ohms)) goto out; - if (of_property_read_bool(client->dev.of_node, "trickle-diode-disable")) + if (device_property_read_bool(&client->dev, "trickle-diode-disable")) diode = false; chip->trickle_charger_setup = chip->do_trickle_setup(client, ohms, diode); @@ -1268,7 +1289,7 @@ static int ds1307_probe(struct i2c_client *client, struct ds1307 *ds1307; int err = -ENODEV; int tmp, wday; - struct chip_desc *chip = &chips[id->driver_data]; + struct chip_desc *chip; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); bool want_irq = false; bool ds1307_can_wakeup_device = false; @@ -1297,11 +1318,23 @@ static int ds1307_probe(struct i2c_client *client, i2c_set_clientdata(client, ds1307); ds1307->client = client; - ds1307->type = id->driver_data; + if (id) { + chip = &chips[id->driver_data]; + ds1307->type = id->driver_data; + } else { + const struct acpi_device_id *acpi_id; + + acpi_id = acpi_match_device(ACPI_PTR(ds1307_acpi_ids), + &client->dev); + if (!acpi_id) + return -ENODEV; + chip = &chips[acpi_id->driver_data]; + ds1307->type = acpi_id->driver_data; + } - if (!pdata && client->dev.of_node) - ds1307_trickle_of_init(client, chip); - else if (pdata && pdata->trickle_charger_setup) + if (!pdata) + ds1307_trickle_init(client, chip); + else if (pdata->trickle_charger_setup) chip->trickle_charger_setup = pdata->trickle_charger_setup; if (chip->trickle_charger_setup && chip->trickle_charger_reg) { @@ -1678,6 +1711,7 @@ static int ds1307_remove(struct i2c_client *client) static struct i2c_driver ds1307_driver = { .driver = { .name = "rtc-ds1307", + .acpi_match_table = ACPI_PTR(ds1307_acpi_ids), }, .probe = ds1307_probe, .remove = ds1307_remove, diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 3b3049c8c9e04ddb8ebfb3c391eba41b320286f3..52429f0a57cc2125e428aa88be917a4268f44b35 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -89,10 +89,8 @@ static int ds1374_read_rtc(struct i2c_client *client, u32 *time, int ret; int i; - if (nbytes > 4) { - WARN_ON(1); + if 
(WARN_ON(nbytes > 4)) return -EINVAL; - } ret = i2c_smbus_read_i2c_block_data(client, reg, nbytes, buf); diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index 8d8049bdfaf613a83336fd22ed326ddfe9717716..67b56b80dc7097049dbf20f0ea8d0e908a231b5f 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -67,7 +67,7 @@ #define DSR_ETAD (1 << 21) /* External tamper A detected */ #define DSR_EBD (1 << 20) /* External boot detected */ #define DSR_SAD (1 << 19) /* SCC alarm detected */ -#define DSR_TTD (1 << 18) /* Temperatur tamper detected */ +#define DSR_TTD (1 << 18) /* Temperature tamper detected */ #define DSR_CTD (1 << 17) /* Clock tamper detected */ #define DSR_VTD (1 << 16) /* Voltage tamper detected */ #define DSR_WBF (1 << 10) /* Write Busy Flag (synchronous) */ diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index 5e14651b71a89a2327a2f1fe175cc56156d6a880..72918c1ba0928d4fc78d921db621c489fafb9701 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c @@ -14,10 +14,12 @@ * */ +#include #include #include -#include +#include #include +#include #include #include #include @@ -27,8 +29,14 @@ #define JZ_REG_RTC_SEC_ALARM 0x08 #define JZ_REG_RTC_REGULATOR 0x0C #define JZ_REG_RTC_HIBERNATE 0x20 +#define JZ_REG_RTC_WAKEUP_FILTER 0x24 +#define JZ_REG_RTC_RESET_COUNTER 0x28 #define JZ_REG_RTC_SCRATCHPAD 0x34 +/* The following are present on the jz4780 */ +#define JZ_REG_RTC_WENR 0x3C +#define JZ_RTC_WENR_WEN BIT(31) + #define JZ_RTC_CTRL_WRDY BIT(7) #define JZ_RTC_CTRL_1HZ BIT(6) #define JZ_RTC_CTRL_1HZ_IRQ BIT(5) @@ -37,16 +45,34 @@ #define JZ_RTC_CTRL_AE BIT(2) #define JZ_RTC_CTRL_ENABLE BIT(0) +/* Magic value to enable writes on jz4780 */ +#define JZ_RTC_WENR_MAGIC 0xA55A + +#define JZ_RTC_WAKEUP_FILTER_MASK 0x0000FFE0 +#define JZ_RTC_RESET_COUNTER_MASK 0x00000FE0 + +enum jz4740_rtc_type { + ID_JZ4740, + ID_JZ4780, +}; + struct jz4740_rtc { void __iomem *base; + enum jz4740_rtc_type type; struct rtc_device *rtc; + struct clk *clk; int irq; spinlock_t lock; + + unsigned int min_wakeup_pin_assert_time; + unsigned int reset_pin_assert_time; }; +static struct device *dev_for_power_off; + static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg) { return readl(rtc->base + reg); @@ -64,11 +90,33 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc) return timeout ? 0 : -EIO; } +static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc) +{ + uint32_t ctrl; + int ret, timeout = 1000; + + ret = jz4740_rtc_wait_write_ready(rtc); + if (ret != 0) + return ret; + + writel(JZ_RTC_WENR_MAGIC, rtc->base + JZ_REG_RTC_WENR); + + do { + ctrl = readl(rtc->base + JZ_REG_RTC_WENR); + } while (!(ctrl & JZ_RTC_WENR_WEN) && --timeout); + + return timeout ? 
0 : -EIO; +} + static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg, uint32_t val) { - int ret; - ret = jz4740_rtc_wait_write_ready(rtc); + int ret = 0; + + if (rtc->type >= ID_JZ4780) + ret = jz4780_rtc_enable_write(rtc); + if (ret == 0) + ret = jz4740_rtc_wait_write_ready(rtc); if (ret == 0) writel(val, rtc->base + reg); @@ -203,12 +251,57 @@ static irqreturn_t jz4740_rtc_irq(int irq, void *data) return IRQ_HANDLED; } -void jz4740_rtc_poweroff(struct device *dev) +static void jz4740_rtc_poweroff(struct device *dev) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); jz4740_rtc_reg_write(rtc, JZ_REG_RTC_HIBERNATE, 1); } -EXPORT_SYMBOL_GPL(jz4740_rtc_poweroff); + +static void jz4740_rtc_power_off(void) +{ + struct jz4740_rtc *rtc = dev_get_drvdata(dev_for_power_off); + unsigned long rtc_rate; + unsigned long wakeup_filter_ticks; + unsigned long reset_counter_ticks; + + clk_prepare_enable(rtc->clk); + + rtc_rate = clk_get_rate(rtc->clk); + + /* + * Set minimum wakeup pin assertion time: 100 ms. + * Range is 0 to 2 sec if RTC is clocked at 32 kHz. + */ + wakeup_filter_ticks = + (rtc->min_wakeup_pin_assert_time * rtc_rate) / 1000; + if (wakeup_filter_ticks < JZ_RTC_WAKEUP_FILTER_MASK) + wakeup_filter_ticks &= JZ_RTC_WAKEUP_FILTER_MASK; + else + wakeup_filter_ticks = JZ_RTC_WAKEUP_FILTER_MASK; + jz4740_rtc_reg_write(rtc, + JZ_REG_RTC_WAKEUP_FILTER, wakeup_filter_ticks); + + /* + * Set reset pin low-level assertion time after wakeup: 60 ms. + * Range is 0 to 125 ms if RTC is clocked at 32 kHz. + */ + reset_counter_ticks = (rtc->reset_pin_assert_time * rtc_rate) / 1000; + if (reset_counter_ticks < JZ_RTC_RESET_COUNTER_MASK) + reset_counter_ticks &= JZ_RTC_RESET_COUNTER_MASK; + else + reset_counter_ticks = JZ_RTC_RESET_COUNTER_MASK; + jz4740_rtc_reg_write(rtc, + JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks); + + jz4740_rtc_poweroff(dev_for_power_off); + machine_halt(); +} + +static const struct of_device_id jz4740_rtc_of_match[] = { + { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 }, + { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 }, + {}, +}; static int jz4740_rtc_probe(struct platform_device *pdev) { @@ -216,11 +309,20 @@ static int jz4740_rtc_probe(struct platform_device *pdev) struct jz4740_rtc *rtc; uint32_t scratchpad; struct resource *mem; + const struct platform_device_id *id = platform_get_device_id(pdev); + const struct of_device_id *of_id = of_match_device( + jz4740_rtc_of_match, &pdev->dev); + struct device_node *np = pdev->dev.of_node; rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); if (!rtc) return -ENOMEM; + if (of_id) + rtc->type = (enum jz4740_rtc_type)of_id->data; + else + rtc->type = id->driver_data; + rtc->irq = platform_get_irq(pdev, 0); if (rtc->irq < 0) { dev_err(&pdev->dev, "Failed to get platform irq\n"); @@ -232,6 +334,12 @@ static int jz4740_rtc_probe(struct platform_device *pdev) if (IS_ERR(rtc->base)) return PTR_ERR(rtc->base); + rtc->clk = devm_clk_get(&pdev->dev, "rtc"); + if (IS_ERR(rtc->clk)) { + dev_err(&pdev->dev, "Failed to get RTC clock\n"); + return PTR_ERR(rtc->clk); + } + spin_lock_init(&rtc->lock); platform_set_drvdata(pdev, rtc); @@ -263,6 +371,27 @@ static int jz4740_rtc_probe(struct platform_device *pdev) } } + if (np && of_device_is_system_power_controller(np)) { + if (!pm_power_off) { + /* Default: 60ms */ + rtc->reset_pin_assert_time = 60; + of_property_read_u32(np, "reset-pin-assert-time-ms", + &rtc->reset_pin_assert_time); + + /* Default: 100ms */ + rtc->min_wakeup_pin_assert_time = 
100; + of_property_read_u32(np, + "min-wakeup-pin-assert-time-ms", + &rtc->min_wakeup_pin_assert_time); + + dev_for_power_off = &pdev->dev; + pm_power_off = jz4740_rtc_power_off; + } else { + dev_warn(&pdev->dev, + "Poweroff handler already present!\n"); + } + } + return 0; } @@ -295,17 +424,20 @@ static const struct dev_pm_ops jz4740_pm_ops = { #define JZ4740_RTC_PM_OPS NULL #endif /* CONFIG_PM */ +static const struct platform_device_id jz4740_rtc_ids[] = { + { "jz4740-rtc", ID_JZ4740 }, + { "jz4780-rtc", ID_JZ4780 }, + {} +}; + static struct platform_driver jz4740_rtc_driver = { .probe = jz4740_rtc_probe, .driver = { .name = "jz4740-rtc", .pm = JZ4740_RTC_PM_OPS, + .of_match_table = of_match_ptr(jz4740_rtc_of_match), }, + .id_table = jz4740_rtc_ids, }; -module_platform_driver(jz4740_rtc_driver); - -MODULE_AUTHOR("Lars-Peter Clausen "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n"); -MODULE_ALIAS("platform:jz4740-rtc"); +builtin_platform_driver(jz4740_rtc_driver); diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c index e6bfb9c42a10b08948910345e34f1b3b83057e82..1ae7da5cfc608f6c4bcb3764b00683e2a54b7919 100644 --- a/drivers/rtc/rtc-lib.c +++ b/drivers/rtc/rtc-lib.c @@ -11,7 +11,7 @@ * published by the Free Software Foundation. */ -#include +#include #include static const unsigned char rtc_days_in_month[] = { @@ -148,5 +148,3 @@ struct rtc_time rtc_ktime_to_tm(ktime_t kt) return ret; } EXPORT_SYMBOL_GPL(rtc_ktime_to_tm); - -MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c index 4021fd04cb0ac847c955c4de79e283a97b73ff17..ce75e421ba001fce02c7f7afce57c310af012001 100644 --- a/drivers/rtc/rtc-mcp795.c +++ b/drivers/rtc/rtc-mcp795.c @@ -12,7 +12,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
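Editor's sketch (not part of the patch): the jz4740_rtc_power_off() path added above converts the DT-provided millisecond values into RTC ticks and then clamps/aligns them to the register fields. Assuming the usual 32.768 kHz RTC clock, the defaults work out as in this illustrative helper:

static unsigned long foo_ms_to_rtc_ticks(unsigned int ms, unsigned long mask)
{
	unsigned long rate = 32768;			/* Hz, assumed */
	unsigned long ticks = (ms * rate) / 1000;

	/* clamp/align exactly as jz4740_rtc_power_off() does */
	return ticks < mask ? (ticks & mask) : mask;
}

/*
 * foo_ms_to_rtc_ticks(100, 0x0000FFE0) == 3264  (100 ms wakeup filter)
 * foo_ms_to_rtc_ticks(60,  0x00000FE0) == 1952  (60 ms reset counter)
 */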
* - * */ + */ #include #include @@ -21,6 +21,8 @@ #include #include #include +#include +#include /* MCP795 Instructions, see datasheet table 3-1 */ #define MCP795_EEREAD 0x03 @@ -29,7 +31,7 @@ #define MCP795_EEWREN 0x06 #define MCP795_SRREAD 0x05 #define MCP795_SRWRITE 0x01 -#define MCP795_READ 0x13 +#define MCP795_READ 0x13 #define MCP795_WRITE 0x12 #define MCP795_UNLOCK 0x14 #define MCP795_IDWRITE 0x32 @@ -37,8 +39,17 @@ #define MCP795_CLRWDT 0x44 #define MCP795_CLRRAM 0x54 -#define MCP795_ST_BIT 0x80 -#define MCP795_24_BIT 0x40 +/* MCP795 RTCC registers, see datasheet table 4-1 */ +#define MCP795_REG_SECONDS 0x01 +#define MCP795_REG_DAY 0x04 +#define MCP795_REG_MONTH 0x06 +#define MCP795_REG_CONTROL 0x08 + +#define MCP795_ST_BIT BIT(7) +#define MCP795_24_BIT BIT(6) +#define MCP795_LP_BIT BIT(5) +#define MCP795_EXTOSC_BIT BIT(3) +#define MCP795_OSCON_BIT BIT(5) static int mcp795_rtcc_read(struct device *dev, u8 addr, u8 *buf, u8 count) { @@ -93,30 +104,97 @@ static int mcp795_rtcc_set_bits(struct device *dev, u8 addr, u8 mask, u8 state) return ret; } +static int mcp795_stop_oscillator(struct device *dev, bool *extosc) +{ + int retries = 5; + int ret; + u8 data; + + ret = mcp795_rtcc_set_bits(dev, MCP795_REG_SECONDS, MCP795_ST_BIT, 0); + if (ret) + return ret; + ret = mcp795_rtcc_read(dev, MCP795_REG_CONTROL, &data, 1); + if (ret) + return ret; + *extosc = !!(data & MCP795_EXTOSC_BIT); + ret = mcp795_rtcc_set_bits( + dev, MCP795_REG_CONTROL, MCP795_EXTOSC_BIT, 0); + if (ret) + return ret; + /* wait for the OSCON bit to clear */ + do { + usleep_range(700, 800); + ret = mcp795_rtcc_read(dev, MCP795_REG_DAY, &data, 1); + if (ret) + break; + if (!(data & MCP795_OSCON_BIT)) + break; + + } while (--retries); + + return !retries ? -EIO : ret; +} + +static int mcp795_start_oscillator(struct device *dev, bool *extosc) +{ + if (extosc) { + u8 data = *extosc ? MCP795_EXTOSC_BIT : 0; + int ret; + + ret = mcp795_rtcc_set_bits( + dev, MCP795_REG_CONTROL, MCP795_EXTOSC_BIT, data); + if (ret) + return ret; + } + return mcp795_rtcc_set_bits( + dev, MCP795_REG_SECONDS, MCP795_ST_BIT, MCP795_ST_BIT); +} + static int mcp795_set_time(struct device *dev, struct rtc_time *tim) { int ret; u8 data[7]; + bool extosc; + + /* Stop RTC and store current value of EXTOSC bit */ + ret = mcp795_stop_oscillator(dev, &extosc); + if (ret) + return ret; /* Read first, so we can leave config bits untouched */ - ret = mcp795_rtcc_read(dev, 0x01, data, sizeof(data)); + ret = mcp795_rtcc_read(dev, MCP795_REG_SECONDS, data, sizeof(data)); if (ret) return ret; - data[0] = (data[0] & 0x80) | ((tim->tm_sec / 10) << 4) | (tim->tm_sec % 10); - data[1] = (data[1] & 0x80) | ((tim->tm_min / 10) << 4) | (tim->tm_min % 10); - data[2] = ((tim->tm_hour / 10) << 4) | (tim->tm_hour % 10); - data[4] = ((tim->tm_mday / 10) << 4) | ((tim->tm_mday) % 10); - data[5] = (data[5] & 0x10) | (tim->tm_mon / 10) | (tim->tm_mon % 10); + data[0] = (data[0] & 0x80) | bin2bcd(tim->tm_sec); + data[1] = (data[1] & 0x80) | bin2bcd(tim->tm_min); + data[2] = bin2bcd(tim->tm_hour); + data[4] = bin2bcd(tim->tm_mday); + data[5] = (data[5] & MCP795_LP_BIT) | bin2bcd(tim->tm_mon + 1); if (tim->tm_year > 100) tim->tm_year -= 100; - data[6] = ((tim->tm_year / 10) << 4) | (tim->tm_year % 10); + data[6] = bin2bcd(tim->tm_year); + + /* Always write the date and month using a separate Write command. + * This is a workaround for a know silicon issue that some combinations + * of date and month values may result in the date being reset to 1. 
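Editor's note (not part of the patch): the open-coded decimal packing in the mcp795_set_time() hunk above (and the matching mcp795_read_time() change that follows) is replaced by the standard helpers from linux/bcd.h; a tiny illustration of what they compute:

#include <linux/bcd.h>
#include <linux/types.h>

static void foo_bcd_demo(void)
{
	u8  reg = bin2bcd(59);	 /* ((59 / 10) << 4) | (59 % 10)      == 0x59 */
	int sec = bcd2bin(0x59); /* ((0x59 >> 4) * 10) + (0x59 & 0xf) == 59   */

	(void)reg;
	(void)sec;
}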
+ */ + ret = mcp795_rtcc_write(dev, MCP795_REG_SECONDS, data, 5); + if (ret) + return ret; - ret = mcp795_rtcc_write(dev, 0x01, data, sizeof(data)); + ret = mcp795_rtcc_write(dev, MCP795_REG_MONTH, &data[5], 2); + if (ret) + return ret; + /* Start back RTC and restore previous value of EXTOSC bit. + * There is no need to clear EXTOSC bit when the previous value was 0 + * because it was already cleared when stopping the RTC oscillator. + */ + ret = mcp795_start_oscillator(dev, extosc ? &extosc : NULL); if (ret) return ret; @@ -132,17 +210,17 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim) int ret; u8 data[7]; - ret = mcp795_rtcc_read(dev, 0x01, data, sizeof(data)); + ret = mcp795_rtcc_read(dev, MCP795_REG_SECONDS, data, sizeof(data)); if (ret) return ret; - tim->tm_sec = ((data[0] & 0x70) >> 4) * 10 + (data[0] & 0x0f); - tim->tm_min = ((data[1] & 0x70) >> 4) * 10 + (data[1] & 0x0f); - tim->tm_hour = ((data[2] & 0x30) >> 4) * 10 + (data[2] & 0x0f); - tim->tm_mday = ((data[4] & 0x30) >> 4) * 10 + (data[4] & 0x0f); - tim->tm_mon = ((data[5] & 0x10) >> 4) * 10 + (data[5] & 0x0f); - tim->tm_year = ((data[6] & 0xf0) >> 4) * 10 + (data[6] & 0x0f) + 100; /* Assume we are in 20xx */ + tim->tm_sec = bcd2bin(data[0] & 0x7F); + tim->tm_min = bcd2bin(data[1] & 0x7F); + tim->tm_hour = bcd2bin(data[2] & 0x3F); + tim->tm_mday = bcd2bin(data[4] & 0x3F); + tim->tm_mon = bcd2bin(data[5] & 0x1F) - 1; + tim->tm_year = bcd2bin(data[6]) + 100; /* Assume we are in 20xx */ dev_dbg(dev, "Read from mcp795: %04d-%02d-%02d %02d:%02d:%02d\n", tim->tm_year + 1900, tim->tm_mon, tim->tm_mday, @@ -169,13 +247,13 @@ static int mcp795_probe(struct spi_device *spi) return ret; } - /* Start the oscillator */ - mcp795_rtcc_set_bits(&spi->dev, 0x01, MCP795_ST_BIT, MCP795_ST_BIT); + /* Start the oscillator but don't set the value of EXTOSC bit */ + mcp795_start_oscillator(&spi->dev, NULL); /* Clear the 12 hour mode flag*/ mcp795_rtcc_set_bits(&spi->dev, 0x03, MCP795_24_BIT, 0); rtc = devm_rtc_device_register(&spi->dev, "rtc-mcp795", - &mcp795_rtc_ops, THIS_MODULE); + &mcp795_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index b04ea9b5ae67348c69210d29f3f138e7d5b649e3..51e52446eacb8f4d1e32821fd9c2d6da019825f3 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -113,6 +113,7 @@ /* OMAP_RTC_OSC_REG bit fields: */ #define OMAP_RTC_OSC_32KCLK_EN BIT(6) #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) +#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4) /* OMAP_RTC_IRQWAKEEN bit fields: */ #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) @@ -146,6 +147,7 @@ struct omap_rtc { u8 interrupts_reg; bool is_pmic_controller; bool has_ext_clk; + bool is_suspending; const struct omap_rtc_device_type *type; struct pinctrl_dev *pctldev; }; @@ -786,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev) */ if (rtc->has_ext_clk) { reg = rtc_read(rtc, OMAP_RTC_OSC_REG); - rtc_write(rtc, OMAP_RTC_OSC_REG, - reg | OMAP_RTC_OSC_SEL_32KCLK_SRC); + reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE; + reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC; + rtc_writel(rtc, OMAP_RTC_OSC_REG, reg); } rtc->type->lock(rtc); @@ -898,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev) rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); rtc->type->lock(rtc); - /* Disable the clock/module */ - pm_runtime_put_sync(dev); + rtc->is_suspending = true; return 0; } @@ -908,9 +910,6 @@ static int omap_rtc_resume(struct device *dev) { struct omap_rtc *rtc = 
dev_get_drvdata(dev); - /* Enable the clock/module so that we can access the registers */ - pm_runtime_get_sync(dev); - rtc->type->unlock(rtc); if (device_may_wakeup(dev)) disable_irq_wake(rtc->irq_alarm); @@ -918,11 +917,34 @@ static int omap_rtc_resume(struct device *dev) rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); rtc->type->lock(rtc); + rtc->is_suspending = false; + return 0; } #endif -static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume); +#ifdef CONFIG_PM +static int omap_rtc_runtime_suspend(struct device *dev) +{ + struct omap_rtc *rtc = dev_get_drvdata(dev); + + if (rtc->is_suspending && !rtc->has_ext_clk) + return -EBUSY; + + return 0; +} + +static int omap_rtc_runtime_resume(struct device *dev) +{ + return 0; +} +#endif + +static const struct dev_pm_ops omap_rtc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume) + SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend, + omap_rtc_runtime_resume, NULL) +}; static void omap_rtc_shutdown(struct platform_device *pdev) { diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index efb0a08ac1175182a07ef9d63a49184a20698bd4..a06dff994c8316c0062b39d3704055a1305584a8 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -191,12 +191,19 @@ static int pcf85063_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct rtc_device *rtc; + int err; dev_dbg(&client->dev, "%s\n", __func__); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; + err = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1); + if (err < 0) { + dev_err(&client->dev, "RTC chip is not present\n"); + return err; + } + rtc = devm_rtc_device_register(&client->dev, pcf85063_driver.driver.name, &pcf85063_rtc_ops, THIS_MODULE); diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c new file mode 100644 index 0000000000000000000000000000000000000000..28d540885f3db055ebbf238983cbb64371894d79 --- /dev/null +++ b/drivers/rtc/rtc-r7301.c @@ -0,0 +1,453 @@ +/* + * EPSON TOYOCOM RTC-7301SF/DG Driver + * + * Copyright (c) 2016 Akinobu Mita + * + * Based on rtc-rp5c01.c + * + * Datasheet: http://www5.epsondevice.com/en/products/parallel/rtc7301sf.html + */ + +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "rtc-r7301" + +#define RTC7301_1_SEC 0x0 /* Bank 0 and Bank 1 */ +#define RTC7301_10_SEC 0x1 /* Bank 0 and Bank 1 */ +#define RTC7301_AE BIT(3) +#define RTC7301_1_MIN 0x2 /* Bank 0 and Bank 1 */ +#define RTC7301_10_MIN 0x3 /* Bank 0 and Bank 1 */ +#define RTC7301_1_HOUR 0x4 /* Bank 0 and Bank 1 */ +#define RTC7301_10_HOUR 0x5 /* Bank 0 and Bank 1 */ +#define RTC7301_DAY_OF_WEEK 0x6 /* Bank 0 and Bank 1 */ +#define RTC7301_1_DAY 0x7 /* Bank 0 and Bank 1 */ +#define RTC7301_10_DAY 0x8 /* Bank 0 and Bank 1 */ +#define RTC7301_1_MONTH 0x9 /* Bank 0 */ +#define RTC7301_10_MONTH 0xa /* Bank 0 */ +#define RTC7301_1_YEAR 0xb /* Bank 0 */ +#define RTC7301_10_YEAR 0xc /* Bank 0 */ +#define RTC7301_100_YEAR 0xd /* Bank 0 */ +#define RTC7301_1000_YEAR 0xe /* Bank 0 */ +#define RTC7301_ALARM_CONTROL 0xe /* Bank 1 */ +#define RTC7301_ALARM_CONTROL_AIE BIT(0) +#define RTC7301_ALARM_CONTROL_AF BIT(1) +#define RTC7301_TIMER_CONTROL 0xe /* Bank 2 */ +#define RTC7301_TIMER_CONTROL_TIE BIT(0) +#define RTC7301_TIMER_CONTROL_TF BIT(1) +#define RTC7301_CONTROL 0xf /* All banks */ +#define RTC7301_CONTROL_BUSY BIT(0) +#define RTC7301_CONTROL_STOP BIT(1) +#define RTC7301_CONTROL_BANK_SEL_0 BIT(2) +#define RTC7301_CONTROL_BANK_SEL_1
BIT(3) + +struct rtc7301_priv { + struct regmap *regmap; + int irq; + spinlock_t lock; + u8 bank; +}; + +static const struct regmap_config rtc7301_regmap_config = { + .reg_bits = 32, + .val_bits = 8, + .reg_stride = 4, +}; + +static u8 rtc7301_read(struct rtc7301_priv *priv, unsigned int reg) +{ + int reg_stride = regmap_get_reg_stride(priv->regmap); + unsigned int val; + + regmap_read(priv->regmap, reg_stride * reg, &val); + + return val & 0xf; +} + +static void rtc7301_write(struct rtc7301_priv *priv, u8 val, unsigned int reg) +{ + int reg_stride = regmap_get_reg_stride(priv->regmap); + + regmap_write(priv->regmap, reg_stride * reg, val); +} + +static void rtc7301_update_bits(struct rtc7301_priv *priv, unsigned int reg, + u8 mask, u8 val) +{ + int reg_stride = regmap_get_reg_stride(priv->regmap); + + regmap_update_bits(priv->regmap, reg_stride * reg, mask, val); +} + +static int rtc7301_wait_while_busy(struct rtc7301_priv *priv) +{ + int retries = 100; + + while (retries-- > 0) { + u8 val; + + val = rtc7301_read(priv, RTC7301_CONTROL); + if (!(val & RTC7301_CONTROL_BUSY)) + return 0; + + usleep_range(200, 300); + } + + return -ETIMEDOUT; +} + +static void rtc7301_stop(struct rtc7301_priv *priv) +{ + rtc7301_update_bits(priv, RTC7301_CONTROL, RTC7301_CONTROL_STOP, + RTC7301_CONTROL_STOP); +} + +static void rtc7301_start(struct rtc7301_priv *priv) +{ + rtc7301_update_bits(priv, RTC7301_CONTROL, RTC7301_CONTROL_STOP, 0); +} + +static void rtc7301_select_bank(struct rtc7301_priv *priv, u8 bank) +{ + u8 val = 0; + + if (bank == priv->bank) + return; + + if (bank & BIT(0)) + val |= RTC7301_CONTROL_BANK_SEL_0; + if (bank & BIT(1)) + val |= RTC7301_CONTROL_BANK_SEL_1; + + rtc7301_update_bits(priv, RTC7301_CONTROL, + RTC7301_CONTROL_BANK_SEL_0 | + RTC7301_CONTROL_BANK_SEL_1, val); + + priv->bank = bank; +} + +static void rtc7301_get_time(struct rtc7301_priv *priv, struct rtc_time *tm, + bool alarm) +{ + int year; + + tm->tm_sec = rtc7301_read(priv, RTC7301_1_SEC); + tm->tm_sec += (rtc7301_read(priv, RTC7301_10_SEC) & ~RTC7301_AE) * 10; + tm->tm_min = rtc7301_read(priv, RTC7301_1_MIN); + tm->tm_min += (rtc7301_read(priv, RTC7301_10_MIN) & ~RTC7301_AE) * 10; + tm->tm_hour = rtc7301_read(priv, RTC7301_1_HOUR); + tm->tm_hour += (rtc7301_read(priv, RTC7301_10_HOUR) & ~RTC7301_AE) * 10; + tm->tm_mday = rtc7301_read(priv, RTC7301_1_DAY); + tm->tm_mday += (rtc7301_read(priv, RTC7301_10_DAY) & ~RTC7301_AE) * 10; + + if (alarm) { + tm->tm_wday = -1; + tm->tm_mon = -1; + tm->tm_year = -1; + tm->tm_yday = -1; + tm->tm_isdst = -1; + return; + } + + tm->tm_wday = (rtc7301_read(priv, RTC7301_DAY_OF_WEEK) & ~RTC7301_AE); + tm->tm_mon = rtc7301_read(priv, RTC7301_10_MONTH) * 10 + + rtc7301_read(priv, RTC7301_1_MONTH) - 1; + year = rtc7301_read(priv, RTC7301_1000_YEAR) * 1000 + + rtc7301_read(priv, RTC7301_100_YEAR) * 100 + + rtc7301_read(priv, RTC7301_10_YEAR) * 10 + + rtc7301_read(priv, RTC7301_1_YEAR); + + tm->tm_year = year - 1900; +} + +static void rtc7301_write_time(struct rtc7301_priv *priv, struct rtc_time *tm, + bool alarm) +{ + int year; + + rtc7301_write(priv, tm->tm_sec % 10, RTC7301_1_SEC); + rtc7301_write(priv, tm->tm_sec / 10, RTC7301_10_SEC); + + rtc7301_write(priv, tm->tm_min % 10, RTC7301_1_MIN); + rtc7301_write(priv, tm->tm_min / 10, RTC7301_10_MIN); + + rtc7301_write(priv, tm->tm_hour % 10, RTC7301_1_HOUR); + rtc7301_write(priv, tm->tm_hour / 10, RTC7301_10_HOUR); + + rtc7301_write(priv, tm->tm_mday % 10, RTC7301_1_DAY); + rtc7301_write(priv, tm->tm_mday / 10, RTC7301_10_DAY); + + /* Don't care 
for alarm register */ + rtc7301_write(priv, alarm ? RTC7301_AE : tm->tm_wday, + RTC7301_DAY_OF_WEEK); + + if (alarm) + return; + + rtc7301_write(priv, (tm->tm_mon + 1) % 10, RTC7301_1_MONTH); + rtc7301_write(priv, (tm->tm_mon + 1) / 10, RTC7301_10_MONTH); + + year = tm->tm_year + 1900; + + rtc7301_write(priv, year % 10, RTC7301_1_YEAR); + rtc7301_write(priv, (year / 10) % 10, RTC7301_10_YEAR); + rtc7301_write(priv, (year / 100) % 10, RTC7301_100_YEAR); + rtc7301_write(priv, year / 1000, RTC7301_1000_YEAR); +} + +static void rtc7301_alarm_irq(struct rtc7301_priv *priv, unsigned int enabled) +{ + rtc7301_update_bits(priv, RTC7301_ALARM_CONTROL, + RTC7301_ALARM_CONTROL_AF | + RTC7301_ALARM_CONTROL_AIE, + enabled ? RTC7301_ALARM_CONTROL_AIE : 0); +} + +static int rtc7301_read_time(struct device *dev, struct rtc_time *tm) +{ + struct rtc7301_priv *priv = dev_get_drvdata(dev); + unsigned long flags; + int err; + + spin_lock_irqsave(&priv->lock, flags); + + rtc7301_select_bank(priv, 0); + + err = rtc7301_wait_while_busy(priv); + if (!err) + rtc7301_get_time(priv, tm, false); + + spin_unlock_irqrestore(&priv->lock, flags); + + return err ? err : rtc_valid_tm(tm); +} + +static int rtc7301_set_time(struct device *dev, struct rtc_time *tm) +{ + struct rtc7301_priv *priv = dev_get_drvdata(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + rtc7301_stop(priv); + usleep_range(200, 300); + rtc7301_select_bank(priv, 0); + rtc7301_write_time(priv, tm, false); + rtc7301_start(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int rtc7301_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct rtc7301_priv *priv = dev_get_drvdata(dev); + unsigned long flags; + u8 alrm_ctrl; + + if (priv->irq <= 0) + return -EINVAL; + + spin_lock_irqsave(&priv->lock, flags); + + rtc7301_select_bank(priv, 1); + rtc7301_get_time(priv, &alarm->time, true); + + alrm_ctrl = rtc7301_read(priv, RTC7301_ALARM_CONTROL); + + alarm->enabled = !!(alrm_ctrl & RTC7301_ALARM_CONTROL_AIE); + alarm->pending = !!(alrm_ctrl & RTC7301_ALARM_CONTROL_AF); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int rtc7301_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct rtc7301_priv *priv = dev_get_drvdata(dev); + unsigned long flags; + + if (priv->irq <= 0) + return -EINVAL; + + spin_lock_irqsave(&priv->lock, flags); + + rtc7301_select_bank(priv, 1); + rtc7301_write_time(priv, &alarm->time, true); + rtc7301_alarm_irq(priv, alarm->enabled); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int rtc7301_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct rtc7301_priv *priv = dev_get_drvdata(dev); + unsigned long flags; + + if (priv->irq <= 0) + return -EINVAL; + + spin_lock_irqsave(&priv->lock, flags); + + rtc7301_select_bank(priv, 1); + rtc7301_alarm_irq(priv, enabled); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static const struct rtc_class_ops rtc7301_rtc_ops = { + .read_time = rtc7301_read_time, + .set_time = rtc7301_set_time, + .read_alarm = rtc7301_read_alarm, + .set_alarm = rtc7301_set_alarm, + .alarm_irq_enable = rtc7301_alarm_irq_enable, +}; + +static irqreturn_t rtc7301_irq_handler(int irq, void *dev_id) +{ + struct rtc_device *rtc = dev_id; + struct rtc7301_priv *priv = dev_get_drvdata(rtc->dev.parent); + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + u8 alrm_ctrl; + + spin_lock_irqsave(&priv->lock, flags); + + rtc7301_select_bank(priv, 1); + + alrm_ctrl = 
rtc7301_read(priv, RTC7301_ALARM_CONTROL); + if (alrm_ctrl & RTC7301_ALARM_CONTROL_AF) { + ret = IRQ_HANDLED; + rtc7301_alarm_irq(priv, false); + rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + return ret; +} + +static void rtc7301_init(struct rtc7301_priv *priv) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + rtc7301_select_bank(priv, 2); + rtc7301_write(priv, 0, RTC7301_TIMER_CONTROL); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int __init rtc7301_rtc_probe(struct platform_device *dev) +{ + struct resource *res; + void __iomem *regs; + struct rtc7301_priv *priv; + struct rtc_device *rtc; + int ret; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + regs = devm_ioremap_resource(&dev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + priv->regmap = devm_regmap_init_mmio(&dev->dev, regs, + &rtc7301_regmap_config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + priv->irq = platform_get_irq(dev, 0); + + spin_lock_init(&priv->lock); + priv->bank = -1; + + rtc7301_init(priv); + + platform_set_drvdata(dev, priv); + + rtc = devm_rtc_device_register(&dev->dev, DRV_NAME, &rtc7301_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + if (priv->irq > 0) { + ret = devm_request_irq(&dev->dev, priv->irq, + rtc7301_irq_handler, IRQF_SHARED, + dev_name(&dev->dev), rtc); + if (ret) { + priv->irq = 0; + dev_err(&dev->dev, "unable to request IRQ\n"); + } else { + device_set_wakeup_capable(&dev->dev, true); + } + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP + +static int rtc7301_suspend(struct device *dev) +{ + struct rtc7301_priv *priv = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(priv->irq); + + return 0; +} + +static int rtc7301_resume(struct device *dev) +{ + struct rtc7301_priv *priv = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(priv->irq); + + return 0; +} + +#endif + +static SIMPLE_DEV_PM_OPS(rtc7301_pm_ops, rtc7301_suspend, rtc7301_resume); + +static const struct of_device_id rtc7301_dt_match[] = { + { .compatible = "epson,rtc7301sf" }, + { .compatible = "epson,rtc7301dg" }, + {} +}; +MODULE_DEVICE_TABLE(of, rtc7301_dt_match); + +static struct platform_driver rtc7301_rtc_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = rtc7301_dt_match, + .pm = &rtc7301_pm_ops, + }, +}; + +module_platform_driver_probe(rtc7301_rtc_driver, rtc7301_rtc_probe); + +MODULE_AUTHOR("Akinobu Mita "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("EPSON TOYOCOM RTC-7301SF/DG Driver"); +MODULE_ALIAS("platform:rtc-r7301"); diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c index 83a057a0306072ff5efc4beebbcc6f5727b30307..7fc36973fa330e4845b84f688800d6903556de38 100644 --- a/drivers/rtc/rtc-starfire.c +++ b/drivers/rtc/rtc-starfire.c @@ -1,20 +1,18 @@ /* rtc-starfire.c: Starfire platform RTC driver. + * + * Author: David S. Miller + * License: GPL * * Copyright (C) 2008 David S. Miller */ #include -#include #include #include #include #include -MODULE_AUTHOR("David S. 
Miller "); -MODULE_DESCRIPTION("Starfire RTC driver"); -MODULE_LICENSE("GPL"); - static u32 starfire_get_time(void) { static char obp_gettod[32]; @@ -57,4 +55,4 @@ static struct platform_driver starfire_rtc_driver = { }, }; -module_platform_driver_probe(starfire_rtc_driver, starfire_rtc_probe); +builtin_platform_driver_probe(starfire_rtc_driver, starfire_rtc_probe); diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c index 7c696c12f28f8cf36bdbc281ffcf3d52a68b133d..11bc562eba5dfb7d9aa749ff9e951397b9d6c7a8 100644 --- a/drivers/rtc/rtc-sun4v.c +++ b/drivers/rtc/rtc-sun4v.c @@ -1,4 +1,7 @@ /* rtc-sun4v.c: Hypervisor based RTC for SUN4V systems. + * + * Author: David S. Miller + * License: GPL * * Copyright (C) 2008 David S. Miller */ @@ -6,7 +9,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include #include #include #include @@ -98,8 +100,4 @@ static struct platform_driver sun4v_rtc_driver = { }, }; -module_platform_driver_probe(sun4v_rtc_driver, sun4v_rtc_probe); - -MODULE_AUTHOR("David S. Miller "); -MODULE_DESCRIPTION("SUN4V RTC driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver_probe(sun4v_rtc_driver, sun4v_rtc_probe); diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 176720b7b9e5083195b3bd789dbf5a8a622cd6c8..c18c39212ce680929b72c07e0ebe6305bb2675d8 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -33,6 +33,10 @@ #include +enum twl_class { + TWL_4030 = 0, + TWL_6030, +}; /* * RTC block register offsets (use TWL_MODULE_RTC) @@ -136,16 +140,30 @@ static const u8 twl6030_rtc_reg_map[] = { #define ALL_TIME_REGS 6 /*----------------------------------------------------------------------*/ -static u8 *rtc_reg_map; +struct twl_rtc { + struct device *dev; + struct rtc_device *rtc; + u8 *reg_map; + /* + * Cache the value for timer/alarm interrupts register; this is + * only changed by callers holding rtc ops lock (or resume). + */ + unsigned char rtc_irq_bits; + bool wake_enabled; +#ifdef CONFIG_PM_SLEEP + unsigned char irqstat; +#endif + enum twl_class class; +}; /* * Supports 1 byte read from TWL RTC register. */ -static int twl_rtc_read_u8(u8 *data, u8 reg) +static int twl_rtc_read_u8(struct twl_rtc *twl_rtc, u8 *data, u8 reg) { int ret; - ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg])); + ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (twl_rtc->reg_map[reg])); if (ret < 0) pr_err("Could not read TWL register %X - error %d\n", reg, ret); return ret; @@ -154,40 +172,34 @@ static int twl_rtc_read_u8(u8 *data, u8 reg) /* * Supports 1 byte write to TWL RTC registers. */ -static int twl_rtc_write_u8(u8 data, u8 reg) +static int twl_rtc_write_u8(struct twl_rtc *twl_rtc, u8 data, u8 reg) { int ret; - ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg])); + ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (twl_rtc->reg_map[reg])); if (ret < 0) pr_err("Could not write TWL register %X - error %d\n", reg, ret); return ret; } -/* - * Cache the value for timer/alarm interrupts register; this is - * only changed by callers holding rtc ops lock (or resume). - */ -static unsigned char rtc_irq_bits; - /* * Enable 1/second update and/or alarm interrupts. 
*/ -static int set_rtc_irq_bit(unsigned char bit) +static int set_rtc_irq_bit(struct twl_rtc *twl_rtc, unsigned char bit) { unsigned char val; int ret; /* if the bit is set, return from here */ - if (rtc_irq_bits & bit) + if (twl_rtc->rtc_irq_bits & bit) return 0; - val = rtc_irq_bits | bit; + val = twl_rtc->rtc_irq_bits | bit; val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M; - ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); + ret = twl_rtc_write_u8(twl_rtc, val, REG_RTC_INTERRUPTS_REG); if (ret == 0) - rtc_irq_bits = val; + twl_rtc->rtc_irq_bits = val; return ret; } @@ -195,19 +207,19 @@ static int set_rtc_irq_bit(unsigned char bit) /* * Disable update and/or alarm interrupts. */ -static int mask_rtc_irq_bit(unsigned char bit) +static int mask_rtc_irq_bit(struct twl_rtc *twl_rtc, unsigned char bit) { unsigned char val; int ret; /* if the bit is clear, return from here */ - if (!(rtc_irq_bits & bit)) + if (!(twl_rtc->rtc_irq_bits & bit)) return 0; - val = rtc_irq_bits & ~bit; - ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); + val = twl_rtc->rtc_irq_bits & ~bit; + ret = twl_rtc_write_u8(twl_rtc, val, REG_RTC_INTERRUPTS_REG); if (ret == 0) - rtc_irq_bits = val; + twl_rtc->rtc_irq_bits = val; return ret; } @@ -215,21 +227,23 @@ static int mask_rtc_irq_bit(unsigned char bit) static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) { struct platform_device *pdev = to_platform_device(dev); + struct twl_rtc *twl_rtc = dev_get_drvdata(dev); int irq = platform_get_irq(pdev, 0); - static bool twl_rtc_wake_enabled; int ret; if (enabled) { - ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); - if (device_can_wakeup(dev) && !twl_rtc_wake_enabled) { + ret = set_rtc_irq_bit(twl_rtc, + BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); + if (device_can_wakeup(dev) && !twl_rtc->wake_enabled) { enable_irq_wake(irq); - twl_rtc_wake_enabled = true; + twl_rtc->wake_enabled = true; } } else { - ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); - if (twl_rtc_wake_enabled) { + ret = mask_rtc_irq_bit(twl_rtc, + BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); + if (twl_rtc->wake_enabled) { disable_irq_wake(irq); - twl_rtc_wake_enabled = false; + twl_rtc->wake_enabled = false; } } @@ -247,21 +261,23 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) */ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm) { + struct twl_rtc *twl_rtc = dev_get_drvdata(dev); unsigned char rtc_data[ALL_TIME_REGS]; int ret; u8 save_control; u8 rtc_control; - ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); + ret = twl_rtc_read_u8(twl_rtc, &save_control, REG_RTC_CTRL_REG); if (ret < 0) { dev_err(dev, "%s: reading CTRL_REG, error %d\n", __func__, ret); return ret; } /* for twl6030/32 make sure BIT_RTC_CTRL_REG_GET_TIME_M is clear */ - if (twl_class_is_6030()) { + if (twl_rtc->class == TWL_6030) { if (save_control & BIT_RTC_CTRL_REG_GET_TIME_M) { save_control &= ~BIT_RTC_CTRL_REG_GET_TIME_M; - ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG); + ret = twl_rtc_write_u8(twl_rtc, save_control, + REG_RTC_CTRL_REG); if (ret < 0) { dev_err(dev, "%s clr GET_TIME, error %d\n", __func__, ret); @@ -274,17 +290,17 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm) rtc_control = save_control | BIT_RTC_CTRL_REG_GET_TIME_M; /* for twl6030/32 enable read access to static shadowed registers */ - if (twl_class_is_6030()) + if (twl_rtc->class == TWL_6030) rtc_control |= BIT_RTC_CTRL_REG_RTC_V_OPT; - ret = twl_rtc_write_u8(rtc_control, REG_RTC_CTRL_REG); + ret = 
twl_rtc_write_u8(twl_rtc, rtc_control, REG_RTC_CTRL_REG); if (ret < 0) { dev_err(dev, "%s: writing CTRL_REG, error %d\n", __func__, ret); return ret; } ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data, - (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS); + (twl_rtc->reg_map[REG_SECONDS_REG]), ALL_TIME_REGS); if (ret < 0) { dev_err(dev, "%s: reading data, error %d\n", __func__, ret); @@ -292,8 +308,8 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm) } /* for twl6030 restore original state of rtc control register */ - if (twl_class_is_6030()) { - ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG); + if (twl_rtc->class == TWL_6030) { + ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG); if (ret < 0) { dev_err(dev, "%s: restore CTRL_REG, error %d\n", __func__, ret); @@ -313,6 +329,7 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm) static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm) { + struct twl_rtc *twl_rtc = dev_get_drvdata(dev); unsigned char save_control; unsigned char rtc_data[ALL_TIME_REGS]; int ret; @@ -325,18 +342,18 @@ static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm) rtc_data[5] = bin2bcd(tm->tm_year - 100); /* Stop RTC while updating the TC registers */ - ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); + ret = twl_rtc_read_u8(twl_rtc, &save_control, REG_RTC_CTRL_REG); if (ret < 0) goto out; save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M; - ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG); + ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG); if (ret < 0) goto out; /* update all the time registers in one shot */ ret = twl_i2c_write(TWL_MODULE_RTC, rtc_data, - (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS); + (twl_rtc->reg_map[REG_SECONDS_REG]), ALL_TIME_REGS); if (ret < 0) { dev_err(dev, "rtc_set_time error %d\n", ret); goto out; @@ -344,7 +361,7 @@ static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm) /* Start back RTC */ save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M; - ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG); + ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG); out: return ret; @@ -355,11 +372,12 @@ static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm) */ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) { + struct twl_rtc *twl_rtc = dev_get_drvdata(dev); unsigned char rtc_data[ALL_TIME_REGS]; int ret; ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data, - (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS); + twl_rtc->reg_map[REG_ALARM_SECONDS_REG], ALL_TIME_REGS); if (ret < 0) { dev_err(dev, "rtc_read_alarm error %d\n", ret); return ret; @@ -374,7 +392,7 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->time.tm_year = bcd2bin(rtc_data[5]) + 100; /* report cached alarm enable state */ - if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M) + if (twl_rtc->rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M) alm->enabled = 1; return ret; @@ -382,6 +400,8 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) { + struct twl_rtc *twl_rtc = dev_get_drvdata(dev); + unsigned char alarm_data[ALL_TIME_REGS]; int ret; @@ -398,7 +418,7 @@ static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) /* update all the alarm registers in one shot */ ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data, - (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS); + 
twl_rtc->reg_map[REG_ALARM_SECONDS_REG], ALL_TIME_REGS); if (ret) { dev_err(dev, "rtc_set_alarm error %d\n", ret); goto out; @@ -410,14 +430,15 @@ static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) return ret; } -static irqreturn_t twl_rtc_interrupt(int irq, void *rtc) +static irqreturn_t twl_rtc_interrupt(int irq, void *data) { + struct twl_rtc *twl_rtc = data; unsigned long events; int ret = IRQ_NONE; int res; u8 rd_reg; - res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); + res = twl_rtc_read_u8(twl_rtc, &rd_reg, REG_RTC_STATUS_REG); if (res) goto out; /* @@ -431,12 +452,12 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc) else events = RTC_IRQF | RTC_PF; - res = twl_rtc_write_u8(BIT_RTC_STATUS_REG_ALARM_M, - REG_RTC_STATUS_REG); + res = twl_rtc_write_u8(twl_rtc, BIT_RTC_STATUS_REG_ALARM_M, + REG_RTC_STATUS_REG); if (res) goto out; - if (twl_class_is_4030()) { + if (twl_rtc->class == TWL_4030) { /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1 * needs 2 reads to clear the interrupt. One read is done in * do_twl_pwrirq(). Doing the second read, to clear @@ -455,7 +476,7 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc) } /* Notify RTC core on event */ - rtc_update_irq(rtc, 1, events); + rtc_update_irq(twl_rtc->rtc, 1, events); ret = IRQ_HANDLED; out: @@ -474,21 +495,36 @@ static const struct rtc_class_ops twl_rtc_ops = { static int twl_rtc_probe(struct platform_device *pdev) { - struct rtc_device *rtc; + struct twl_rtc *twl_rtc; + struct device_node *np = pdev->dev.of_node; int ret = -EINVAL; int irq = platform_get_irq(pdev, 0); u8 rd_reg; + if (!np) { + dev_err(&pdev->dev, "no DT info\n"); + return -EINVAL; + } + if (irq <= 0) return ret; - /* Initialize the register map */ - if (twl_class_is_4030()) - rtc_reg_map = (u8 *)twl4030_rtc_reg_map; - else - rtc_reg_map = (u8 *)twl6030_rtc_reg_map; + twl_rtc = devm_kzalloc(&pdev->dev, sizeof(*twl_rtc), GFP_KERNEL); + if (!twl_rtc) + return -ENOMEM; - ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); + if (twl_class_is_4030()) { + twl_rtc->class = TWL_4030; + twl_rtc->reg_map = (u8 *)twl4030_rtc_reg_map; + } else if (twl_class_is_6030()) { + twl_rtc->class = TWL_6030; + twl_rtc->reg_map = (u8 *)twl6030_rtc_reg_map; + } else { + dev_err(&pdev->dev, "TWL Class not supported.\n"); + return -EINVAL; + } + + ret = twl_rtc_read_u8(twl_rtc, &rd_reg, REG_RTC_STATUS_REG); if (ret < 0) return ret; @@ -499,11 +535,11 @@ static int twl_rtc_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n"); /* Clear RTC Power up reset and pending alarm interrupts */ - ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG); + ret = twl_rtc_write_u8(twl_rtc, rd_reg, REG_RTC_STATUS_REG); if (ret < 0) return ret; - if (twl_class_is_6030()) { + if (twl_rtc->class == TWL_6030) { twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, REG_INT_MSK_LINE_A); twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, @@ -511,40 +547,42 @@ static int twl_rtc_probe(struct platform_device *pdev) } dev_info(&pdev->dev, "Enabling TWL-RTC\n"); - ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG); + ret = twl_rtc_write_u8(twl_rtc, BIT_RTC_CTRL_REG_STOP_RTC_M, + REG_RTC_CTRL_REG); if (ret < 0) return ret; /* ensure interrupts are disabled, bootloaders can be strange */ - ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG); + ret = twl_rtc_write_u8(twl_rtc, 0, REG_RTC_INTERRUPTS_REG); if (ret < 0) dev_warn(&pdev->dev, "unable to disable interrupt\n"); /* init cached IRQ enable bits */ - ret = 
twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); + ret = twl_rtc_read_u8(twl_rtc, &twl_rtc->rtc_irq_bits, + REG_RTC_INTERRUPTS_REG); if (ret < 0) return ret; + platform_set_drvdata(pdev, twl_rtc); device_init_wakeup(&pdev->dev, 1); - rtc = devm_rtc_device_register(&pdev->dev, pdev->name, + twl_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &twl_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc)) { + if (IS_ERR(twl_rtc->rtc)) { dev_err(&pdev->dev, "can't register RTC device, err %ld\n", - PTR_ERR(rtc)); - return PTR_ERR(rtc); + PTR_ERR(twl_rtc->rtc)); + return PTR_ERR(twl_rtc->rtc); } ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, twl_rtc_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, - dev_name(&rtc->dev), rtc); + dev_name(&twl_rtc->rtc->dev), twl_rtc); if (ret < 0) { dev_err(&pdev->dev, "IRQ is not free.\n"); return ret; } - platform_set_drvdata(pdev, rtc); return 0; } @@ -554,10 +592,12 @@ static int twl_rtc_probe(struct platform_device *pdev) */ static int twl_rtc_remove(struct platform_device *pdev) { + struct twl_rtc *twl_rtc = platform_get_drvdata(pdev); + /* leave rtc running, but disable irqs */ - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); - if (twl_class_is_6030()) { + mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); + mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); + if (twl_rtc->class == TWL_6030) { twl6030_interrupt_mask(TWL6030_RTC_INT_MASK, REG_INT_MSK_LINE_A); twl6030_interrupt_mask(TWL6030_RTC_INT_MASK, @@ -569,40 +609,40 @@ static int twl_rtc_remove(struct platform_device *pdev) static void twl_rtc_shutdown(struct platform_device *pdev) { + struct twl_rtc *twl_rtc = platform_get_drvdata(pdev); + /* mask timer interrupts, but leave alarm interrupts on to enable power-on when alarm is triggered */ - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); + mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); } #ifdef CONFIG_PM_SLEEP -static unsigned char irqstat; - static int twl_rtc_suspend(struct device *dev) { - irqstat = rtc_irq_bits; + struct twl_rtc *twl_rtc = dev_get_drvdata(dev); + + twl_rtc->irqstat = twl_rtc->rtc_irq_bits; - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); + mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); return 0; } static int twl_rtc_resume(struct device *dev) { - set_rtc_irq_bit(irqstat); + struct twl_rtc *twl_rtc = dev_get_drvdata(dev); + + set_rtc_irq_bit(twl_rtc, twl_rtc->irqstat); return 0; } #endif static SIMPLE_DEV_PM_OPS(twl_rtc_pm_ops, twl_rtc_suspend, twl_rtc_resume); -#ifdef CONFIG_OF static const struct of_device_id twl_rtc_of_match[] = { {.compatible = "ti,twl4030-rtc", }, { }, }; MODULE_DEVICE_TABLE(of, twl_rtc_of_match); -#endif - -MODULE_ALIAS("platform:twl_rtc"); static struct platform_driver twl4030rtc_driver = { .probe = twl_rtc_probe, @@ -611,7 +651,7 @@ static struct platform_driver twl4030rtc_driver = { .driver = { .name = "twl_rtc", .pm = &twl_rtc_pm_ops, - .of_match_table = of_match_ptr(twl_rtc_of_match), + .of_match_table = twl_rtc_of_match, }, }; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1de089019268e10279a4c88be7767cf11f21eb96..0e3fdfdbd09846c454987913dfd678c4ec725991 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -69,6 +69,7 @@ static void dasd_block_tasklet(struct dasd_block *); static void do_kick_device(struct work_struct *); static void do_restore_device(struct work_struct *); static void do_reload_device(struct work_struct 
*); +static void do_requeue_requests(struct work_struct *); static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); static void dasd_device_timeout(unsigned long); static void dasd_block_timeout(unsigned long); @@ -125,6 +126,7 @@ struct dasd_device *dasd_alloc_device(void) INIT_WORK(&device->kick_work, do_kick_device); INIT_WORK(&device->restore_device, do_restore_device); INIT_WORK(&device->reload_device, do_reload_device); + INIT_WORK(&device->requeue_requests, do_requeue_requests); device->state = DASD_STATE_NEW; device->target = DASD_STATE_NEW; mutex_init(&device->state_mutex); @@ -1448,9 +1450,9 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) cqr->starttime = jiffies; cqr->retries--; if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { - cqr->lpm &= device->path_data.opm; + cqr->lpm &= dasd_path_get_opm(device); if (!cqr->lpm) - cqr->lpm = device->path_data.opm; + cqr->lpm = dasd_path_get_opm(device); } if (cqr->cpmode == 1) { rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, @@ -1483,8 +1485,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) DBF_DEV_EVENT(DBF_WARNING, device, "start_IO: selected paths gone (%x)", cqr->lpm); - } else if (cqr->lpm != device->path_data.opm) { - cqr->lpm = device->path_data.opm; + } else if (cqr->lpm != dasd_path_get_opm(device)) { + cqr->lpm = dasd_path_get_opm(device); DBF_DEV_EVENT(DBF_DEBUG, device, "%s", "start_IO: selected paths gone," " retry on all paths"); @@ -1493,11 +1495,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) "start_IO: all paths in opm gone," " do path verification"); dasd_generic_last_path_gone(device); - device->path_data.opm = 0; - device->path_data.ppm = 0; - device->path_data.npm = 0; - device->path_data.tbvpm = - ccw_device_get_path_mask(device->cdev); + dasd_path_no_path(device); + dasd_path_set_tbvpm(device, + ccw_device_get_path_mask( + device->cdev)); } break; case -ENODEV: @@ -1623,6 +1624,13 @@ void dasd_generic_handle_state_change(struct dasd_device *device) } EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); +static int dasd_check_hpf_error(struct irb *irb) +{ + return (scsw_tm_is_valid_schxs(&irb->scsw) && + (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX || + irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); +} + /* * Interrupt handler for "normal" ssch-io based dasd devices. 
*/ @@ -1642,7 +1650,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, switch (PTR_ERR(irb)) { case -EIO: if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { - device = (struct dasd_device *) cqr->startdev; + device = cqr->startdev; cqr->status = DASD_CQR_CLEARED; dasd_device_clear_timer(device); wake_up(&dasd_flush_wq); @@ -1749,19 +1757,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, struct dasd_ccw_req, devlist); } } else { /* error */ + /* check for HPF error + * call discipline function to requeue all requests + * and disable HPF accordingly + */ + if (cqr->cpmode && dasd_check_hpf_error(irb) && + device->discipline->handle_hpf_error) + device->discipline->handle_hpf_error(device, irb); /* * If we don't want complex ERP for this request, then just * reset this and retry it in the fastpath */ if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && cqr->retries > 0) { - if (cqr->lpm == device->path_data.opm) + if (cqr->lpm == dasd_path_get_opm(device)) DBF_DEV_EVENT(DBF_DEBUG, device, "default ERP in fastpath " "(%i retries left)", cqr->retries); if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) - cqr->lpm = device->path_data.opm; + cqr->lpm = dasd_path_get_opm(device); cqr->status = DASD_CQR_QUEUED; next = cqr; } else @@ -2002,17 +2017,18 @@ static void __dasd_device_check_path_events(struct dasd_device *device) { int rc; - if (device->path_data.tbvpm) { - if (device->stopped & ~(DASD_STOPPED_DC_WAIT | - DASD_UNRESUMED_PM)) - return; - rc = device->discipline->verify_path( - device, device->path_data.tbvpm); - if (rc) - dasd_device_set_timer(device, 50); - else - device->path_data.tbvpm = 0; - } + if (!dasd_path_get_tbvpm(device)) + return; + + if (device->stopped & + ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) + return; + rc = device->discipline->verify_path(device, + dasd_path_get_tbvpm(device)); + if (rc) + dasd_device_set_timer(device, 50); + else + dasd_path_clear_all_verify(device); }; /* @@ -2924,10 +2940,10 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr) if (!block) return -EINVAL; - spin_lock_irqsave(&block->queue_lock, flags); + spin_lock_irqsave(&block->request_queue_lock, flags); req = (struct request *) cqr->callback_data; blk_requeue_request(block->request_queue, req); - spin_unlock_irqrestore(&block->queue_lock, flags); + spin_unlock_irqrestore(&block->request_queue_lock, flags); return 0; } @@ -3121,6 +3137,7 @@ static int dasd_alloc_queue(struct dasd_block *block) */ static void dasd_setup_queue(struct dasd_block *block) { + struct request_queue *q = block->request_queue; int max; if (block->base->features & DASD_FEATURE_USERAW) { @@ -3135,17 +3152,16 @@ static void dasd_setup_queue(struct dasd_block *block) } else { max = block->base->discipline->max_blocks << block->s2b_shift; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); - block->request_queue->limits.max_dev_sectors = max; - blk_queue_logical_block_size(block->request_queue, - block->bp_block); - blk_queue_max_hw_sectors(block->request_queue, max); - blk_queue_max_segments(block->request_queue, -1L); + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + q->limits.max_dev_sectors = max; + blk_queue_logical_block_size(q, block->bp_block); + blk_queue_max_hw_sectors(q, max); + blk_queue_max_segments(q, USHRT_MAX); /* with page sized segments we can translate each segement into * one idaw/tidaw */ - blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); - blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); + 
blk_queue_max_segment_size(q, PAGE_SIZE); + blk_queue_segment_boundary(q, PAGE_SIZE - 1); } /* @@ -3517,11 +3533,15 @@ int dasd_generic_set_offline(struct ccw_device *cdev) struct dasd_device *device; struct dasd_block *block; int max_count, open_count, rc; + unsigned long flags; rc = 0; - device = dasd_device_from_cdev(cdev); - if (IS_ERR(device)) + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + device = dasd_device_from_cdev_locked(cdev); + if (IS_ERR(device)) { + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); return PTR_ERR(device); + } /* * We must make sure that this device is currently not in use. @@ -3540,8 +3560,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev) pr_warn("%s: The DASD cannot be set offline while it is in use\n", dev_name(&cdev->dev)); clear_bit(DASD_FLAG_OFFLINE, &device->flags); - dasd_put_device(device); - return -EBUSY; + goto out_busy; } } @@ -3551,19 +3570,19 @@ int dasd_generic_set_offline(struct ccw_device *cdev) * could only be called by normal offline so safe_offline flag * needs to be removed to run normal offline and kill all I/O */ - if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { + if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) /* Already doing normal offline processing */ - dasd_put_device(device); - return -EBUSY; - } else + goto out_busy; + else clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); - - } else - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { + } else { + if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) /* Already doing offline processing */ - dasd_put_device(device); - return -EBUSY; - } + goto out_busy; + } + + set_bit(DASD_FLAG_OFFLINE, &device->flags); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); /* * if safe_offline called set safe_offline_running flag and @@ -3591,7 +3610,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev) goto interrupted; } - set_bit(DASD_FLAG_OFFLINE, &device->flags); dasd_set_target_state(device, DASD_STATE_NEW); /* dasd_delete_device destroys the device reference. 
*/ block = device->block; @@ -3610,7 +3628,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev) clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); clear_bit(DASD_FLAG_OFFLINE, &device->flags); dasd_put_device(device); + return rc; + +out_busy: + dasd_put_device(device); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + + return -EBUSY; } EXPORT_SYMBOL_GPL(dasd_generic_set_offline); @@ -3675,14 +3700,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) case CIO_GONE: case CIO_BOXED: case CIO_NO_PATH: - device->path_data.opm = 0; - device->path_data.ppm = 0; - device->path_data.npm = 0; + dasd_path_no_path(device); ret = dasd_generic_last_path_gone(device); break; case CIO_OPER: ret = 1; - if (device->path_data.opm) + if (dasd_path_get_opm(device)) ret = dasd_generic_path_operational(device); break; } @@ -3693,48 +3716,32 @@ EXPORT_SYMBOL_GPL(dasd_generic_notify); void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) { - int chp; - __u8 oldopm, eventlpm; struct dasd_device *device; + int chp, oldopm, hpfpm, ifccpm; device = dasd_device_from_cdev_locked(cdev); if (IS_ERR(device)) return; + + oldopm = dasd_path_get_opm(device); for (chp = 0; chp < 8; chp++) { - eventlpm = 0x80 >> chp; if (path_event[chp] & PE_PATH_GONE) { - oldopm = device->path_data.opm; - device->path_data.opm &= ~eventlpm; - device->path_data.ppm &= ~eventlpm; - device->path_data.npm &= ~eventlpm; - if (oldopm && !device->path_data.opm) { - dev_warn(&device->cdev->dev, - "No verified channel paths remain " - "for the device\n"); - DBF_DEV_EVENT(DBF_WARNING, device, - "%s", "last verified path gone"); - dasd_eer_write(device, NULL, DASD_EER_NOPATH); - dasd_device_set_stop_bits(device, - DASD_STOPPED_DC_WAIT); - } + dasd_path_notoper(device, chp); } if (path_event[chp] & PE_PATH_AVAILABLE) { - device->path_data.opm &= ~eventlpm; - device->path_data.ppm &= ~eventlpm; - device->path_data.npm &= ~eventlpm; - device->path_data.tbvpm |= eventlpm; + dasd_path_available(device, chp); dasd_schedule_device_bh(device); } if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { - if (!(device->path_data.opm & eventlpm) && - !(device->path_data.tbvpm & eventlpm)) { + if (!dasd_path_is_operational(device, chp) && + !dasd_path_need_verify(device, chp)) { /* * we can not establish a pathgroup on an * unavailable path, so trigger a path * verification first */ - device->path_data.tbvpm |= eventlpm; - dasd_schedule_device_bh(device); + dasd_path_available(device, chp); + dasd_schedule_device_bh(device); } DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Pathgroup re-established\n"); @@ -3742,45 +3749,65 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) device->discipline->kick_validate(device); } } + hpfpm = dasd_path_get_hpfpm(device); + ifccpm = dasd_path_get_ifccpm(device); + if (!dasd_path_get_opm(device) && hpfpm) { + /* + * device has no operational paths but at least one path is + * disabled due to HPF errors + * disable HPF at all and use the path(s) again + */ + if (device->discipline->disable_hpf) + device->discipline->disable_hpf(device); + dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); + dasd_path_set_tbvpm(device, hpfpm); + dasd_schedule_device_bh(device); + dasd_schedule_requeue(device); + } else if (!dasd_path_get_opm(device) && ifccpm) { + /* + * device has no operational paths but at least one path is + * disabled due to IFCC errors + * trigger path verification on paths with IFCC errors + */ + dasd_path_set_tbvpm(device, ifccpm); + 
dasd_schedule_device_bh(device); + } + if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) { + dev_warn(&device->cdev->dev, + "No verified channel paths remain for the device\n"); + DBF_DEV_EVENT(DBF_WARNING, device, + "%s", "last verified path gone"); + dasd_eer_write(device, NULL, DASD_EER_NOPATH); + dasd_device_set_stop_bits(device, + DASD_STOPPED_DC_WAIT); + } dasd_put_device(device); } EXPORT_SYMBOL_GPL(dasd_generic_path_event); int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) { - if (!device->path_data.opm && lpm) { - device->path_data.opm = lpm; + if (!dasd_path_get_opm(device) && lpm) { + dasd_path_set_opm(device, lpm); dasd_generic_path_operational(device); } else - device->path_data.opm |= lpm; + dasd_path_add_opm(device, lpm); return 0; } EXPORT_SYMBOL_GPL(dasd_generic_verify_path); - -int dasd_generic_pm_freeze(struct ccw_device *cdev) +/* + * clear active requests and requeue them to block layer if possible + */ +static int dasd_generic_requeue_all_requests(struct dasd_device *device) { - struct dasd_device *device = dasd_device_from_cdev(cdev); - struct list_head freeze_queue; + struct list_head requeue_queue; struct dasd_ccw_req *cqr, *n; struct dasd_ccw_req *refers; int rc; - if (IS_ERR(device)) - return PTR_ERR(device); - - /* mark device as suspended */ - set_bit(DASD_FLAG_SUSPENDED, &device->flags); - - if (device->discipline->freeze) - rc = device->discipline->freeze(device); - - /* disallow new I/O */ - dasd_device_set_stop_bits(device, DASD_STOPPED_PM); - - /* clear active requests and requeue them to block layer if possible */ - INIT_LIST_HEAD(&freeze_queue); - spin_lock_irq(get_ccwdev_lock(cdev)); + INIT_LIST_HEAD(&requeue_queue); + spin_lock_irq(get_ccwdev_lock(device->cdev)); rc = 0; list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { /* Check status and move request to flush_queue */ @@ -3791,25 +3818,22 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) dev_err(&device->cdev->dev, "Unable to terminate request %p " "on suspend\n", cqr); - spin_unlock_irq(get_ccwdev_lock(cdev)); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); dasd_put_device(device); return rc; } } - list_move_tail(&cqr->devlist, &freeze_queue); + list_move_tail(&cqr->devlist, &requeue_queue); } - spin_unlock_irq(get_ccwdev_lock(cdev)); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); - list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { + list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { wait_event(dasd_flush_wq, (cqr->status != DASD_CQR_CLEAR_PENDING)); - if (cqr->status == DASD_CQR_CLEARED) - cqr->status = DASD_CQR_QUEUED; - /* requeue requests to blocklayer will only work for - block device requests */ - if (_dasd_requeue_request(cqr)) - continue; + /* mark sleepon requests as ended */ + if (cqr->callback_data == DASD_SLEEPON_START_TAG) + cqr->callback_data = DASD_SLEEPON_END_TAG; /* remove requests from device and block queue */ list_del_init(&cqr->devlist); @@ -3821,6 +3845,14 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) dasd_free_erp_request(cqr, cqr->memdev); cqr = refers; } + + /* + * requeue requests to blocklayer will only work + * for block device requests + */ + if (_dasd_requeue_request(cqr)) + continue; + if (cqr->block) list_del_init(&cqr->blocklist); cqr->block->base->discipline->free_cp( @@ -3831,15 +3863,56 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) * if requests remain then they are internal request * and go back to the device queue */ - if (!list_empty(&freeze_queue)) { + if 
(!list_empty(&requeue_queue)) { /* move freeze_queue to start of the ccw_queue */ - spin_lock_irq(get_ccwdev_lock(cdev)); - list_splice_tail(&freeze_queue, &device->ccw_queue); - spin_unlock_irq(get_ccwdev_lock(cdev)); + spin_lock_irq(get_ccwdev_lock(device->cdev)); + list_splice_tail(&requeue_queue, &device->ccw_queue); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); } - dasd_put_device(device); + /* wake up generic waitqueue for eventually ended sleepon requests */ + wake_up(&generic_waitq); return rc; } + +static void do_requeue_requests(struct work_struct *work) +{ + struct dasd_device *device = container_of(work, struct dasd_device, + requeue_requests); + dasd_generic_requeue_all_requests(device); + dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC); + if (device->block) + dasd_schedule_block_bh(device->block); + dasd_put_device(device); +} + +void dasd_schedule_requeue(struct dasd_device *device) +{ + dasd_get_device(device); + /* queue call to dasd_reload_device to the kernel event daemon. */ + if (!schedule_work(&device->requeue_requests)) + dasd_put_device(device); +} +EXPORT_SYMBOL(dasd_schedule_requeue); + +int dasd_generic_pm_freeze(struct ccw_device *cdev) +{ + struct dasd_device *device = dasd_device_from_cdev(cdev); + int rc; + + if (IS_ERR(device)) + return PTR_ERR(device); + + /* mark device as suspended */ + set_bit(DASD_FLAG_SUSPENDED, &device->flags); + + if (device->discipline->freeze) + rc = device->discipline->freeze(device); + + /* disallow new I/O */ + dasd_device_set_stop_bits(device, DASD_STOPPED_PM); + + return dasd_generic_requeue_all_requests(device); +} EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); int dasd_generic_restore_device(struct ccw_device *cdev) diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 8305ab688d5766b5eaf9b694bd10b52cfc7ec150..774da20ceb58a794d895a64ca857426d5a239642 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) opm = ccw_device_get_path_mask(device->cdev); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (erp->lpm == 0) - erp->lpm = device->path_data.opm & + erp->lpm = dasd_path_get_opm(device) & ~(erp->irb.esw.esw0.sublog.lpum); else erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum); @@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp) !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) { erp->status = DASD_CQR_FILLED; erp->retries = 10; - erp->lpm = erp->startdev->path_data.opm; + erp->lpm = dasd_path_get_opm(erp->startdev); erp->function = dasd_3990_erp_action_1_sec; } return erp; @@ -674,7 +674,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) break; case 0x0D: dev_warn(&device->cdev->dev, - "FORMAT 4 - No syn byte in count " + "FORMAT 4 - No sync byte in count " "address area; offset active\n"); break; case 0x0E: @@ -684,7 +684,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) break; case 0x0F: dev_warn(&device->cdev->dev, - "FORMAT 4 - No syn byte in data area; " + "FORMAT 4 - No sync byte in data area; " "offset active\n"); break; default: @@ -999,7 +999,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) break; default: dev_warn(&device->cdev->dev, - "FORMAT D - Reserved\n"); + "FORMAT F - Reserved\n"); } break; @@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense) !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) { /* reset the 
lpm and the status to be able to * try further actions. */ - erp->lpm = erp->startdev->path_data.opm; + erp->lpm = dasd_path_get_opm(erp->startdev); erp->status = DASD_CQR_NEED_ERP; } } @@ -2208,6 +2208,51 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) } /* end dasd_3990_erp_inspect_32 */ +static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum) +{ + int pos = pathmask_to_pos(lpum); + + /* no remaining path, cannot disable */ + if (!(dasd_path_get_opm(device) & ~lpum)) + return; + + dev_err(&device->cdev->dev, + "Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n", + device->path[pos].cssid, device->path[pos].chpid, lpum); + dasd_path_remove_opm(device, lpum); + dasd_path_add_ifccpm(device, lpum); + device->path[pos].errorclk = 0; + atomic_set(&device->path[pos].error_count, 0); +} + +static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp) +{ + struct dasd_device *device = erp->startdev; + __u8 lpum = erp->refers->irb.esw.esw1.lpum; + int pos = pathmask_to_pos(lpum); + unsigned long long clk; + + if (!device->path_thrhld) + return; + + clk = get_tod_clock(); + /* + * check if the last error is longer ago than the timeout, + * if so reset error state + */ + if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC) + >= device->path_interval) { + atomic_set(&device->path[pos].error_count, 0); + device->path[pos].errorclk = 0; + } + atomic_inc(&device->path[pos].error_count); + device->path[pos].errorclk = clk; + /* threshold exceeded disable path if possible */ + if (atomic_read(&device->path[pos].error_count) >= + device->path_thrhld) + dasd_3990_erp_disable_path(device, lpum); +} + /* ***************************************************************************** * main ERP control functions (24 and 32 byte sense) @@ -2237,6 +2282,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp) | SCHN_STAT_CHN_CTRL_CHK)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "channel or interface control check"); + dasd_3990_erp_account_error(erp); erp = dasd_3990_erp_action_4(erp, NULL); } return erp; diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 15a1a70cace90f6eae0dc6a9466970df293e4132..dd46e96a3034239b875a4ea3eeb9f5c8f01c4db3 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include /* This is ugly... */ @@ -725,27 +725,15 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr, static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; - int val; - char *endp; - - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); + unsigned int val; + int rc; - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; - spin_lock(&dasd_devmap_lock); - if (val) - devmap->features |= DASD_FEATURE_FAILFAST; - else - devmap->features &= ~DASD_FEATURE_FAILFAST; - if (devmap->device) - devmap->device->features = devmap->features; - spin_unlock(&dasd_devmap_lock); - return count; + rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val); + + return rc ? 
: count; } static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store); @@ -771,32 +759,41 @@ static ssize_t dasd_ro_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; + struct ccw_device *cdev = to_ccwdev(dev); struct dasd_device *device; - int val; - char *endp; - - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); + unsigned long flags; + unsigned int val; + int rc; - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; - spin_lock(&dasd_devmap_lock); - if (val) - devmap->features |= DASD_FEATURE_READONLY; - else - devmap->features &= ~DASD_FEATURE_READONLY; - device = devmap->device; - if (device) { - device->features = devmap->features; - val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags); + rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val); + if (rc) + return rc; + + device = dasd_device_from_cdev(cdev); + if (IS_ERR(device)) + return PTR_ERR(device); + + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags); + + if (!device->block || !device->block->gdp || + test_bit(DASD_FLAG_OFFLINE, &device->flags)) { + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + goto out; } - spin_unlock(&dasd_devmap_lock); - if (device && device->block && device->block->gdp) - set_disk_ro(device->block->gdp, val); + /* Increase open_count to avoid losing the block device */ + atomic_inc(&device->block->open_count); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + + set_disk_ro(device->block->gdp, val); + atomic_dec(&device->block->open_count); + +out: + dasd_put_device(device); + return count; } @@ -823,27 +820,15 @@ static ssize_t dasd_erplog_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; - int val; - char *endp; - - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); + unsigned int val; + int rc; - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; - spin_lock(&dasd_devmap_lock); - if (val) - devmap->features |= DASD_FEATURE_ERPLOG; - else - devmap->features &= ~DASD_FEATURE_ERPLOG; - if (devmap->device) - devmap->device->features = devmap->features; - spin_unlock(&dasd_devmap_lock); - return count; + rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val); + + return rc ? 
: count; } static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store); @@ -871,16 +856,14 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dasd_devmap *devmap; + unsigned int val; ssize_t rc; - int val; - char *endp; devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); if (IS_ERR(devmap)) return PTR_ERR(devmap); - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; spin_lock(&dasd_devmap_lock); @@ -994,10 +977,12 @@ dasd_access_show(struct device *dev, struct device_attribute *attr, if (IS_ERR(device)) return PTR_ERR(device); - if (device->discipline->host_access_count) - count = device->discipline->host_access_count(device); - else + if (!device->discipline) + count = -ENODEV; + else if (!device->discipline->host_access_count) count = -EOPNOTSUPP; + else + count = device->discipline->host_access_count(device); dasd_put_device(device); if (count < 0) @@ -1197,27 +1182,25 @@ static ssize_t dasd_eer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; - int val, rc; - char *endp; + struct dasd_device *device; + unsigned int val; + int rc = 0; - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); - if (!devmap->device) - return -ENODEV; + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return PTR_ERR(device); - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; - if (val) { - rc = dasd_eer_enable(devmap->device); - if (rc) - return rc; - } else - dasd_eer_disable(devmap->device); - return count; + if (val) + rc = dasd_eer_enable(device); + else + dasd_eer_disable(device); + + dasd_put_device(device); + + return rc ? 
: count; } static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); @@ -1360,6 +1343,50 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(timeout, 0644, dasd_timeout_show, dasd_timeout_store); + +static ssize_t +dasd_path_reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + unsigned int val; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff) + val = 0; + + if (device->discipline && device->discipline->reset_path) + device->discipline->reset_path(device, (__u8) val); + + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store); + +static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dasd_device *device; + int hpf; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + if (!device->discipline || !device->discipline->hpf_enabled) { + dasd_put_device(device); + return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx); + } + hpf = device->discipline->hpf_enabled(device); + dasd_put_device(device); + return snprintf(buf, PAGE_SIZE, "%d\n", hpf); +} + +static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL); + static ssize_t dasd_reservation_policy_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1385,27 +1412,17 @@ static ssize_t dasd_reservation_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; + struct ccw_device *cdev = to_ccwdev(dev); int rc; - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); - rc = 0; - spin_lock(&dasd_devmap_lock); if (sysfs_streq("ignore", buf)) - devmap->features &= ~DASD_FEATURE_FAILONSLCK; + rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0); else if (sysfs_streq("fail", buf)) - devmap->features |= DASD_FEATURE_FAILONSLCK; + rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1); else rc = -EINVAL; - if (devmap->device) - devmap->device->features = devmap->features; - spin_unlock(&dasd_devmap_lock); - if (rc) - return rc; - else - return count; + + return rc ? 
: count; } static DEVICE_ATTR(reservation_policy, 0644, @@ -1461,25 +1478,120 @@ static ssize_t dasd_pm_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dasd_device *device; - u8 opm, nppm, cablepm, cuirpm, hpfpm; + u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm; device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return sprintf(buf, "0\n"); - opm = device->path_data.opm; - nppm = device->path_data.npm; - cablepm = device->path_data.cablepm; - cuirpm = device->path_data.cuirpm; - hpfpm = device->path_data.hpfpm; + opm = dasd_path_get_opm(device); + nppm = dasd_path_get_nppm(device); + cablepm = dasd_path_get_cablepm(device); + cuirpm = dasd_path_get_cuirpm(device); + hpfpm = dasd_path_get_hpfpm(device); + ifccpm = dasd_path_get_ifccpm(device); dasd_put_device(device); - return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm, - cablepm, cuirpm, hpfpm); + return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm, + cablepm, cuirpm, hpfpm, ifccpm); } static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL); +/* + * threshold value for IFCC/CCC errors + */ +static ssize_t +dasd_path_threshold_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dasd_device *device; + int len; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld); + dasd_put_device(device); + return len; +} + +static ssize_t +dasd_path_threshold_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + unsigned long flags; + unsigned long val; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if ((kstrtoul(buf, 10, &val) != 0) || + (val > DASD_THRHLD_MAX) || val == 0) { + dasd_put_device(device); + return -EINVAL; + } + spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags); + if (val) + device->path_thrhld = val; + spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags); + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show, + dasd_path_threshold_store); +/* + * interval for IFCC/CCC checks + * meaning time with no IFCC/CCC error before the error counter + * gets reset + */ +static ssize_t +dasd_path_interval_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dasd_device *device; + int len; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval); + dasd_put_device(device); + return len; +} + +static ssize_t +dasd_path_interval_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + unsigned long flags; + unsigned long val; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if ((kstrtoul(buf, 10, &val) != 0) || + (val > DASD_INTERVAL_MAX) || val == 0) { + dasd_put_device(device); + return -EINVAL; + } + spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags); + if (val) + device->path_interval = val; + spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags); + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show, + dasd_path_interval_store); + + static struct attribute * dasd_attrs[] = { &dev_attr_readonly.attr, &dev_attr_discipline.attr, @@ -1500,6 +1612,10 @@ 
static struct attribute * dasd_attrs[] = { &dev_attr_safe_offline.attr, &dev_attr_host_access_count.attr, &dev_attr_path_masks.attr, + &dev_attr_path_threshold.attr, + &dev_attr_path_interval.attr, + &dev_attr_path_reset.attr, + &dev_attr_hpf.attr, NULL, }; @@ -1531,7 +1647,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag) { struct dasd_devmap *devmap; - devmap = dasd_find_busid(dev_name(&cdev->dev)); + devmap = dasd_devmap_from_cdev(cdev); if (IS_ERR(devmap)) return PTR_ERR(devmap); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a7a88476e215e7b027958adb5c162a4d2b959802..ade04216c970094f0e5d8c76cf62c738d9140780 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include @@ -1042,8 +1042,11 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device) private->conf_data = NULL; private->conf_len = 0; for (i = 0; i < 8; i++) { - kfree(private->path_conf_data[i]); - private->path_conf_data[i] = NULL; + kfree(device->path[i].conf_data); + device->path[i].conf_data = NULL; + device->path[i].cssid = 0; + device->path[i].ssid = 0; + device->path[i].chpid = 0; } } @@ -1055,13 +1058,14 @@ static int dasd_eckd_read_conf(struct dasd_device *device) int rc, path_err, pos; __u8 lpm, opm; struct dasd_eckd_private *private, path_private; - struct dasd_path *path_data; struct dasd_uid *uid; char print_path_uid[60], print_device_uid[60]; + struct channel_path_desc *chp_desc; + struct subchannel_id sch_id; private = device->private; - path_data = &device->path_data; opm = ccw_device_get_path_mask(device->cdev); + ccw_device_get_schid(device->cdev, &sch_id); conf_data_saved = 0; path_err = 0; /* get configuration data per operational path */ @@ -1081,7 +1085,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device) "No configuration data " "retrieved"); /* no further analysis possible */ - path_data->opm |= lpm; + dasd_path_add_opm(device, opm); continue; /* no error */ } /* save first valid configuration data */ @@ -1098,8 +1102,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device) } pos = pathmask_to_pos(lpm); /* store per path conf_data */ - private->path_conf_data[pos] = - (struct dasd_conf_data *) conf_data; + device->path[pos].conf_data = conf_data; + device->path[pos].cssid = sch_id.cssid; + device->path[pos].ssid = sch_id.ssid; + chp_desc = ccw_device_get_chp_desc(device->cdev, pos); + if (chp_desc) + device->path[pos].chpid = chp_desc->chpid; + kfree(chp_desc); /* * build device UID that other path data * can be compared to it @@ -1154,42 +1163,66 @@ static int dasd_eckd_read_conf(struct dasd_device *device) "device %s instead of %s\n", lpm, print_path_uid, print_device_uid); path_err = -EINVAL; - path_data->cablepm |= lpm; + dasd_path_add_cablepm(device, lpm); continue; } pos = pathmask_to_pos(lpm); /* store per path conf_data */ - private->path_conf_data[pos] = - (struct dasd_conf_data *) conf_data; + device->path[pos].conf_data = conf_data; + device->path[pos].cssid = sch_id.cssid; + device->path[pos].ssid = sch_id.ssid; + chp_desc = ccw_device_get_chp_desc(device->cdev, pos); + if (chp_desc) + device->path[pos].chpid = chp_desc->chpid; + kfree(chp_desc); path_private.conf_data = NULL; path_private.conf_len = 0; } switch (dasd_eckd_path_access(conf_data, conf_len)) { case 0x02: - path_data->npm |= lpm; + dasd_path_add_nppm(device, lpm); break; case 0x03: - path_data->ppm |= lpm; + dasd_path_add_ppm(device, 
lpm); break; } - if (!path_data->opm) { - path_data->opm = lpm; + if (!dasd_path_get_opm(device)) { + dasd_path_set_opm(device, lpm); dasd_generic_path_operational(device); } else { - path_data->opm |= lpm; + dasd_path_add_opm(device, lpm); } - /* - * if the path is used - * it should not be in one of the negative lists - */ - path_data->cablepm &= ~lpm; - path_data->hpfpm &= ~lpm; - path_data->cuirpm &= ~lpm; } return path_err; } +static u32 get_fcx_max_data(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + int fcx_in_css, fcx_in_gneq, fcx_in_features; + int tpm, mdc; + + if (dasd_nofcx) + return 0; + /* is transport mode supported? */ + fcx_in_css = css_general_characteristics.fcx; + fcx_in_gneq = private->gneq->reserved2[7] & 0x04; + fcx_in_features = private->features.feature[40] & 0x80; + tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; + + if (!tpm) + return 0; + + mdc = ccw_device_get_mdc(device->cdev, 0); + if (mdc < 0) { + dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n"); + return 0; + } else { + return (u32)mdc * FCX_MAX_DATA_FACTOR; + } +} + static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) { struct dasd_eckd_private *private = device->private; @@ -1222,8 +1255,7 @@ static int rebuild_device_uid(struct dasd_device *device, struct path_verification_work_data *data) { struct dasd_eckd_private *private = device->private; - struct dasd_path *path_data = &device->path_data; - __u8 lpm, opm = path_data->opm; + __u8 lpm, opm = dasd_path_get_opm(device); int rc = -ENODEV; for (lpm = 0x80; lpm; lpm >>= 1) { @@ -1356,7 +1388,7 @@ static void do_path_verification_work(struct work_struct *work) * in other case the device UID may have changed and * the first working path UID will be used as device UID */ - if (device->path_data.opm && + if (dasd_path_get_opm(device) && dasd_eckd_compare_path_uid(device, &path_private)) { /* * the comparison was not successful @@ -1406,23 +1438,17 @@ static void do_path_verification_work(struct work_struct *work) * situation in dasd_start_IO. 
*/ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); - if (!device->path_data.opm && opm) { - device->path_data.opm = opm; - device->path_data.cablepm &= ~opm; - device->path_data.cuirpm &= ~opm; - device->path_data.hpfpm &= ~opm; + if (!dasd_path_get_opm(device) && opm) { + dasd_path_set_opm(device, opm); dasd_generic_path_operational(device); } else { - device->path_data.opm |= opm; - device->path_data.cablepm &= ~opm; - device->path_data.cuirpm &= ~opm; - device->path_data.hpfpm &= ~opm; + dasd_path_add_opm(device, opm); } - device->path_data.npm |= npm; - device->path_data.ppm |= ppm; - device->path_data.tbvpm |= epm; - device->path_data.cablepm |= cablepm; - device->path_data.hpfpm |= hpfpm; + dasd_path_add_nppm(device, npm); + dasd_path_add_ppm(device, ppm); + dasd_path_add_tbvpm(device, epm); + dasd_path_add_cablepm(device, cablepm); + dasd_path_add_nohpfpm(device, hpfpm); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags); @@ -1456,6 +1482,19 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm) return 0; } +static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm) +{ + struct dasd_eckd_private *private = device->private; + unsigned long flags; + + if (!private->fcx_max_data) + private->fcx_max_data = get_fcx_max_data(device); + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device)); + dasd_schedule_device_bh(device); + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); +} + static int dasd_eckd_read_features(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; @@ -1652,32 +1691,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device) dasd_put_device(device); } -static u32 get_fcx_max_data(struct dasd_device *device) -{ - struct dasd_eckd_private *private = device->private; - int fcx_in_css, fcx_in_gneq, fcx_in_features; - int tpm, mdc; - - if (dasd_nofcx) - return 0; - /* is transport mode supported? */ - fcx_in_css = css_general_characteristics.fcx; - fcx_in_gneq = private->gneq->reserved2[7] & 0x04; - fcx_in_features = private->features.feature[40] & 0x80; - tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; - - if (!tpm) - return 0; - - mdc = ccw_device_get_mdc(device->cdev, 0); - if (mdc < 0) { - dev_warn(&device->cdev->dev, "Detecting the maximum supported" - " data size for zHPF requests failed\n"); - return 0; - } else - return (u32)mdc * FCX_MAX_DATA_FACTOR; -} - /* * Check device characteristics. * If the device is accessible using ECKD discipline, the device is enabled. 
@@ -1729,10 +1742,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device) if (rc) goto out_err1; - /* set default timeout */ + /* set some default values */ device->default_expires = DASD_EXPIRES; - /* set default retry count */ device->default_retries = DASD_RETRIES; + device->path_thrhld = DASD_ECKD_PATH_THRHLD; + device->path_interval = DASD_ECKD_PATH_INTERVAL; if (private->gneq) { value = 1; @@ -1839,13 +1853,16 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) private->gneq = NULL; private->conf_len = 0; for (i = 0; i < 8; i++) { - kfree(private->path_conf_data[i]); - if ((__u8 *)private->path_conf_data[i] == + kfree(device->path[i].conf_data); + if ((__u8 *)device->path[i].conf_data == private->conf_data) { private->conf_data = NULL; private->conf_len = 0; } - private->path_conf_data[i] = NULL; + device->path[i].conf_data = NULL; + device->path[i].cssid = 0; + device->path[i].ssid = 0; + device->path[i].chpid = 0; } kfree(private->conf_data); private->conf_data = NULL; @@ -2966,7 +2983,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) if (cqr->block && (cqr->startdev != cqr->block->base)) { dasd_eckd_reset_ccw_to_base_io(cqr); cqr->startdev = cqr->block->base; - cqr->lpm = cqr->block->base->path_data.opm; + cqr->lpm = dasd_path_get_opm(cqr->block->base); } }; @@ -3251,7 +3268,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( cqr->memdev = startdev; cqr->block = block; cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ - cqr->lpm = startdev->path_data.ppm; + cqr->lpm = dasd_path_get_ppm(startdev); cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -3426,7 +3443,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( cqr->memdev = startdev; cqr->block = block; cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ - cqr->lpm = startdev->path_data.ppm; + cqr->lpm = dasd_path_get_ppm(startdev); cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -3735,7 +3752,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( cqr->memdev = startdev; cqr->block = block; cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ - cqr->lpm = startdev->path_data.ppm; + cqr->lpm = dasd_path_get_ppm(startdev); cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -3962,7 +3979,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, cqr->memdev = startdev; cqr->block = block; cqr->expires = startdev->default_expires * HZ; - cqr->lpm = startdev->path_data.ppm; + cqr->lpm = dasd_path_get_ppm(startdev); cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -4783,7 +4800,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), - irb->scsw.tm.fcxs, irb->scsw.tm.schxs, + irb->scsw.tm.fcxs, + (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, req ? 
req->intrc : 0); len += sprintf(page + len, PRINTK_HEADER " device %s: Failing TCW: %p\n", @@ -5306,11 +5324,10 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m) */ static int dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, - __u32 message_id, - struct channel_path_desc *desc, - struct subchannel_id sch_id) + __u32 message_id, __u8 lpum) { struct dasd_psf_cuir_response *psf_cuir; + int pos = pathmask_to_pos(lpum); struct dasd_ccw_req *cqr; struct ccw1 *ccw; int rc; @@ -5328,11 +5345,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, psf_cuir = (struct dasd_psf_cuir_response *)cqr->data; psf_cuir->order = PSF_ORDER_CUIR_RESPONSE; psf_cuir->cc = response; - if (desc) - psf_cuir->chpid = desc->chpid; + psf_cuir->chpid = device->path[pos].chpid; psf_cuir->message_id = message_id; - psf_cuir->cssid = sch_id.cssid; - psf_cuir->ssid = sch_id.ssid; + psf_cuir->cssid = device->path[pos].cssid; + psf_cuir->ssid = device->path[pos].ssid; ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->cda = (__u32)(addr_t)psf_cuir; @@ -5363,20 +5379,19 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device, __u8 lpum, struct dasd_cuir_message *cuir) { - struct dasd_eckd_private *private = device->private; struct dasd_conf_data *conf_data; int path, pos; if (cuir->record_selector == 0) goto out; for (path = 0x80, pos = 0; path; path >>= 1, pos++) { - conf_data = private->path_conf_data[pos]; + conf_data = device->path[pos].conf_data; if (conf_data->gneq.record_selector == cuir->record_selector) return conf_data; } out: - return private->path_conf_data[pathmask_to_pos(lpum)]; + return device->path[pathmask_to_pos(lpum)].conf_data; } /* @@ -5391,7 +5406,6 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device, static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, struct dasd_cuir_message *cuir) { - struct dasd_eckd_private *private = device->private; struct dasd_conf_data *ref_conf_data; unsigned long bitmask = 0, mask = 0; struct dasd_conf_data *conf_data; @@ -5417,11 +5431,10 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, mask |= cuir->neq_map[1] << 8; mask |= cuir->neq_map[0] << 16; - for (path = 0x80; path; path >>= 1) { + for (path = 0; path < 8; path++) { /* initialise data per path */ bitmask = mask; - pos = pathmask_to_pos(path); - conf_data = private->path_conf_data[pos]; + conf_data = device->path[path].conf_data; pos = 8 - ffs(cuir->ned_map); ned = (char *) &conf_data->neds[pos]; /* compare reference ned and per path ned */ @@ -5442,33 +5455,29 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, continue; /* device and path match the reference values add path to CUIR scope */ - tbcpm |= path; + tbcpm |= 0x80 >> path; } return tbcpm; } static void dasd_eckd_cuir_notify_user(struct dasd_device *device, - unsigned long paths, - struct subchannel_id sch_id, int action) + unsigned long paths, int action) { - struct channel_path_desc *desc; int pos; while (paths) { /* get position of bit in mask */ - pos = ffs(paths) - 1; + pos = 8 - ffs(paths); /* get channel path descriptor from this position */ - desc = ccw_device_get_chp_desc(device->cdev, 7 - pos); if (action == CUIR_QUIESCE) - pr_warn("Service on the storage server caused path " - "%x.%02x to go offline", sch_id.cssid, - desc ? 
desc->chpid : 0); + pr_warn("Service on the storage server caused path %x.%02x to go offline", + device->path[pos].cssid, + device->path[pos].chpid); else if (action == CUIR_RESUME) - pr_info("Path %x.%02x is back online after service " - "on the storage server", sch_id.cssid, - desc ? desc->chpid : 0); - kfree(desc); - clear_bit(pos, &paths); + pr_info("Path %x.%02x is back online after service on the storage server", + device->path[pos].cssid, + device->path[pos].chpid); + clear_bit(7 - pos, &paths); } } @@ -5479,16 +5488,16 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum, tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir); /* nothing to do if path is not in use */ - if (!(device->path_data.opm & tbcpm)) + if (!(dasd_path_get_opm(device) & tbcpm)) return 0; - if (!(device->path_data.opm & ~tbcpm)) { + if (!(dasd_path_get_opm(device) & ~tbcpm)) { /* no path would be left if the CUIR action is taken return error */ return -EINVAL; } /* remove device from operational path mask */ - device->path_data.opm &= ~tbcpm; - device->path_data.cuirpm |= tbcpm; + dasd_path_remove_opm(device, tbcpm); + dasd_path_add_cuirpm(device, tbcpm); return tbcpm; } @@ -5501,7 +5510,6 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum, * notify the already set offline devices again */ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, - struct subchannel_id sch_id, struct dasd_cuir_message *cuir) { struct dasd_eckd_private *private = device->private; @@ -5556,14 +5564,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, } } /* notify user about all paths affected by CUIR action */ - dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE); + dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE); return 0; out_err: return tbcpm; } static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, - struct subchannel_id sch_id, struct dasd_cuir_message *cuir) { struct dasd_eckd_private *private = device->private; @@ -5581,8 +5588,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, alias_list) { tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); paths |= tbcpm; - if (!(dev->path_data.opm & tbcpm)) { - dev->path_data.tbvpm |= tbcpm; + if (!(dasd_path_get_opm(dev) & tbcpm)) { + dasd_path_add_tbvpm(dev, tbcpm); dasd_schedule_device_bh(dev); } } @@ -5591,8 +5598,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, alias_list) { tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); paths |= tbcpm; - if (!(dev->path_data.opm & tbcpm)) { - dev->path_data.tbvpm |= tbcpm; + if (!(dasd_path_get_opm(dev) & tbcpm)) { + dasd_path_add_tbvpm(dev, tbcpm); dasd_schedule_device_bh(dev); } } @@ -5605,8 +5612,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, alias_list) { tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); paths |= tbcpm; - if (!(dev->path_data.opm & tbcpm)) { - dev->path_data.tbvpm |= tbcpm; + if (!(dasd_path_get_opm(dev) & tbcpm)) { + dasd_path_add_tbvpm(dev, tbcpm); dasd_schedule_device_bh(dev); } } @@ -5615,14 +5622,14 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, alias_list) { tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); paths |= tbcpm; - if (!(dev->path_data.opm & tbcpm)) { - dev->path_data.tbvpm |= tbcpm; + if (!(dasd_path_get_opm(dev) & tbcpm)) { + dasd_path_add_tbvpm(dev, tbcpm); dasd_schedule_device_bh(dev); } } } /* notify user about all paths affected by CUIR action */ - 
dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME); + dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME); return 0; } @@ -5630,38 +5637,31 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages, __u8 lpum) { struct dasd_cuir_message *cuir = messages; - struct channel_path_desc *desc; - struct subchannel_id sch_id; - int pos, response; + int response; DBF_DEV_EVENT(DBF_WARNING, device, "CUIR request: %016llx %016llx %016llx %08x", ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2], ((u32 *)cuir)[3]); - ccw_device_get_schid(device->cdev, &sch_id); - pos = pathmask_to_pos(lpum); - desc = ccw_device_get_chp_desc(device->cdev, pos); if (cuir->code == CUIR_QUIESCE) { /* quiesce */ - if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir)) + if (dasd_eckd_cuir_quiesce(device, lpum, cuir)) response = PSF_CUIR_LAST_PATH; else response = PSF_CUIR_COMPLETED; } else if (cuir->code == CUIR_RESUME) { /* resume */ - dasd_eckd_cuir_resume(device, lpum, sch_id, cuir); + dasd_eckd_cuir_resume(device, lpum, cuir); response = PSF_CUIR_COMPLETED; } else response = PSF_CUIR_NOT_SUPPORTED; dasd_eckd_psf_cuir_response(device, response, - cuir->message_id, desc, sch_id); + cuir->message_id, lpum); DBF_DEV_EVENT(DBF_WARNING, device, "CUIR response: %d on message ID %08x", response, cuir->message_id); - /* free descriptor copy */ - kfree(desc); /* to make sure there is no attention left schedule work again */ device->discipline->check_attention(device, lpum); } @@ -5708,6 +5708,63 @@ static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum) return 0; } +static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum) +{ + if (~lpum & dasd_path_get_opm(device)) { + dasd_path_add_nohpfpm(device, lpum); + dasd_path_remove_opm(device, lpum); + dev_err(&device->cdev->dev, + "Channel path %02X lost HPF functionality and is disabled\n", + lpum); + return 1; + } + return 0; +} + +static void dasd_eckd_disable_hpf_device(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + dev_err(&device->cdev->dev, + "High Performance FICON disabled\n"); + private->fcx_max_data = 0; +} + +static int dasd_eckd_hpf_enabled(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->fcx_max_data ? 
1 : 0; +} + +static void dasd_eckd_handle_hpf_error(struct dasd_device *device, + struct irb *irb) +{ + struct dasd_eckd_private *private = device->private; + + if (!private->fcx_max_data) { + /* sanity check for no HPF, the error makes no sense */ + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "Trying to disable HPF for a non HPF device"); + return; + } + if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) { + dasd_eckd_disable_hpf_device(device); + } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) { + if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum)) + return; + dasd_eckd_disable_hpf_device(device); + dasd_path_set_tbvpm(device, + dasd_path_get_hpfpm(device)); + } + /* + * prevent any new I/O from being started on the device and schedule + * a requeue of existing requests + */ + dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); + dasd_schedule_requeue(device); +} + static struct ccw_driver dasd_eckd_driver = { .driver = { .name = "dasd-eckd", @@ -5776,6 +5833,10 @@ static struct dasd_discipline dasd_eckd_discipline = { .check_attention = dasd_eckd_check_attention, .host_access_count = dasd_eckd_host_access_count, .hosts_print = dasd_hosts_print, + .handle_hpf_error = dasd_eckd_handle_hpf_error, + .disable_hpf = dasd_eckd_disable_hpf_device, + .hpf_enabled = dasd_eckd_hpf_enabled, + .reset_path = dasd_eckd_reset_path, }; static int __init diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index 59803626ea36cd9662c7d208a27d59b238aa836d..e2a710c250a567ff99bbb8ff7e8e22668fca2d84 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h @@ -94,6 +94,8 @@ #define FCX_MAX_DATA_FACTOR 65536 #define DASD_ECKD_RCD_DATA_SIZE 256 +#define DASD_ECKD_PATH_THRHLD 256 +#define DASD_ECKD_PATH_INTERVAL 300 /***************************************************************************** * SECTION: Type Definitions @@ -535,8 +537,7 @@ struct dasd_eckd_private { struct dasd_eckd_characteristics rdc_data; u8 *conf_data; int conf_len; - /* per path configuration data */ - struct dasd_conf_data *path_conf_data[8]; + /* pointers to specific parts in the conf_data */ struct dasd_ned *ned; struct dasd_sneq *sneq; diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 21ef63cf0960db2e8288497b7d197952c498c3b7..8713fefd794bfa7e89cfc79c07f06460236304b5 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include @@ -454,20 +454,30 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) */ int dasd_eer_enable(struct dasd_device *device) { - struct dasd_ccw_req *cqr; + struct dasd_ccw_req *cqr = NULL; unsigned long flags; struct ccw1 *ccw; + int rc = 0; + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); if (device->eer_cqr) - return 0; + goto out; + else if (!device->discipline || + strcmp(device->discipline->name, "ECKD")) + rc = -EMEDIUMTYPE; + else if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) + rc = -EBUSY; - if (!device->discipline || strcmp(device->discipline->name, "ECKD")) - return -EPERM; /* FIXME: -EMEDIUMTYPE ? 
*/ + if (rc) + goto out; cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, SNSS_DATA_SIZE, device); - if (IS_ERR(cqr)) - return -ENOMEM; + if (IS_ERR(cqr)) { + rc = -ENOMEM; + cqr = NULL; + goto out; + } cqr->startdev = device; cqr->retries = 255; @@ -485,15 +495,18 @@ int dasd_eer_enable(struct dasd_device *device) cqr->status = DASD_CQR_FILLED; cqr->callback = dasd_eer_snss_cb; - spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); if (!device->eer_cqr) { device->eer_cqr = cqr; cqr = NULL; } + +out: spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + if (cqr) dasd_kfree_request(cqr, device); - return 0; + + return rc; } /* diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index d138d0116734d575352f99d74bb98a9cce4bbfef..9e3419124264ab05ffe3cbe93a06489179c1f7d6 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -15,7 +15,7 @@ #include #include -#include +#include /* This is ugly... */ #define PRINTK_HEADER "dasd_erp:" @@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) "default ERP called (%i retries left)", cqr->retries); if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) - cqr->lpm = device->path_data.opm; + cqr->lpm = dasd_path_get_opm(device); cqr->status = DASD_CQR_FILLED; } else { pr_err("%s: default ERP has run out of retries and failed\n", diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index d7b5b550364badfd896368d4e5a91bdfc3f1615d..462cab5d4302756659c8faf0e54bb6f10a83d38c 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device) device->default_expires = DASD_EXPIRES; device->default_retries = FBA_DEFAULT_RETRIES; - device->path_data.opm = LPM_ANYPATH; + dasd_path_set_opm(device, LPM_ANYPATH); readonly = dasd_device_is_ro(device); if (readonly) diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index e2fa759bf2ad42683b571e8a44a434ff3936b408..8b1341fb2e0db757d0ce753c0ebb8fe481c04515 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -16,7 +16,7 @@ #include #include -#include +#include /* This is ugly... 
*/ #define PRINTK_HEADER "dasd_gendisk:" diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 87ff6cef872f238b4993333e4c9d416450062ba6..24be210c10e5fa3b18ddffcf4be15f7cf265cc50 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -55,6 +55,7 @@ #include #include #include +#include /* DASD discipline magic */ #define DASD_ECKD_MAGIC 0xC5C3D2C4 @@ -377,6 +378,10 @@ struct dasd_discipline { int (*check_attention)(struct dasd_device *, __u8); int (*host_access_count)(struct dasd_device *); int (*hosts_print)(struct dasd_device *, struct seq_file *); + void (*handle_hpf_error)(struct dasd_device *, struct irb *); + void (*disable_hpf)(struct dasd_device *); + int (*hpf_enabled)(struct dasd_device *); + void (*reset_path)(struct dasd_device *, __u8); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -397,17 +402,31 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer; #define DASD_EER_STATECHANGE 3 #define DASD_EER_PPRCSUSPEND 4 +/* DASD path handling */ + +#define DASD_PATH_OPERATIONAL 1 +#define DASD_PATH_TBV 2 +#define DASD_PATH_PP 3 +#define DASD_PATH_NPP 4 +#define DASD_PATH_MISCABLED 5 +#define DASD_PATH_NOHPF 6 +#define DASD_PATH_CUIR 7 +#define DASD_PATH_IFCC 8 + +#define DASD_THRHLD_MAX 4294967295U +#define DASD_INTERVAL_MAX 4294967295U + struct dasd_path { - __u8 opm; - __u8 tbvpm; - __u8 ppm; - __u8 npm; - /* paths that are not used because of a special condition */ - __u8 cablepm; /* miss-cabled */ - __u8 hpfpm; /* the HPF requirements of the other paths are not met */ - __u8 cuirpm; /* CUIR varied offline */ + unsigned long flags; + u8 cssid; + u8 ssid; + u8 chpid; + struct dasd_conf_data *conf_data; + atomic_t error_count; + unsigned long long errorclk; }; + struct dasd_profile_info { /* legacy part of profile data, as in dasd_profile_info_t */ unsigned int dasd_io_reqs; /* number of requests processed */ @@ -458,7 +477,8 @@ struct dasd_device { struct dasd_discipline *discipline; struct dasd_discipline *base_discipline; void *private; - struct dasd_path path_data; + struct dasd_path path[8]; + __u8 opm; /* Device state and target state. 
*/ int state, target; @@ -483,6 +503,7 @@ struct dasd_device { struct work_struct reload_device; struct work_struct kick_validate; struct work_struct suc_work; + struct work_struct requeue_requests; struct timer_list timer; debug_info_t *debug_area; @@ -498,6 +519,9 @@ struct dasd_device { unsigned long blk_timeout; + unsigned long path_thrhld; + unsigned long path_interval; + struct dentry *debugfs_dentry; struct dentry *hosts_dentry; struct dasd_profile profile; @@ -707,6 +731,7 @@ void dasd_set_target_state(struct dasd_device *, int); void dasd_kick_device(struct dasd_device *); void dasd_restore_device(struct dasd_device *); void dasd_reload_device(struct dasd_device *); +void dasd_schedule_requeue(struct dasd_device *); void dasd_add_request_head(struct dasd_ccw_req *); void dasd_add_request_tail(struct dasd_ccw_req *); @@ -835,4 +860,410 @@ static inline int dasd_eer_enabled(struct dasd_device *device) #define dasd_eer_enabled(d) (0) #endif /* CONFIG_DASD_ERR */ + +/* DASD path handling functions */ + +/* + * helper functions to modify bit masks for a given channel path for a device + */ +static inline int dasd_path_is_operational(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags); +} + +static inline int dasd_path_need_verify(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_TBV, &device->path[chp].flags); +} + +static inline void dasd_path_verify(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_TBV, &device->path[chp].flags); +} + +static inline void dasd_path_clear_verify(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_TBV, &device->path[chp].flags); +} + +static inline void dasd_path_clear_all_verify(struct dasd_device *device) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + dasd_path_clear_verify(device, chp); +} + +static inline void dasd_path_operational(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags); + device->opm |= (0x80 >> chp); +} + +static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_NPP, &device->path[chp].flags); +} + +static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_NPP, &device->path[chp].flags); +} + +static inline void dasd_path_clear_nonpreferred(struct dasd_device *device, + int chp) +{ + __clear_bit(DASD_PATH_NPP, &device->path[chp].flags); +} + +static inline void dasd_path_preferred(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_PP, &device->path[chp].flags); +} + +static inline int dasd_path_is_preferred(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_PP, &device->path[chp].flags); +} + +static inline void dasd_path_clear_preferred(struct dasd_device *device, + int chp) +{ + __clear_bit(DASD_PATH_PP, &device->path[chp].flags); +} + +static inline void dasd_path_clear_oper(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags); + device->opm &= ~(0x80 >> chp); +} + +static inline void dasd_path_clear_cable(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags); +} + +static inline void dasd_path_cuir(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_CUIR, &device->path[chp].flags); +} + +static inline int dasd_path_is_cuir(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_CUIR, &device->path[chp].flags); +} + +static inline void 
dasd_path_clear_cuir(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_CUIR, &device->path[chp].flags); +} + +static inline void dasd_path_ifcc(struct dasd_device *device, int chp) +{ + set_bit(DASD_PATH_IFCC, &device->path[chp].flags); +} + +static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_IFCC, &device->path[chp].flags); +} + +static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp) +{ + clear_bit(DASD_PATH_IFCC, &device->path[chp].flags); +} + +static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags); +} + +static inline void dasd_path_miscabled(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags); +} + +static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags); +} + +static inline void dasd_path_nohpf(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_NOHPF, &device->path[chp].flags); +} + +static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags); +} + +/* + * get functions for path masks + * will return a path masks for the given device + */ + +static inline __u8 dasd_path_get_opm(struct dasd_device *device) +{ + return device->opm; +} + +static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device) +{ + int chp; + __u8 tbvpm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_need_verify(device, chp)) + tbvpm |= 0x80 >> chp; + return tbvpm; +} + +static inline __u8 dasd_path_get_nppm(struct dasd_device *device) +{ + int chp; + __u8 npm = 0x00; + + for (chp = 0; chp < 8; chp++) { + if (dasd_path_is_nonpreferred(device, chp)) + npm |= 0x80 >> chp; + } + return npm; +} + +static inline __u8 dasd_path_get_ppm(struct dasd_device *device) +{ + int chp; + __u8 ppm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_preferred(device, chp)) + ppm |= 0x80 >> chp; + return ppm; +} + +static inline __u8 dasd_path_get_cablepm(struct dasd_device *device) +{ + int chp; + __u8 cablepm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_miscabled(device, chp)) + cablepm |= 0x80 >> chp; + return cablepm; +} + +static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device) +{ + int chp; + __u8 cuirpm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_cuir(device, chp)) + cuirpm |= 0x80 >> chp; + return cuirpm; +} + +static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device) +{ + int chp; + __u8 ifccpm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_ifcc(device, chp)) + ifccpm |= 0x80 >> chp; + return ifccpm; +} + +static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device) +{ + int chp; + __u8 hpfpm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_nohpf(device, chp)) + hpfpm |= 0x80 >> chp; + return hpfpm; +} + +/* + * add functions for path masks + * the existing path mask will be extended by the given path mask + */ +static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_verify(device, chp); +} + +static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device) +{ + int chp; + __u8 nopm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_nohpf(device, chp) || + dasd_path_is_ifcc(device, chp) || + 
dasd_path_is_cuir(device, chp) || + dasd_path_is_miscabled(device, chp)) + nopm |= 0x80 >> chp; + return nopm; +} + +static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) { + dasd_path_operational(device, chp); + /* + * if the path is used + * it should not be in one of the negative lists + */ + dasd_path_clear_nohpf(device, chp); + dasd_path_clear_cuir(device, chp); + dasd_path_clear_cable(device, chp); + dasd_path_clear_ifcc(device, chp); + } +} + +static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_miscabled(device, chp); +} + +static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_cuir(device, chp); +} + +static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_ifcc(device, chp); +} + +static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_nonpreferred(device, chp); +} + +static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_nohpf(device, chp); +} + +static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_preferred(device, chp); +} + +/* + * set functions for path masks + * the existing path mask will be replaced by the given path mask + */ +static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_verify(device, chp); + else + dasd_path_clear_verify(device, chp); +} + +static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) { + dasd_path_clear_oper(device, chp); + if (pm & (0x80 >> chp)) { + dasd_path_operational(device, chp); + /* + * if the path is used + * it should not be in one of the negative lists + */ + dasd_path_clear_nohpf(device, chp); + dasd_path_clear_cuir(device, chp); + dasd_path_clear_cable(device, chp); + dasd_path_clear_ifcc(device, chp); + } + } +} + +/* + * remove functions for path masks + * the existing path mask will be cleared with the given path mask + */ +static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) { + if (pm & (0x80 >> chp)) + dasd_path_clear_oper(device, chp); + } +} + +/* + * add the newly available path to the to be verified pm and remove it from + * normal operation until it is verified + */ +static inline void dasd_path_available(struct dasd_device *device, int chp) +{ + dasd_path_clear_oper(device, chp); + dasd_path_verify(device, chp); +} + +static inline void dasd_path_notoper(struct dasd_device *device, int chp) +{ + dasd_path_clear_oper(device, chp); + dasd_path_clear_preferred(device, chp); + dasd_path_clear_nonpreferred(device, chp); +} + +/* + * remove all paths from normal operation + */ +static inline void dasd_path_no_path(struct dasd_device *device) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + dasd_path_notoper(device, chp); + + dasd_path_clear_all_verify(device); 
+} + +/* end - path handling */ + #endif /* DASD_H */ diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 9dfbd972f844ef44cc237ce36236b993dc6baa4d..ec65c1e51c2a11d1cfeecab888d6c093f925205f 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include /* This is ugly... */ #define PRINTK_HEADER "dasd_ioctl:" diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index bad7a196bf8401e053db9d7499058953067e9c5a..70dc2c4cd3f75408e786e0490876e03510a6f505 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -20,7 +20,7 @@ #include #include -#include +#include /* This is ugly... */ #define PRINTK_HEADER "dasd_proc:" diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 288f59a4147b1ffee2c330166c957f29406ed500..b9d7e755c8a37890961d06f95c77052cbd74d92f 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #define XPRAM_NAME "xpram" #define XPRAM_DEVS 1 /* one partition */ diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 931d10e86837c917f35df331ca919c3ba33f60cf..9ec4ae0561582544359933042769d263e7c62b4f 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -9,7 +9,6 @@ * Dan Morrison, IBM Corporation */ -#include #include #include #include @@ -26,7 +25,7 @@ #include #include #include -#include +#include #include #include #include @@ -1215,13 +1214,4 @@ static int __init tty3215_init(void) tty3215_driver = driver; return 0; } - -static void __exit tty3215_exit(void) -{ - tty_unregister_driver(tty3215_driver); - put_tty_driver(tty3215_driver); - ccw_driver_unregister(&raw3215_ccw_driver); -} - -module_init(tty3215_init); -module_exit(tty3215_exit); +device_initcall(tty3215_init); diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 7b9c50aa4cc922973cb2cd758244f264b81849d1..82c913318b73be149ac829b0c1e547a658686c8a 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "keyboard.h" diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index ebdeaa53182de968ec3216318b2504c5ecdee1d2..027ac6ae5eea512c530a9afbb87bb31ad2bedd8e 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 9b5d1138b2e2b2218bc15cbe8b0adf873b3a343e..571a7e3527553ad905612007b860197ca4105b5a 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 7a10c56334bbec3e0948565073df426381b05dd6..e1fc7eb043d67dbb62403e1744d204be6bc01406 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -59,6 +59,7 @@ typedef unsigned int sclp_cmdw_t; +#define SCLP_CMDW_READ_CPU_INFO 0x00010001 #define SCLP_CMDW_READ_EVENT_DATA 0x00770005 #define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 #define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 @@ -102,6 +103,28 @@ struct init_sccb { sccb_mask_t sclp_send_mask; } __attribute__((packed)); +struct read_cpu_info_sccb { + struct sccb_header header; + u16 nr_configured; + u16 
offset_configured; + u16 nr_standby; + u16 offset_standby; + u8 reserved[4096 - 16]; +} __attribute__((packed, aligned(PAGE_SIZE))); + +static inline void sclp_fill_core_info(struct sclp_core_info *info, + struct read_cpu_info_sccb *sccb) +{ + char *page = (char *) sccb; + + memset(info, 0, sizeof(*info)); + info->configured = sccb->nr_configured; + info->standby = sccb->nr_standby; + info->combined = sccb->nr_configured + sccb->nr_standby; + memcpy(&info->core, page + sccb->offset_configured, + info->combined * sizeof(struct sclp_core_entry)); +} + #define SCLP_HAS_CHP_INFO (sclp.facilities & 0x8000000000000000ULL) #define SCLP_HAS_CHP_RECONFIG (sclp.facilities & 0x2000000000000000ULL) #define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL) diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index e3fc7539116b4f2c6ba43c0d1938cf407ebaac9e..b9c5522b8a680c0a7a49a2c3196d8b7215803b40 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -80,33 +80,10 @@ int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout) * CPU configuration related functions. */ -#define SCLP_CMDW_READ_CPU_INFO 0x00010001 #define SCLP_CMDW_CONFIGURE_CPU 0x00110001 #define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 -struct read_cpu_info_sccb { - struct sccb_header header; - u16 nr_configured; - u16 offset_configured; - u16 nr_standby; - u16 offset_standby; - u8 reserved[4096 - 16]; -} __attribute__((packed, aligned(PAGE_SIZE))); - -static void sclp_fill_core_info(struct sclp_core_info *info, - struct read_cpu_info_sccb *sccb) -{ - char *page = (char *) sccb; - - memset(info, 0, sizeof(*info)); - info->configured = sccb->nr_configured; - info->standby = sccb->nr_standby; - info->combined = sccb->nr_configured + sccb->nr_standby; - memcpy(&info->core, page + sccb->offset_configured, - info->combined * sizeof(struct sclp_core_entry)); -} - -int sclp_get_core_info(struct sclp_core_info *info) +int _sclp_get_core_info(struct sclp_core_info *info) { int rc; struct read_cpu_info_sccb *sccb; diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c index 554eaa1e347de5125cc4c52d7cb1f24abb4e706a..78a7e4f947218524cf6322974f484e310075fb23 100644 --- a/drivers/s390/char/sclp_ctl.c +++ b/drivers/s390/char/sclp_ctl.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -126,4 +126,4 @@ static struct miscdevice sclp_ctl_device = { .name = "sclp", .fops = &sclp_ctl_fops, }; -module_misc_device(sclp_ctl_device); +builtin_misc_device(sclp_ctl_device); diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index c71df0c7dedc1c75c7b17b0f9038bee188c42939..f8e46c22e641501d5c8e26d1c2c2de52dd97c91a 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -221,6 +221,36 @@ static int __init sclp_set_event_mask(struct init_sccb *sccb, return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); } +static struct sclp_core_info sclp_core_info_early __initdata; +static int sclp_core_info_early_valid __initdata; + +static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb) +{ + int rc; + + if (!SCLP_HAS_CPU_INFO) + return; + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + do { + rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb); + } while (rc == -EBUSY); + if (rc) + return; + if (sccb->header.response_code != 0x0010) + return; + sclp_fill_core_info(&sclp_core_info_early, sccb); + sclp_core_info_early_valid = 1; +} + +int __init 
_sclp_get_core_info_early(struct sclp_core_info *info) +{ + if (!sclp_core_info_early_valid) + return -EIO; + *info = sclp_core_info_early; + return 0; +} + static long __init sclp_hsa_size_init(struct sdias_sccb *sccb) { sccb_init_eq_size(sccb); @@ -293,6 +323,7 @@ void __init sclp_early_detect(void) void *sccb = &sccb_early; sclp_facilities_detect(sccb); + sclp_init_core_info_early(sccb); sclp_hsa_size_detect(sccb); /* Turn off SCLP event notifications. Also save remote masks in the diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 475e470d9768ea729adc9ba95feee801f942dda1..e4958511168a70a611a3de9720c501f8ab3cd934 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c @@ -6,7 +6,6 @@ * Peter Oberparleiter */ -#include #include #include #include @@ -80,5 +79,4 @@ static int __init sclp_quiesce_init(void) { return sclp_register(&sclp_quiesce_event); } - -module_init(sclp_quiesce_init); +device_initcall(sclp_quiesce_init); diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 6010cd347a08700701379ef17f738a12755eb110..91b26df5227d7b69ec7f82be9bbb807d7e9770e4 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "sclp.h" #include "sclp_rw.h" diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 3c6e174e19b6faa54a9a828be4c23d707e045df3..236b736ae136485bd0c39a5b27d70aaea2e2aaa5 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -7,7 +7,6 @@ * Martin Schwidefsky */ -#include #include #include #include @@ -16,7 +15,7 @@ #include #include #include -#include +#include #include "ctrlchar.h" #include "sclp.h" @@ -573,4 +572,4 @@ sclp_tty_init(void) sclp_tty_driver = driver; return 0; } -module_init(sclp_tty_init); +device_initcall(sclp_tty_init); diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 68d6ee7ae504dceab936071319152574304316ff..095481d32236e802d13ab236e72367e638f17caa 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include "sclp.h" #include "ctrlchar.h" diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 77f9b9c2f7019d822a5bb9b96459af51f169d67f..46ac1164f2428e9a0d12b7fea20339bbda551a99 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -18,7 +18,7 @@ #include #include -#include +#include #define TAPE_DBF_AREA tape_core_dbf diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 272cb6cd1b2ac34afc9b9f38e7fe71f3c0b3f18d..e5ebe2fbee2353435002ec357542efd336c47f12 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include "raw3270.h" #include "tty3270.h" diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 2a67b496a9e28869dd9443aa5b823be6003b900f..65f5a794f26d030380142837969bff389ad3c127 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include "vmcp.h" static debug_info_t *vmcp_debug; diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index e883063c72581b61c8d4e8fd0db287476a37e101..57974a1e0e03ca8121c4e4458c3feeabfce77b26 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -21,7 +21,7 @@ #include #include #include -#include 
+#include #include #include #include @@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void) goto cleanup; for (i=0; i < MAXMINOR; ++i ) { - sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL); + sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sys_ser[i].buffer) { rc = -ENOMEM; break; diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index ff18f373af9acbccf0f9ec05887362811d582e21..04aceb694d5158ef5261930a0da02f04ef132c03 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 16992e2a40ad551a64c4e8eee57df9ca3ccca80b..d3b51edb056e64cde8760b5ad8cf98ef52a5993c 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -7,6 +7,7 @@ * * Copyright IBM Corp. 2003, 2008 * Author(s): Michael Holzheu + * License: GPL */ #define KMSG_COMPONENT "zdump" @@ -16,14 +17,13 @@ #include #include #include -#include #include #include #include #include #include -#include +#include #include #include #include @@ -320,7 +320,7 @@ static int __init zcore_init(void) goto fail; } - pr_alert("DETECTED 'S390X (64 bit) OS'\n"); + pr_alert("The dump process started for a 64-bit operating system\n"); rc = init_cpu_info(); if (rc) goto fail; @@ -364,22 +364,4 @@ static int __init zcore_init(void) diag308(DIAG308_REL_HSA, NULL); return rc; } - -static void __exit zcore_exit(void) -{ - debug_unregister(zcore_dbf); - sclp_sdias_exit(); - free_page((unsigned long) ipl_block); - debugfs_remove(zcore_hsa_file); - debugfs_remove(zcore_reipl_file); - debugfs_remove(zcore_memmap_file); - debugfs_remove(zcore_dir); - diag308(DIAG308_REL_HSA, NULL); -} - -MODULE_AUTHOR("Copyright IBM Corp. 2003,2008"); -MODULE_DESCRIPTION("zcore module for zfcpdump support"); -MODULE_LICENSE("GPL"); - subsys_initcall(zcore_init); -module_exit(zcore_exit); diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 9082476b51db9122a8a41186e7e40e1a97172368..bf7f5d4c50e130d8995d78ae5088b9212f8aacaf 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 268aa23afa0166a3a509fb3d7b7e50d1f1f4515c..6b6386e9a5004104b1ede3fcfbcc57854a45deda 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include /* get_tod_clock() */ @@ -1389,13 +1389,7 @@ static int __init init_cmf(void) "%s (mode %s)\n", format_string, detect_string); return 0; } -module_init(init_cmf); - - -MODULE_AUTHOR("Arnd Bergmann "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("channel measurement facility base driver\n" - "Copyright IBM Corp. 
2003\n"); +device_initcall(init_cmf); EXPORT_SYMBOL_GPL(enable_cmf); EXPORT_SYMBOL_GPL(disable_cmf); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 3d2b20ee613f8e38843bde3a2fed32a5ba05e347..bc099b61394df13cf5e5feefaf4a4ac7b198e60a 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -5,12 +5,14 @@ * * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) + * + * License: GPL */ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include +#include #include #include #include @@ -1285,5 +1287,3 @@ void css_driver_unregister(struct css_driver *cdrv) driver_unregister(&cdrv->drv); } EXPORT_SYMBOL_GPL(css_driver_unregister); - -MODULE_LICENSE("GPL"); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 6a58bc8f46e2a20bd170fb458c53b2f77871e3f6..79823ee9c1007d17b2bb881f4ce49ca438d35ab6 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -5,12 +5,14 @@ * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * License: GPL */ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include +#include #include #include #include @@ -145,7 +147,6 @@ static struct css_device_id io_subchannel_ids[] = { { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, { /* end of list */ }, }; -MODULE_DEVICE_TABLE(css, io_subchannel_ids); static int io_subchannel_prepare(struct subchannel *sch) { @@ -2150,7 +2151,6 @@ int ccw_device_siosl(struct ccw_device *cdev) } EXPORT_SYMBOL_GPL(ccw_device_siosl); -MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); EXPORT_SYMBOL(ccw_driver_register); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 065b1be98e2c5b73e73cadc5547b5c1b7ec2cec9..ec497af99dd8acfb74c388f990201214ee02713e 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -13,7 +13,6 @@ */ enum dev_state { DEV_STATE_NOT_OPER, - DEV_STATE_SENSE_PGID, DEV_STATE_SENSE_ID, DEV_STATE_OFFLINE, DEV_STATE_VERIFY, diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 8327d47e08b6a0617b7ebc172ce91aafcb8c9283..9afb5ce130071116a25193f1d0d921dc620a656a 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -1058,12 +1058,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_TIMEOUT] = ccw_device_nop, [DEV_EVENT_VERIFY] = ccw_device_nop, }, - [DEV_STATE_SENSE_PGID] = { - [DEV_EVENT_NOTOPER] = ccw_device_request_event, - [DEV_EVENT_INTERRUPT] = ccw_device_request_event, - [DEV_EVENT_TIMEOUT] = ccw_device_request_event, - [DEV_EVENT_VERIFY] = ccw_device_nop, - }, [DEV_STATE_SENSE_ID] = { [DEV_EVENT_NOTOPER] = ccw_device_request_event, [DEV_EVENT_INTERRUPT] = ccw_device_request_event, diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 877d9f601e63abe69819bfa039dab9905a152b42..cf8c4ac6323a6d1c91dfe93dfdb22e2d9d0432b3 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -3,8 +3,10 @@ * * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) + * + * License: GPL */ -#include +#include #include #include #include @@ -676,7 +678,6 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid) } EXPORT_SYMBOL_GPL(ccw_device_get_schid); -MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_options_mask); 
EXPORT_SYMBOL(ccw_device_set_options); EXPORT_SYMBOL(ccw_device_clear_options); diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index b8ab18676e69a39d2077fa3b5ff7c192ed3d3fe9..0a7fb83f35e5b364381c22594cd31f89552ce676 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -2,10 +2,11 @@ # S/390 crypto devices # -ap-objs := ap_bus.o -# zcrypt_api depends on ap -obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o -# msgtype* depend on zcrypt_api -obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o -# adapter drivers depend on ap, zcrypt_api and msgtype* +ap-objs := ap_bus.o ap_card.o ap_queue.o +obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o +# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o +zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o +zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o +obj-$(CONFIG_ZCRYPT) += zcrypt.o +# adapter drivers depend on ap.o and zcrypt.o obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h new file mode 100644 index 0000000000000000000000000000000000000000..7a630047c372a3051a35d3db83af617e0a0075db --- /dev/null +++ b/drivers/s390/crypto/ap_asm.h @@ -0,0 +1,191 @@ +/* + * Copyright IBM Corp. 2016 + * Author(s): Martin Schwidefsky + * + * Adjunct processor bus inline assemblies. + */ + +#ifndef _AP_ASM_H_ +#define _AP_ASM_H_ + +#include + +/** + * ap_instructions_available() - Test if AP instructions are available. + * + * Returns 0 if the AP instructions are installed. + */ +static inline int ap_instructions_available(void) +{ + register unsigned long reg0 asm ("0") = AP_MKQID(0, 0); + register unsigned long reg1 asm ("1") = -ENODEV; + register unsigned long reg2 asm ("2") = 0UL; + + asm volatile( + " .long 0xb2af0000\n" /* PQAP(TAPQ) */ + "0: la %1,0\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc"); + return reg1; +} + +/** + * ap_tapq(): Test adjunct processor queue. + * @qid: The AP queue number + * @info: Pointer to queue descriptor + * + * Returns AP queue status structure. + */ +static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) +{ + register unsigned long reg0 asm ("0") = qid; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm ("2") = 0UL; + + asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ + : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); + if (info) + *info = reg2; + return reg1; +} + +/** + * ap_rapq(): Reset adjunct processor queue. + * @qid: The AP queue number + * + * Returns AP queue status structure. + */ +static inline struct ap_queue_status ap_rapq(ap_qid_t qid) +{ + register unsigned long reg0 asm ("0") = qid | 0x01000000UL; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm ("2") = 0UL; + + asm volatile( + ".long 0xb2af0000" /* PQAP(RAPQ) */ + : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); + return reg1; +} + +/** + * ap_aqic(): Enable interruption for a specific AP. + * @qid: The AP queue number + * @ind: The notification indicator byte + * + * Returns AP queue status. 
+ */ +static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind) +{ + register unsigned long reg0 asm ("0") = qid | (3UL << 24); + register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC; + register struct ap_queue_status reg1_out asm ("1"); + register void *reg2 asm ("2") = ind; + + asm volatile( + ".long 0xb2af0000" /* PQAP(AQIC) */ + : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) + : + : "cc"); + return reg1_out; +} + +/** + * ap_qci(): Get AP configuration data + * + * Returns 0 on success, or -EOPNOTSUPP. + */ +static inline int ap_qci(void *config) +{ + register unsigned long reg0 asm ("0") = 0x04000000UL; + register unsigned long reg1 asm ("1") = -EINVAL; + register void *reg2 asm ("2") = (void *) config; + + asm volatile( + ".long 0xb2af0000\n" /* PQAP(QCI) */ + "0: la %1,0\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (reg0), "+d" (reg1), "+d" (reg2) + : + : "cc", "memory"); + + return reg1; +} + +/** + * ap_nqap(): Send message to adjunct processor queue. + * @qid: The AP queue number + * @psmid: The program supplied message identifier + * @msg: The message text + * @length: The message length + * + * Returns AP queue status structure. + * Condition code 1 on NQAP can't happen because the L bit is 1. + * Condition code 2 on NQAP also means the send is incomplete, + * because a segment boundary was reached. The NQAP is repeated. + */ +static inline struct ap_queue_status ap_nqap(ap_qid_t qid, + unsigned long long psmid, + void *msg, size_t length) +{ + struct msgblock { char _[length]; }; + register unsigned long reg0 asm ("0") = qid | 0x40000000UL; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm ("2") = (unsigned long) msg; + register unsigned long reg3 asm ("3") = (unsigned long) length; + register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); + register unsigned long reg5 asm ("5") = psmid & 0xffffffff; + + asm volatile ( + "0: .long 0xb2ad0042\n" /* NQAP */ + " brc 2,0b" + : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) + : "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg) + : "cc"); + return reg1; +} + +/** + * ap_dqap(): Receive message from adjunct processor queue. + * @qid: The AP queue number + * @psmid: Pointer to program supplied message identifier + * @msg: The message text + * @length: The message length + * + * Returns AP queue status structure. + * Condition code 1 on DQAP means the receive has taken place + * but only partially. The response is incomplete, hence the + * DQAP is repeated. + * Condition code 2 on DQAP also means the receive is incomplete, + * this time because a segment boundary was reached. Again, the + * DQAP is repeated. + * Note that gpr2 is used by the DQAP instruction to keep track of + * any 'residual' length, in case the instruction gets interrupted. + * Hence it gets zeroed before the instruction. 
+ */ +static inline struct ap_queue_status ap_dqap(ap_qid_t qid, + unsigned long long *psmid, + void *msg, size_t length) +{ + struct msgblock { char _[length]; }; + register unsigned long reg0 asm("0") = qid | 0x80000000UL; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm("2") = 0UL; + register unsigned long reg4 asm("4") = (unsigned long) msg; + register unsigned long reg5 asm("5") = (unsigned long) length; + register unsigned long reg6 asm("6") = 0UL; + register unsigned long reg7 asm("7") = 0UL; + + + asm volatile( + "0: .long 0xb2ae0064\n" /* DQAP */ + " brc 6,0b\n" + : "+d" (reg0), "=d" (reg1), "+d" (reg2), + "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), + "=m" (*(struct msgblock *) msg) : : "cc"); + *psmid = (((unsigned long long) reg6) << 32) + reg7; + return reg1; +} + +#endif /* _AP_ASM_H_ */ diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index ed92fb09fc8ed9266f3a056780dfbf4e5c9908d9..5fa6991928647971ef52047dad1294455219ada8 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -46,8 +46,12 @@ #include #include #include +#include +#include #include "ap_bus.h" +#include "ap_asm.h" +#include "ap_debug.h" /* * Module description. @@ -62,6 +66,7 @@ MODULE_ALIAS_CRYPTO("z90crypt"); * Module parameter */ int ap_domain_index = -1; /* Adjunct Processor Domain Index */ +static DEFINE_SPINLOCK(ap_domain_lock); module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP); MODULE_PARM_DESC(domain, "domain index for ap devices"); EXPORT_SYMBOL(ap_domain_index); @@ -70,12 +75,20 @@ static int ap_thread_flag = 0; module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP); MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); -static struct device *ap_root_device = NULL; +static struct device *ap_root_device; + +DEFINE_SPINLOCK(ap_list_lock); +LIST_HEAD(ap_card_list); + static struct ap_config_info *ap_configuration; -static DEFINE_SPINLOCK(ap_device_list_lock); -static LIST_HEAD(ap_device_list); static bool initialised; +/* + * AP bus related debug feature things. + */ +static struct dentry *ap_dbf_root; +debug_info_t *ap_dbf_info; + /* * Workqueue timer for bus rescan. */ @@ -89,7 +102,6 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus); */ static void ap_tasklet_fn(unsigned long); static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0); -static atomic_t ap_poll_requests = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); static struct task_struct *ap_poll_kthread = NULL; static DEFINE_MUTEX(ap_poll_thread_mutex); @@ -129,23 +141,17 @@ static inline int ap_using_interrupts(void) } /** - * ap_intructions_available() - Test if AP instructions are available. + * ap_airq_ptr() - Get the address of the adapter interrupt indicator * - * Returns 0 if the AP instructions are installed. + * Returns the address of the local-summary-indicator of the adapter + * interrupt handler for AP, or NULL if adapter interrupts are not + * available. 
*/ -static inline int ap_instructions_available(void) +void *ap_airq_ptr(void) { - register unsigned long reg0 asm ("0") = AP_MKQID(0,0); - register unsigned long reg1 asm ("1") = -ENODEV; - register unsigned long reg2 asm ("2") = 0UL; - - asm volatile( - " .long 0xb2af0000\n" /* PQAP(TAPQ) */ - "0: la %1,0\n" - "1:\n" - EX_TABLE(0b, 1b) - : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" ); - return reg1; + if (ap_using_interrupts()) + return ap_airq.lsi_ptr; + return NULL; } /** @@ -169,19 +175,6 @@ static int ap_configuration_available(void) return test_facility(12); } -static inline struct ap_queue_status -__pqap_tapq(ap_qid_t qid, unsigned long *info) -{ - register unsigned long reg0 asm ("0") = qid; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm ("2") = 0UL; - - asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ - : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); - *info = reg2; - return reg1; -} - /** * ap_test_queue(): Test adjunct processor queue. * @qid: The AP queue number @@ -192,85 +185,16 @@ __pqap_tapq(ap_qid_t qid, unsigned long *info) static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, unsigned long *info) { - struct ap_queue_status aqs; - unsigned long _info; - if (test_facility(15)) qid |= 1UL << 23; /* set APFT T bit*/ - aqs = __pqap_tapq(qid, &_info); - if (info) - *info = _info; - return aqs; -} - -/** - * ap_reset_queue(): Reset adjunct processor queue. - * @qid: The AP queue number - * - * Returns AP queue status structure. - */ -static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) -{ - register unsigned long reg0 asm ("0") = qid | 0x01000000UL; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm ("2") = 0UL; - - asm volatile( - ".long 0xb2af0000" /* PQAP(RAPQ) */ - : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); - return reg1; -} - -/** - * ap_queue_interruption_control(): Enable interruption for a specific AP. - * @qid: The AP queue number - * @ind: The notification indicator byte - * - * Returns AP queue status. - */ -static inline struct ap_queue_status -ap_queue_interruption_control(ap_qid_t qid, void *ind) -{ - register unsigned long reg0 asm ("0") = qid | 0x03000000UL; - register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC; - register struct ap_queue_status reg1_out asm ("1"); - register void *reg2 asm ("2") = ind; - asm volatile( - ".long 0xb2af0000" /* PQAP(AQIC) */ - : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) - : - : "cc" ); - return reg1_out; -} - -/** - * ap_query_configuration(): Get AP configuration data - * - * Returns 0 on success, or -EOPNOTSUPP. - */ -static inline int __ap_query_configuration(void) -{ - register unsigned long reg0 asm ("0") = 0x04000000UL; - register unsigned long reg1 asm ("1") = -EINVAL; - register void *reg2 asm ("2") = (void *) ap_configuration; - - asm volatile( - ".long 0xb2af0000\n" /* PQAP(QCI) */ - "0: la %1,0\n" - "1:\n" - EX_TABLE(0b, 1b) - : "+d" (reg0), "+d" (reg1), "+d" (reg2) - : - : "cc"); - - return reg1; + return ap_tapq(qid, info); } static inline int ap_query_configuration(void) { if (!ap_configuration) return -EOPNOTSUPP; - return __ap_query_configuration(); + return ap_qci(ap_configuration); } /** @@ -330,162 +254,6 @@ static inline int ap_test_config_domain(unsigned int domain) return ap_test_config(ap_configuration->aqm, domain); } -/** - * ap_queue_enable_interruption(): Enable interruption on an AP. 
- * @qid: The AP queue number - * @ind: the notification indicator byte - * - * Enables interruption on AP queue via ap_queue_interruption_control(). Based - * on the return value it waits a while and tests the AP queue if interrupts - * have been switched on using ap_test_queue(). - */ -static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind) -{ - struct ap_queue_status status; - - status = ap_queue_interruption_control(ap_dev->qid, ind); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - case AP_RESPONSE_OTHERWISE_CHANGED: - return 0; - case AP_RESPONSE_Q_NOT_AVAIL: - case AP_RESPONSE_DECONFIGURED: - case AP_RESPONSE_CHECKSTOPPED: - case AP_RESPONSE_INVALID_ADDRESS: - pr_err("Registering adapter interrupts for AP %d failed\n", - AP_QID_DEVICE(ap_dev->qid)); - return -EOPNOTSUPP; - case AP_RESPONSE_RESET_IN_PROGRESS: - case AP_RESPONSE_BUSY: - default: - return -EBUSY; - } -} - -static inline struct ap_queue_status -__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) -{ - typedef struct { char _[length]; } msgblock; - register unsigned long reg0 asm ("0") = qid | 0x40000000UL; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm ("2") = (unsigned long) msg; - register unsigned long reg3 asm ("3") = (unsigned long) length; - register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); - register unsigned long reg5 asm ("5") = psmid & 0xffffffff; - - asm volatile ( - "0: .long 0xb2ad0042\n" /* NQAP */ - " brc 2,0b" - : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) - : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) - : "cc"); - return reg1; -} - -/** - * __ap_send(): Send message to adjunct processor queue. - * @qid: The AP queue number - * @psmid: The program supplied message identifier - * @msg: The message text - * @length: The message length - * @special: Special Bit - * - * Returns AP queue status structure. - * Condition code 1 on NQAP can't happen because the L bit is 1. - * Condition code 2 on NQAP also means the send is incomplete, - * because a segment boundary was reached. The NQAP is repeated. - */ -static inline struct ap_queue_status -__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, - unsigned int special) -{ - if (special == 1) - qid |= 0x400000UL; - return __nqap(qid, psmid, msg, length); -} - -int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) -{ - struct ap_queue_status status; - - status = __ap_send(qid, psmid, msg, length, 0); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - return 0; - case AP_RESPONSE_Q_FULL: - case AP_RESPONSE_RESET_IN_PROGRESS: - return -EBUSY; - case AP_RESPONSE_REQ_FAC_NOT_INST: - return -EINVAL; - default: /* Device is gone. */ - return -ENODEV; - } -} -EXPORT_SYMBOL(ap_send); - -/** - * __ap_recv(): Receive message from adjunct processor queue. - * @qid: The AP queue number - * @psmid: Pointer to program supplied message identifier - * @msg: The message text - * @length: The message length - * - * Returns AP queue status structure. - * Condition code 1 on DQAP means the receive has taken place - * but only partially. The response is incomplete, hence the - * DQAP is repeated. - * Condition code 2 on DQAP also means the receive is incomplete, - * this time because a segment boundary was reached. Again, the - * DQAP is repeated. - * Note that gpr2 is used by the DQAP instruction to keep track of - * any 'residual' length, in case the instruction gets interrupted. 
- * Hence it gets zeroed before the instruction. - */ -static inline struct ap_queue_status -__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) -{ - typedef struct { char _[length]; } msgblock; - register unsigned long reg0 asm("0") = qid | 0x80000000UL; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm("2") = 0UL; - register unsigned long reg4 asm("4") = (unsigned long) msg; - register unsigned long reg5 asm("5") = (unsigned long) length; - register unsigned long reg6 asm("6") = 0UL; - register unsigned long reg7 asm("7") = 0UL; - - - asm volatile( - "0: .long 0xb2ae0064\n" /* DQAP */ - " brc 6,0b\n" - : "+d" (reg0), "=d" (reg1), "+d" (reg2), - "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), - "=m" (*(msgblock *) msg) : : "cc" ); - *psmid = (((unsigned long long) reg6) << 32) + reg7; - return reg1; -} - -int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) -{ - struct ap_queue_status status; - - if (msg == NULL) - return -EINVAL; - status = __ap_recv(qid, psmid, msg, length); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - return 0; - case AP_RESPONSE_NO_PENDING_REPLY: - if (status.queue_empty) - return -ENOENT; - return -EBUSY; - case AP_RESPONSE_RESET_IN_PROGRESS: - return -EBUSY; - default: - return -ENODEV; - } -} -EXPORT_SYMBOL(ap_recv); - /** * ap_query_queue(): Check if an AP queue is available. * @qid: The AP queue number @@ -500,7 +268,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type, unsigned long info; int nd; - if (!ap_test_config_card_id(AP_QID_DEVICE(qid))) + if (!ap_test_config_card_id(AP_QID_CARD(qid))) return -ENODEV; status = ap_test_queue(qid, &info); @@ -511,8 +279,28 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type, *facilities = (unsigned int)(info >> 32); /* Update maximum domain id */ nd = (info >> 16) & 0xff; + /* if N bit is available, z13 and newer */ if ((info & (1UL << 57)) && nd > 0) ap_max_domain_id = nd; + else /* older machine types */ + ap_max_domain_id = 15; + switch (*device_type) { + /* For CEX2 and CEX3 the available functions + * are not reflected by the facilities bits. + * Instead it is coded into the type. So here + * modify the function bits based on the type. + */ + case AP_DEVICE_TYPE_CEX2A: + case AP_DEVICE_TYPE_CEX3A: + *facilities |= 0x08000000; + break; + case AP_DEVICE_TYPE_CEX2C: + case AP_DEVICE_TYPE_CEX3C: + *facilities |= 0x10000000; + break; + default: + break; + } return 0; case AP_RESPONSE_Q_NOT_AVAIL: case AP_RESPONSE_DECONFIGURED: @@ -528,9 +316,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type, } } -/* State machine definitions and helpers */ - -static void ap_sm_wait(enum ap_wait wait) +void ap_wait(enum ap_wait wait) { ktime_t hr_time; @@ -547,7 +333,7 @@ static void ap_sm_wait(enum ap_wait wait) case AP_WAIT_TIMEOUT: spin_lock_bh(&ap_poll_timer_lock); if (!hrtimer_is_queued(&ap_poll_timer)) { - hr_time = ktime_set(0, poll_timeout); + hr_time = poll_timeout; hrtimer_forward_now(&ap_poll_timer, hr_time); hrtimer_restart(&ap_poll_timer); } @@ -559,350 +345,21 @@ static void ap_sm_wait(enum ap_wait wait) } } -static enum ap_wait ap_sm_nop(struct ap_device *ap_dev) -{ - return AP_WAIT_NONE; -} - -/** - * ap_sm_recv(): Receive pending reply messages from an AP device but do - * not change the state of the device. 
- * @ap_dev: pointer to the AP device - * - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT - */ -static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev) -{ - struct ap_queue_status status; - struct ap_message *ap_msg; - - status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid, - ap_dev->reply->message, ap_dev->reply->length); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - atomic_dec(&ap_poll_requests); - ap_dev->queue_count--; - if (ap_dev->queue_count > 0) - mod_timer(&ap_dev->timeout, - jiffies + ap_dev->drv->request_timeout); - list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { - if (ap_msg->psmid != ap_dev->reply->psmid) - continue; - list_del_init(&ap_msg->list); - ap_dev->pendingq_count--; - ap_msg->receive(ap_dev, ap_msg, ap_dev->reply); - break; - } - case AP_RESPONSE_NO_PENDING_REPLY: - if (!status.queue_empty || ap_dev->queue_count <= 0) - break; - /* The card shouldn't forget requests but who knows. */ - atomic_sub(ap_dev->queue_count, &ap_poll_requests); - ap_dev->queue_count = 0; - list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); - ap_dev->requestq_count += ap_dev->pendingq_count; - ap_dev->pendingq_count = 0; - break; - default: - break; - } - return status; -} - -/** - * ap_sm_read(): Receive pending reply messages from an AP device. - * @ap_dev: pointer to the AP device - * - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT - */ -static enum ap_wait ap_sm_read(struct ap_device *ap_dev) -{ - struct ap_queue_status status; - - if (!ap_dev->reply) - return AP_WAIT_NONE; - status = ap_sm_recv(ap_dev); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - if (ap_dev->queue_count > 0) { - ap_dev->state = AP_STATE_WORKING; - return AP_WAIT_AGAIN; - } - ap_dev->state = AP_STATE_IDLE; - return AP_WAIT_NONE; - case AP_RESPONSE_NO_PENDING_REPLY: - if (ap_dev->queue_count > 0) - return AP_WAIT_INTERRUPT; - ap_dev->state = AP_STATE_IDLE; - return AP_WAIT_NONE; - default: - ap_dev->state = AP_STATE_BORKED; - return AP_WAIT_NONE; - } -} - -/** - * ap_sm_suspend_read(): Receive pending reply messages from an AP device - * without changing the device state in between. In suspend mode we don't - * allow sending new requests, therefore just fetch pending replies. - * @ap_dev: pointer to the AP device - * - * Returns AP_WAIT_NONE or AP_WAIT_AGAIN - */ -static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev) -{ - struct ap_queue_status status; - - if (!ap_dev->reply) - return AP_WAIT_NONE; - status = ap_sm_recv(ap_dev); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - if (ap_dev->queue_count > 0) - return AP_WAIT_AGAIN; - /* fall through */ - default: - return AP_WAIT_NONE; - } -} - -/** - * ap_sm_write(): Send messages from the request queue to an AP device. - * @ap_dev: pointer to the AP device - * - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT - */ -static enum ap_wait ap_sm_write(struct ap_device *ap_dev) -{ - struct ap_queue_status status; - struct ap_message *ap_msg; - - if (ap_dev->requestq_count <= 0) - return AP_WAIT_NONE; - /* Start the next request on the queue. 
*/ - ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); - status = __ap_send(ap_dev->qid, ap_msg->psmid, - ap_msg->message, ap_msg->length, ap_msg->special); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - atomic_inc(&ap_poll_requests); - ap_dev->queue_count++; - if (ap_dev->queue_count == 1) - mod_timer(&ap_dev->timeout, - jiffies + ap_dev->drv->request_timeout); - list_move_tail(&ap_msg->list, &ap_dev->pendingq); - ap_dev->requestq_count--; - ap_dev->pendingq_count++; - if (ap_dev->queue_count < ap_dev->queue_depth) { - ap_dev->state = AP_STATE_WORKING; - return AP_WAIT_AGAIN; - } - /* fall through */ - case AP_RESPONSE_Q_FULL: - ap_dev->state = AP_STATE_QUEUE_FULL; - return AP_WAIT_INTERRUPT; - case AP_RESPONSE_RESET_IN_PROGRESS: - ap_dev->state = AP_STATE_RESET_WAIT; - return AP_WAIT_TIMEOUT; - case AP_RESPONSE_MESSAGE_TOO_BIG: - case AP_RESPONSE_REQ_FAC_NOT_INST: - list_del_init(&ap_msg->list); - ap_dev->requestq_count--; - ap_msg->rc = -EINVAL; - ap_msg->receive(ap_dev, ap_msg, NULL); - return AP_WAIT_AGAIN; - default: - ap_dev->state = AP_STATE_BORKED; - return AP_WAIT_NONE; - } -} - -/** - * ap_sm_read_write(): Send and receive messages to/from an AP device. - * @ap_dev: pointer to the AP device - * - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT - */ -static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev) -{ - return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev)); -} - -/** - * ap_sm_reset(): Reset an AP queue. - * @qid: The AP queue number - * - * Submit the Reset command to an AP queue. - */ -static enum ap_wait ap_sm_reset(struct ap_device *ap_dev) -{ - struct ap_queue_status status; - - status = ap_reset_queue(ap_dev->qid); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - case AP_RESPONSE_RESET_IN_PROGRESS: - ap_dev->state = AP_STATE_RESET_WAIT; - ap_dev->interrupt = AP_INTR_DISABLED; - return AP_WAIT_TIMEOUT; - case AP_RESPONSE_BUSY: - return AP_WAIT_TIMEOUT; - case AP_RESPONSE_Q_NOT_AVAIL: - case AP_RESPONSE_DECONFIGURED: - case AP_RESPONSE_CHECKSTOPPED: - default: - ap_dev->state = AP_STATE_BORKED; - return AP_WAIT_NONE; - } -} - -/** - * ap_sm_reset_wait(): Test queue for completion of the reset operation - * @ap_dev: pointer to the AP device - * - * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. - */ -static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev) -{ - struct ap_queue_status status; - unsigned long info; - - if (ap_dev->queue_count > 0 && ap_dev->reply) - /* Try to read a completed message and get the status */ - status = ap_sm_recv(ap_dev); - else - /* Get the status with TAPQ */ - status = ap_test_queue(ap_dev->qid, &info); - - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - if (ap_using_interrupts() && - ap_queue_enable_interruption(ap_dev, - ap_airq.lsi_ptr) == 0) - ap_dev->state = AP_STATE_SETIRQ_WAIT; - else - ap_dev->state = (ap_dev->queue_count > 0) ? - AP_STATE_WORKING : AP_STATE_IDLE; - return AP_WAIT_AGAIN; - case AP_RESPONSE_BUSY: - case AP_RESPONSE_RESET_IN_PROGRESS: - return AP_WAIT_TIMEOUT; - case AP_RESPONSE_Q_NOT_AVAIL: - case AP_RESPONSE_DECONFIGURED: - case AP_RESPONSE_CHECKSTOPPED: - default: - ap_dev->state = AP_STATE_BORKED; - return AP_WAIT_NONE; - } -} - -/** - * ap_sm_setirq_wait(): Test queue for completion of the irq enablement - * @ap_dev: pointer to the AP device - * - * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 
- */ -static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev) -{ - struct ap_queue_status status; - unsigned long info; - - if (ap_dev->queue_count > 0 && ap_dev->reply) - /* Try to read a completed message and get the status */ - status = ap_sm_recv(ap_dev); - else - /* Get the status with TAPQ */ - status = ap_test_queue(ap_dev->qid, &info); - - if (status.int_enabled == 1) { - /* Irqs are now enabled */ - ap_dev->interrupt = AP_INTR_ENABLED; - ap_dev->state = (ap_dev->queue_count > 0) ? - AP_STATE_WORKING : AP_STATE_IDLE; - } - - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - if (ap_dev->queue_count > 0) - return AP_WAIT_AGAIN; - /* fallthrough */ - case AP_RESPONSE_NO_PENDING_REPLY: - return AP_WAIT_TIMEOUT; - default: - ap_dev->state = AP_STATE_BORKED; - return AP_WAIT_NONE; - } -} - -/* - * AP state machine jump table - */ -static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { - [AP_STATE_RESET_START] = { - [AP_EVENT_POLL] = ap_sm_reset, - [AP_EVENT_TIMEOUT] = ap_sm_nop, - }, - [AP_STATE_RESET_WAIT] = { - [AP_EVENT_POLL] = ap_sm_reset_wait, - [AP_EVENT_TIMEOUT] = ap_sm_nop, - }, - [AP_STATE_SETIRQ_WAIT] = { - [AP_EVENT_POLL] = ap_sm_setirq_wait, - [AP_EVENT_TIMEOUT] = ap_sm_nop, - }, - [AP_STATE_IDLE] = { - [AP_EVENT_POLL] = ap_sm_write, - [AP_EVENT_TIMEOUT] = ap_sm_nop, - }, - [AP_STATE_WORKING] = { - [AP_EVENT_POLL] = ap_sm_read_write, - [AP_EVENT_TIMEOUT] = ap_sm_reset, - }, - [AP_STATE_QUEUE_FULL] = { - [AP_EVENT_POLL] = ap_sm_read, - [AP_EVENT_TIMEOUT] = ap_sm_reset, - }, - [AP_STATE_SUSPEND_WAIT] = { - [AP_EVENT_POLL] = ap_sm_suspend_read, - [AP_EVENT_TIMEOUT] = ap_sm_nop, - }, - [AP_STATE_BORKED] = { - [AP_EVENT_POLL] = ap_sm_nop, - [AP_EVENT_TIMEOUT] = ap_sm_nop, - }, -}; - -static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev, - enum ap_event event) -{ - return ap_jumptable[ap_dev->state][event](ap_dev); -} - -static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev, - enum ap_event event) -{ - enum ap_wait wait; - - while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN) - ; - return wait; -} - /** * ap_request_timeout(): Handling of request timeouts * @data: Holds the AP device. * * Handles request timeouts. */ -static void ap_request_timeout(unsigned long data) +void ap_request_timeout(unsigned long data) { - struct ap_device *ap_dev = (struct ap_device *) data; + struct ap_queue *aq = (struct ap_queue *) data; if (ap_suspend_flag) return; - spin_lock_bh(&ap_dev->lock); - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT)); - spin_unlock_bh(&ap_dev->lock); + spin_lock_bh(&aq->lock); + ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT)); + spin_unlock_bh(&aq->lock); } /** @@ -937,7 +394,8 @@ static void ap_interrupt_handler(struct airq_struct *airq) */ static void ap_tasklet_fn(unsigned long dummy) { - struct ap_device *ap_dev; + struct ap_card *ac; + struct ap_queue *aq; enum ap_wait wait = AP_WAIT_NONE; /* Reset the indicator if interrupts are used. 
Thus new interrupts can @@ -947,14 +405,35 @@ static void ap_tasklet_fn(unsigned long dummy) if (ap_using_interrupts()) xchg(ap_airq.lsi_ptr, 0); - spin_lock(&ap_device_list_lock); - list_for_each_entry(ap_dev, &ap_device_list, list) { - spin_lock_bh(&ap_dev->lock); - wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); - spin_unlock_bh(&ap_dev->lock); + spin_lock_bh(&ap_list_lock); + for_each_ap_card(ac) { + for_each_ap_queue(aq, ac) { + spin_lock_bh(&aq->lock); + wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); + } } - spin_unlock(&ap_device_list_lock); - ap_sm_wait(wait); + spin_unlock_bh(&ap_list_lock); + + ap_wait(wait); +} + +static int ap_pending_requests(void) +{ + struct ap_card *ac; + struct ap_queue *aq; + + spin_lock_bh(&ap_list_lock); + for_each_ap_card(ac) { + for_each_ap_queue(aq, ac) { + if (aq->queue_count == 0) + continue; + spin_unlock_bh(&ap_list_lock); + return 1; + } + } + spin_unlock_bh(&ap_list_lock); + return 0; } /** @@ -976,8 +455,7 @@ static int ap_poll_thread(void *data) while (!kthread_should_stop()) { add_wait_queue(&ap_poll_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); - if (ap_suspend_flag || - atomic_read(&ap_poll_requests) <= 0) { + if (ap_suspend_flag || !ap_pending_requests()) { schedule(); try_to_freeze(); } @@ -989,7 +467,8 @@ static int ap_poll_thread(void *data) continue; } ap_tasklet_fn(0); - } while (!kthread_should_stop()); + } + return 0; } @@ -1018,207 +497,8 @@ static void ap_poll_thread_stop(void) mutex_unlock(&ap_poll_thread_mutex); } -/** - * ap_queue_message(): Queue a request to an AP device. - * @ap_dev: The AP device to queue the message to - * @ap_msg: The message that is to be added - */ -void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) -{ - /* For asynchronous message handling a valid receive-callback - * is required. */ - BUG_ON(!ap_msg->receive); - - spin_lock_bh(&ap_dev->lock); - /* Queue the message. */ - list_add_tail(&ap_msg->list, &ap_dev->requestq); - ap_dev->requestq_count++; - ap_dev->total_request_count++; - /* Send/receive as many request from the queue as possible. */ - ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); - spin_unlock_bh(&ap_dev->lock); -} -EXPORT_SYMBOL(ap_queue_message); - -/** - * ap_cancel_message(): Cancel a crypto request. - * @ap_dev: The AP device that has the message queued - * @ap_msg: The message that is to be removed - * - * Cancel a crypto request. This is done by removing the request - * from the device pending or request queue. Note that the - * request stays on the AP queue. When it finishes the message - * reply will be discarded because the psmid can't be found. - */ -void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) -{ - struct ap_message *tmp; - - spin_lock_bh(&ap_dev->lock); - if (!list_empty(&ap_msg->list)) { - list_for_each_entry(tmp, &ap_dev->pendingq, list) - if (tmp->psmid == ap_msg->psmid) { - ap_dev->pendingq_count--; - goto found; - } - ap_dev->requestq_count--; -found: - list_del_init(&ap_msg->list); - } - spin_unlock_bh(&ap_dev->lock); -} -EXPORT_SYMBOL(ap_cancel_message); - -/* - * AP device related attributes. 
- */ -static ssize_t ap_hwtype_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct ap_device *ap_dev = to_ap_dev(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); -} - -static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); - -static ssize_t ap_raw_hwtype_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct ap_device *ap_dev = to_ap_dev(dev); - - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype); -} - -static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL); - -static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct ap_device *ap_dev = to_ap_dev(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); -} - -static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); -static ssize_t ap_request_count_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct ap_device *ap_dev = to_ap_dev(dev); - int rc; - - spin_lock_bh(&ap_dev->lock); - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count); - spin_unlock_bh(&ap_dev->lock); - return rc; -} - -static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); - -static ssize_t ap_requestq_count_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct ap_device *ap_dev = to_ap_dev(dev); - int rc; - - spin_lock_bh(&ap_dev->lock); - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count); - spin_unlock_bh(&ap_dev->lock); - return rc; -} - -static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); - -static ssize_t ap_pendingq_count_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct ap_device *ap_dev = to_ap_dev(dev); - int rc; - - spin_lock_bh(&ap_dev->lock); - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count); - spin_unlock_bh(&ap_dev->lock); - return rc; -} - -static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); - -static ssize_t ap_reset_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct ap_device *ap_dev = to_ap_dev(dev); - int rc = 0; - - spin_lock_bh(&ap_dev->lock); - switch (ap_dev->state) { - case AP_STATE_RESET_START: - case AP_STATE_RESET_WAIT: - rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); - break; - case AP_STATE_WORKING: - case AP_STATE_QUEUE_FULL: - rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); - break; - default: - rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); - } - spin_unlock_bh(&ap_dev->lock); - return rc; -} - -static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL); - -static ssize_t ap_interrupt_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct ap_device *ap_dev = to_ap_dev(dev); - int rc = 0; - - spin_lock_bh(&ap_dev->lock); - if (ap_dev->state == AP_STATE_SETIRQ_WAIT) - rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); - else if (ap_dev->interrupt == AP_INTR_ENABLED) - rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); - else - rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); - spin_unlock_bh(&ap_dev->lock); - return rc; -} - -static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL); - -static ssize_t ap_modalias_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type); -} - -static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); - -static ssize_t ap_functions_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - 
struct ap_device *ap_dev = to_ap_dev(dev); - return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions); -} - -static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); - -static struct attribute *ap_dev_attrs[] = { - &dev_attr_hwtype.attr, - &dev_attr_raw_hwtype.attr, - &dev_attr_depth.attr, - &dev_attr_request_count.attr, - &dev_attr_requestq_count.attr, - &dev_attr_pendingq_count.attr, - &dev_attr_reset.attr, - &dev_attr_interrupt.attr, - &dev_attr_modalias.attr, - &dev_attr_ap_functions.attr, - NULL -}; -static struct attribute_group ap_dev_attr_group = { - .attrs = ap_dev_attrs -}; +#define is_card_dev(x) ((x)->parent == ap_root_device) +#define is_queue_dev(x) ((x)->parent != ap_root_device) /** * ap_bus_match() @@ -1229,7 +509,6 @@ static struct attribute_group ap_dev_attr_group = { */ static int ap_bus_match(struct device *dev, struct device_driver *drv) { - struct ap_device *ap_dev = to_ap_dev(dev); struct ap_driver *ap_drv = to_ap_drv(drv); struct ap_device_id *id; @@ -1238,10 +517,14 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv) * supported types of the device_driver. */ for (id = ap_drv->ids; id->match_flags; id++) { - if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) && - (id->dev_type != ap_dev->device_type)) - continue; - return 1; + if (is_card_dev(dev) && + id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE && + id->dev_type == to_ap_dev(dev)->device_type) + return 1; + if (is_queue_dev(dev) && + id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE && + id->dev_type == to_ap_dev(dev)->device_type) + return 1; } return 0; } @@ -1273,27 +556,28 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env) return retval; } -static int ap_dev_suspend(struct device *dev, pm_message_t state) +static int ap_dev_suspend(struct device *dev) { struct ap_device *ap_dev = to_ap_dev(dev); - /* Poll on the device until all requests are finished. 
*/ - spin_lock_bh(&ap_dev->lock); - ap_dev->state = AP_STATE_SUSPEND_WAIT; - while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE) - ; - ap_dev->state = AP_STATE_BORKED; - spin_unlock_bh(&ap_dev->lock); + if (ap_dev->drv && ap_dev->drv->suspend) + ap_dev->drv->suspend(ap_dev); return 0; } static int ap_dev_resume(struct device *dev) { + struct ap_device *ap_dev = to_ap_dev(dev); + + if (ap_dev->drv && ap_dev->drv->resume) + ap_dev->drv->resume(ap_dev); return 0; } static void ap_bus_suspend(void) { + AP_DBF(DBF_DEBUG, "ap_bus_suspend running\n"); + ap_suspend_flag = 1; /* * Disable scanning for devices, thus we do not want to scan @@ -1303,9 +587,25 @@ static void ap_bus_suspend(void) tasklet_disable(&ap_tasklet); } -static int __ap_devices_unregister(struct device *dev, void *dummy) +static int __ap_card_devices_unregister(struct device *dev, void *dummy) +{ + if (is_card_dev(dev)) + device_unregister(dev); + return 0; +} + +static int __ap_queue_devices_unregister(struct device *dev, void *dummy) +{ + if (is_queue_dev(dev)) + device_unregister(dev); + return 0; +} + +static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) { - device_unregister(dev); + if (is_queue_dev(dev) && + AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data) + device_unregister(dev); return 0; } @@ -1313,8 +613,15 @@ static void ap_bus_resume(void) { int rc; - /* Unconditionally remove all AP devices */ - bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); + AP_DBF(DBF_DEBUG, "ap_bus_resume running\n"); + + /* remove all queue devices */ + bus_for_each_dev(&ap_bus_type, NULL, NULL, + __ap_queue_devices_unregister); + /* remove all card devices */ + bus_for_each_dev(&ap_bus_type, NULL, NULL, + __ap_card_devices_unregister); + /* Reset thin interrupt setting */ if (ap_interrupts_available() && !ap_using_interrupts()) { rc = register_adapter_interrupt(&ap_airq); @@ -1356,25 +663,15 @@ static struct notifier_block ap_power_notifier = { .notifier_call = ap_power_event, }; +static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume); + static struct bus_type ap_bus_type = { .name = "ap", .match = &ap_bus_match, .uevent = &ap_uevent, - .suspend = ap_dev_suspend, - .resume = ap_dev_resume, + .pm = &ap_bus_pm_ops, }; -void ap_device_init_reply(struct ap_device *ap_dev, - struct ap_message *reply) -{ - ap_dev->reply = reply; - - spin_lock_bh(&ap_dev->lock); - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); - spin_unlock_bh(&ap_dev->lock); -} -EXPORT_SYMBOL(ap_device_init_reply); - static int ap_device_probe(struct device *dev) { struct ap_device *ap_dev = to_ap_dev(dev); @@ -1388,61 +685,22 @@ static int ap_device_probe(struct device *dev) return rc; } -/** - * __ap_flush_queue(): Flush requests. - * @ap_dev: Pointer to the AP device - * - * Flush all requests from the request/pending queue of an AP device. 
- */ -static void __ap_flush_queue(struct ap_device *ap_dev) -{ - struct ap_message *ap_msg, *next; - - list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { - list_del_init(&ap_msg->list); - ap_dev->pendingq_count--; - ap_msg->rc = -EAGAIN; - ap_msg->receive(ap_dev, ap_msg, NULL); - } - list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { - list_del_init(&ap_msg->list); - ap_dev->requestq_count--; - ap_msg->rc = -EAGAIN; - ap_msg->receive(ap_dev, ap_msg, NULL); - } -} - -void ap_flush_queue(struct ap_device *ap_dev) -{ - spin_lock_bh(&ap_dev->lock); - __ap_flush_queue(ap_dev); - spin_unlock_bh(&ap_dev->lock); -} -EXPORT_SYMBOL(ap_flush_queue); - static int ap_device_remove(struct device *dev) { struct ap_device *ap_dev = to_ap_dev(dev); struct ap_driver *ap_drv = ap_dev->drv; - ap_flush_queue(ap_dev); - del_timer_sync(&ap_dev->timeout); - spin_lock_bh(&ap_device_list_lock); - list_del_init(&ap_dev->list); - spin_unlock_bh(&ap_device_list_lock); + spin_lock_bh(&ap_list_lock); + if (is_card_dev(dev)) + list_del_init(&to_ap_card(dev)->list); + else + list_del_init(&to_ap_queue(dev)->list); + spin_unlock_bh(&ap_list_lock); if (ap_drv->remove) ap_drv->remove(ap_dev); - spin_lock_bh(&ap_dev->lock); - atomic_sub(ap_dev->queue_count, &ap_poll_requests); - spin_unlock_bh(&ap_dev->lock); return 0; } -static void ap_device_release(struct device *dev) -{ - kfree(to_ap_dev(dev)); -} - int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, char *name) { @@ -1485,18 +743,30 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf) return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); } -static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); +static ssize_t ap_domain_store(struct bus_type *bus, + const char *buf, size_t count) +{ + int domain; + + if (sscanf(buf, "%i\n", &domain) != 1 || + domain < 0 || domain > ap_max_domain_id) + return -EINVAL; + spin_lock_bh(&ap_domain_lock); + ap_domain_index = domain; + spin_unlock_bh(&ap_domain_lock); + + AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain); + + return count; +} + +static BUS_ATTR(ap_domain, 0644, ap_domain_show, ap_domain_store); static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) { if (!ap_configuration) /* QCI not supported */ return snprintf(buf, PAGE_SIZE, "not supported\n"); - if (!test_facility(76)) - /* format 0 - 16 bit domain field */ - return snprintf(buf, PAGE_SIZE, "%08x%08x\n", - ap_configuration->adm[0], - ap_configuration->adm[1]); - /* format 1 - 256 bit domain field */ + return snprintf(buf, PAGE_SIZE, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", ap_configuration->adm[0], ap_configuration->adm[1], @@ -1508,6 +778,22 @@ static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) static BUS_ATTR(ap_control_domain_mask, 0444, ap_control_domain_mask_show, NULL); +static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf) +{ + if (!ap_configuration) /* QCI not supported */ + return snprintf(buf, PAGE_SIZE, "not supported\n"); + + return snprintf(buf, PAGE_SIZE, + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", + ap_configuration->aqm[0], ap_configuration->aqm[1], + ap_configuration->aqm[2], ap_configuration->aqm[3], + ap_configuration->aqm[4], ap_configuration->aqm[5], + ap_configuration->aqm[6], ap_configuration->aqm[7]); +} + +static BUS_ATTR(ap_usage_domain_mask, 0444, + ap_usage_domain_mask_show, NULL); + static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); 
@@ -1574,7 +860,7 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, time > 120000000000ULL) return -EINVAL; poll_timeout = time; - hr_time = ktime_set(0, poll_timeout); + hr_time = poll_timeout; spin_lock_bh(&ap_poll_timer_lock); hrtimer_cancel(&ap_poll_timer); @@ -1603,6 +889,7 @@ static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL); static struct bus_attribute *const ap_bus_attrs[] = { &bus_attr_ap_domain, &bus_attr_ap_control_domain_mask, + &bus_attr_ap_usage_domain_mask, &bus_attr_config_time, &bus_attr_poll_thread, &bus_attr_ap_interrupts, @@ -1627,9 +914,12 @@ static int ap_select_domain(void) * the "domain=" parameter or the domain with the maximum number * of devices. */ - if (ap_domain_index >= 0) + spin_lock_bh(&ap_domain_lock); + if (ap_domain_index >= 0) { /* Domain has already been selected. */ + spin_unlock_bh(&ap_domain_lock); return 0; + } best_domain = -1; max_count = 0; for (i = 0; i < AP_DOMAINS; i++) { @@ -1651,109 +941,171 @@ static int ap_select_domain(void) } if (best_domain >= 0){ ap_domain_index = best_domain; + spin_unlock_bh(&ap_domain_lock); return 0; } + spin_unlock_bh(&ap_domain_lock); return -ENODEV; } -/** - * __ap_scan_bus(): Scan the AP bus. - * @dev: Pointer to device - * @data: Pointer to data - * - * Scan the AP bus for new devices. +/* + * helper function to be used with bus_find_dev + * matches for the card device with the given id */ -static int __ap_scan_bus(struct device *dev, void *data) +static int __match_card_device_with_id(struct device *dev, void *data) { - return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; + return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data; } +/* helper function to be used with bus_find_dev + * matches for the queue device with a given qid + */ +static int __match_queue_device_with_qid(struct device *dev, void *data) +{ + return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data; +} + +/** + * ap_scan_bus(): Scan the AP bus for new devices + * Runs periodically, workqueue timer (ap_config_time) + */ static void ap_scan_bus(struct work_struct *unused) { - struct ap_device *ap_dev; + struct ap_queue *aq; + struct ap_card *ac; struct device *dev; ap_qid_t qid; - int queue_depth = 0, device_type = 0; - unsigned int device_functions = 0; - int rc, i, borked; + int depth = 0, type = 0; + unsigned int functions = 0; + int rc, id, dom, borked, domains; + + AP_DBF(DBF_DEBUG, "ap_scan_bus running\n"); ap_query_configuration(); if (ap_select_domain() != 0) goto out; - for (i = 0; i < AP_DEVICES; i++) { - qid = AP_MKQID(i, ap_domain_index); + for (id = 0; id < AP_DEVICES; id++) { + /* check if device is registered */ dev = bus_find_device(&ap_bus_type, NULL, - (void *)(unsigned long)qid, - __ap_scan_bus); - rc = ap_query_queue(qid, &queue_depth, &device_type, - &device_functions); - if (dev) { - ap_dev = to_ap_dev(dev); - spin_lock_bh(&ap_dev->lock); - if (rc == -ENODEV) - ap_dev->state = AP_STATE_BORKED; - borked = ap_dev->state == AP_STATE_BORKED; - spin_unlock_bh(&ap_dev->lock); - if (borked) /* Remove broken device */ + (void *)(long) id, + __match_card_device_with_id); + ac = dev ? to_ap_card(dev) : NULL; + if (!ap_test_config_card_id(id)) { + if (dev) { + /* Card device has been removed from + * configuration, remove the belonging + * queue devices. 
+ */ + bus_for_each_dev(&ap_bus_type, NULL, + (void *)(long) id, + __ap_queue_devices_with_id_unregister); + /* now remove the card device */ device_unregister(dev); - put_device(dev); - if (!borked) - continue; - } - if (rc) - continue; - ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL); - if (!ap_dev) - break; - ap_dev->qid = qid; - ap_dev->state = AP_STATE_RESET_START; - ap_dev->interrupt = AP_INTR_DISABLED; - ap_dev->queue_depth = queue_depth; - ap_dev->raw_hwtype = device_type; - ap_dev->device_type = device_type; - ap_dev->functions = device_functions; - spin_lock_init(&ap_dev->lock); - INIT_LIST_HEAD(&ap_dev->pendingq); - INIT_LIST_HEAD(&ap_dev->requestq); - INIT_LIST_HEAD(&ap_dev->list); - setup_timer(&ap_dev->timeout, ap_request_timeout, - (unsigned long) ap_dev); - - ap_dev->device.bus = &ap_bus_type; - ap_dev->device.parent = ap_root_device; - rc = dev_set_name(&ap_dev->device, "card%02x", - AP_QID_DEVICE(ap_dev->qid)); - if (rc) { - kfree(ap_dev); - continue; - } - /* Add to list of devices */ - spin_lock_bh(&ap_device_list_lock); - list_add(&ap_dev->list, &ap_device_list); - spin_unlock_bh(&ap_device_list_lock); - /* Start with a device reset */ - spin_lock_bh(&ap_dev->lock); - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); - spin_unlock_bh(&ap_dev->lock); - /* Register device */ - ap_dev->device.release = ap_device_release; - rc = device_register(&ap_dev->device); - if (rc) { - spin_lock_bh(&ap_dev->lock); - list_del_init(&ap_dev->list); - spin_unlock_bh(&ap_dev->lock); - put_device(&ap_dev->device); + put_device(dev); + } continue; } - /* Add device attributes. */ - rc = sysfs_create_group(&ap_dev->device.kobj, - &ap_dev_attr_group); - if (rc) { - device_unregister(&ap_dev->device); - continue; + /* According to the configuration there should be a card + * device, so check if there is at least one valid queue + * and maybe create queue devices and the card device. + */ + domains = 0; + for (dom = 0; dom < AP_DOMAINS; dom++) { + qid = AP_MKQID(id, dom); + dev = bus_find_device(&ap_bus_type, NULL, + (void *)(long) qid, + __match_queue_device_with_qid); + aq = dev ? to_ap_queue(dev) : NULL; + if (!ap_test_config_domain(dom)) { + if (dev) { + /* Queue device exists but has been + * removed from configuration. 
+ */ + device_unregister(dev); + put_device(dev); + } + continue; + } + rc = ap_query_queue(qid, &depth, &type, &functions); + if (dev) { + spin_lock_bh(&aq->lock); + if (rc == -ENODEV || + /* adapter reconfiguration */ + (ac && ac->functions != functions)) + aq->state = AP_STATE_BORKED; + borked = aq->state == AP_STATE_BORKED; + spin_unlock_bh(&aq->lock); + if (borked) /* Remove broken device */ + device_unregister(dev); + put_device(dev); + if (!borked) { + domains++; + continue; + } + } + if (rc) + continue; + /* new queue device needed */ + if (!ac) { + /* but first create the card device */ + ac = ap_card_create(id, depth, + type, functions); + if (!ac) + continue; + ac->ap_dev.device.bus = &ap_bus_type; + ac->ap_dev.device.parent = ap_root_device; + dev_set_name(&ac->ap_dev.device, + "card%02x", id); + /* Register card with AP bus */ + rc = device_register(&ac->ap_dev.device); + if (rc) { + put_device(&ac->ap_dev.device); + ac = NULL; + break; + } + /* get it and thus adjust reference counter */ + get_device(&ac->ap_dev.device); + /* Add card device to card list */ + spin_lock_bh(&ap_list_lock); + list_add(&ac->list, &ap_card_list); + spin_unlock_bh(&ap_list_lock); + } + /* now create the new queue device */ + aq = ap_queue_create(qid, type); + if (!aq) + continue; + aq->card = ac; + aq->ap_dev.device.bus = &ap_bus_type; + aq->ap_dev.device.parent = &ac->ap_dev.device; + dev_set_name(&aq->ap_dev.device, + "%02x.%04x", id, dom); + /* Add queue device to card queue list */ + spin_lock_bh(&ap_list_lock); + list_add(&aq->list, &ac->queues); + spin_unlock_bh(&ap_list_lock); + /* Start with a device reset */ + spin_lock_bh(&aq->lock); + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); + /* Register device */ + rc = device_register(&aq->ap_dev.device); + if (rc) { + spin_lock_bh(&ap_list_lock); + list_del_init(&aq->list); + spin_unlock_bh(&ap_list_lock); + put_device(&aq->ap_dev.device); + continue; + } + domains++; + } /* end domain loop */ + if (ac) { + /* remove card dev if there are no queue devices */ + if (!domains) + device_unregister(&ac->ap_dev.device); + put_device(&ac->ap_dev.device); } - } + } /* end device loop */ out: mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); } @@ -1772,7 +1124,7 @@ static void ap_reset_domain(void) if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index)) return; for (i = 0; i < AP_DEVICES; i++) - ap_reset_queue(AP_MKQID(i, ap_domain_index)); + ap_rapq(AP_MKQID(i, ap_domain_index)); } static void ap_reset_all(void) @@ -1785,7 +1137,7 @@ static void ap_reset_all(void) for (j = 0; j < AP_DEVICES; j++) { if (!ap_test_config_card_id(j)) continue; - ap_reset_queue(AP_MKQID(j, i)); + ap_rapq(AP_MKQID(j, i)); } } } @@ -1794,6 +1146,23 @@ static struct reset_call ap_reset_call = { .fn = ap_reset_all, }; +int __init ap_debug_init(void) +{ + ap_dbf_root = debugfs_create_dir("ap", NULL); + ap_dbf_info = debug_register("ap", 1, 1, + DBF_MAX_SPRINTF_ARGS * sizeof(long)); + debug_register_view(ap_dbf_info, &debug_sprintf_view); + debug_set_level(ap_dbf_info, DBF_ERR); + + return 0; +} + +void ap_debug_exit(void) +{ + debugfs_remove(ap_dbf_root); + debug_unregister(ap_dbf_info); +} + /** * ap_module_init(): The module initialization code. 
* @@ -1804,6 +1173,10 @@ int __init ap_module_init(void) int max_domain_id; int rc, i; + rc = ap_debug_init(); + if (rc) + return rc; + if (ap_instructions_available() != 0) { pr_warn("The hardware system does not support AP instructions\n"); return -ENODEV; @@ -1913,7 +1286,15 @@ void ap_module_exit(void) del_timer_sync(&ap_config_timer); hrtimer_cancel(&ap_poll_timer); tasklet_kill(&ap_tasklet); - bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); + + /* first remove queue devices */ + bus_for_each_dev(&ap_bus_type, NULL, NULL, + __ap_queue_devices_unregister); + /* now remove the card devices */ + bus_for_each_dev(&ap_bus_type, NULL, NULL, + __ap_card_devices_unregister); + + /* remove bus attributes */ for (i = 0; ap_bus_attrs[i]; i++) bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); unregister_pm_notifier(&ap_power_notifier); @@ -1923,6 +1304,8 @@ void ap_module_exit(void) unregister_reset_call(&ap_reset_call); if (ap_using_interrupts()) unregister_adapter_interrupt(&ap_airq); + + ap_debug_exit(); } module_init(ap_module_init); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index d7fdf5c024d730c53838bac9576c42f40c6a9d8f..4dc7c88fb0544622fc2ab9cd641b7212a2f14c32 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -27,7 +27,6 @@ #define _AP_BUS_H_ #include -#include #include #define AP_DEVICES 64 /* Number of AP devices. */ @@ -38,14 +37,17 @@ extern int ap_domain_index; +extern spinlock_t ap_list_lock; +extern struct list_head ap_card_list; + /** * The ap_qid_t identifier of an ap queue. It contains a - * 6 bit device index and a 4 bit queue index (domain). + * 6 bit card index and a 4 bit queue index (domain). */ typedef unsigned int ap_qid_t; -#define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255)) -#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63) +#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255)) +#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63) #define AP_QID_QUEUE(_qid) ((_qid) & 255) /** @@ -55,7 +57,7 @@ typedef unsigned int ap_qid_t; * @queue_full: Is 1 if the queue is full * @pad: A 4 bit pad * @int_enabled: Shows if interrupts are enabled for the AP - * @response_conde: Holds the 8 bit response code + * @response_code: Holds the 8 bit response code * @pad2: A 16 bit pad * * The ap queue status word is returned by all three AP functions @@ -105,6 +107,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) #define AP_DEVICE_TYPE_CEX3C 9 #define AP_DEVICE_TYPE_CEX4 10 #define AP_DEVICE_TYPE_CEX5 11 +#define AP_DEVICE_TYPE_CEX6 12 /* * Known function facilities @@ -166,7 +169,8 @@ struct ap_driver { int (*probe)(struct ap_device *); void (*remove)(struct ap_device *); - int request_timeout; /* request timeout in jiffies */ + void (*suspend)(struct ap_device *); + void (*resume)(struct ap_device *); }; #define to_ap_drv(x) container_of((x), struct ap_driver, driver) @@ -174,38 +178,51 @@ struct ap_driver { int ap_driver_register(struct ap_driver *, struct module *, char *); void ap_driver_unregister(struct ap_driver *); -typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev); - struct ap_device { struct device device; struct ap_driver *drv; /* Pointer to AP device driver. */ - spinlock_t lock; /* Per device lock. */ - struct list_head list; /* private list of all AP devices. */ + int device_type; /* AP device type. */ +}; - enum ap_state state; /* State of the AP device. 
*/ +#define to_ap_dev(x) container_of((x), struct ap_device, device) - ap_qid_t qid; /* AP queue id. */ - int queue_depth; /* AP queue depth.*/ - int device_type; /* AP device type. */ +struct ap_card { + struct ap_device ap_dev; + struct list_head list; /* Private list of AP cards. */ + struct list_head queues; /* List of assoc. AP queues */ + void *private; /* ap driver private pointer. */ int raw_hwtype; /* AP raw hardware type. */ unsigned int functions; /* AP device function bitfield. */ - struct timer_list timeout; /* Timer for request timeouts. */ + int queue_depth; /* AP queue depth.*/ + int id; /* AP card number. */ + atomic_t total_request_count; /* # requests ever for this AP device.*/ +}; + +#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) +struct ap_queue { + struct ap_device ap_dev; + struct list_head list; /* Private list of AP queues. */ + struct ap_card *card; /* Ptr to assoc. AP card. */ + spinlock_t lock; /* Per device lock. */ + void *private; /* ap driver private pointer. */ + ap_qid_t qid; /* AP queue id. */ int interrupt; /* indicate if interrupts are enabled */ int queue_count; /* # messages currently on AP queue. */ - - struct list_head pendingq; /* List of message sent to AP queue. */ + enum ap_state state; /* State of the AP device. */ int pendingq_count; /* # requests on pendingq list. */ - struct list_head requestq; /* List of message yet to be sent. */ int requestq_count; /* # requests on requestq list. */ - int total_request_count; /* # requests ever for this AP device. */ - + int total_request_count; /* # requests ever for this AP device.*/ + int request_timeout; /* Request timout in jiffies. */ + struct timer_list timeout; /* Timer for request timeouts. */ + struct list_head pendingq; /* List of message sent to AP queue. */ + struct list_head requestq; /* List of message yet to be sent. */ struct ap_message *reply; /* Per device reply message. */ - - void *private; /* ap driver private pointer. */ }; -#define to_ap_dev(x) container_of((x), struct ap_device, device) +#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device) + +typedef enum ap_wait (ap_func_t)(struct ap_queue *queue); struct ap_message { struct list_head list; /* Request queueing. */ @@ -217,7 +234,7 @@ struct ap_message { void *private; /* ap driver private pointer. */ unsigned int special:1; /* Used for special commands. */ /* receive is called from tasklet context */ - void (*receive)(struct ap_device *, struct ap_message *, + void (*receive)(struct ap_queue *, struct ap_message *, struct ap_message *); }; @@ -232,10 +249,6 @@ struct ap_config_info { unsigned char reserved4[16]; } __packed; -#define AP_DEVICE(dt) \ - .dev_type=(dt), \ - .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, - /** * ap_init_message() - Initialize ap_message. * Initialize a message before using. Otherwise this might result in @@ -250,6 +263,12 @@ static inline void ap_init_message(struct ap_message *ap_msg) ap_msg->receive = NULL; } +#define for_each_ap_card(_ac) \ + list_for_each_entry(_ac, &ap_card_list, list) + +#define for_each_ap_queue(_aq, _ac) \ + list_for_each_entry(_aq, &(_ac)->queues, list) + /* * Note: don't use ap_send/ap_recv after using ap_queue_message * for the first time. 
Otherwise the ap message queue will get @@ -258,11 +277,26 @@ static inline void ap_init_message(struct ap_message *ap_msg) int ap_send(ap_qid_t, unsigned long long, void *, size_t); int ap_recv(ap_qid_t, unsigned long long *, void *, size_t); -void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); -void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); -void ap_flush_queue(struct ap_device *ap_dev); +enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event); +enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event); + +void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg); +void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg); +void ap_flush_queue(struct ap_queue *aq); + +void *ap_airq_ptr(void); +void ap_wait(enum ap_wait wait); +void ap_request_timeout(unsigned long data); void ap_bus_force_rescan(void); -void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg); + +void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); +struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); +void ap_queue_remove(struct ap_queue *aq); +void ap_queue_suspend(struct ap_device *ap_dev); +void ap_queue_resume(struct ap_device *ap_dev); + +struct ap_card *ap_card_create(int id, int queue_depth, int device_type, + unsigned int device_functions); int ap_module_init(void); void ap_module_exit(void); diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c new file mode 100644 index 0000000000000000000000000000000000000000..0110d44172a3a5bd3b9c5ffa82464efeba34f461 --- /dev/null +++ b/drivers/s390/crypto/ap_card.c @@ -0,0 +1,170 @@ +/* + * Copyright IBM Corp. 2016 + * Author(s): Martin Schwidefsky + * + * Adjunct processor bus, card related code. + */ + +#define KMSG_COMPONENT "ap" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include + +#include "ap_bus.h" +#include "ap_asm.h" + +/* + * AP card related attributes. 
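The new card/queue split is easiest to see from a consumer's point of view. A minimal sketch, assuming a caller inside the AP bus code (the helper name is made up): walk all cards and their queues with the for_each_ap_card()/for_each_ap_queue() helpers while holding ap_list_lock, the same pattern the sysfs attributes below use.

/* Hypothetical helper: total number of requests still waiting to be sent. */
static int ap_total_requestq_count(void)
{
	struct ap_card *ac;
	struct ap_queue *aq;
	int cnt = 0;

	spin_lock_bh(&ap_list_lock);		/* protects both list levels */
	for_each_ap_card(ac)
		for_each_ap_queue(aq, ac)
			cnt += aq->requestq_count;
	spin_unlock_bh(&ap_list_lock);
	return cnt;
}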
+ */ +static ssize_t ap_hwtype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type); +} + +static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); + +static ssize_t ap_raw_hwtype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype); +} + +static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL); + +static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth); +} + +static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); + +static ssize_t ap_functions_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions); +} + +static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); + +static ssize_t ap_request_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + unsigned int req_cnt; + + req_cnt = 0; + spin_lock_bh(&ap_list_lock); + req_cnt = atomic_read(&ac->total_request_count); + spin_unlock_bh(&ap_list_lock); + return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); +} + +static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); + +static ssize_t ap_requestq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + struct ap_queue *aq; + unsigned int reqq_cnt; + + reqq_cnt = 0; + spin_lock_bh(&ap_list_lock); + for_each_ap_queue(aq, ac) + reqq_cnt += aq->requestq_count; + spin_unlock_bh(&ap_list_lock); + return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); +} + +static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); + +static ssize_t ap_pendingq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + struct ap_queue *aq; + unsigned int penq_cnt; + + penq_cnt = 0; + spin_lock_bh(&ap_list_lock); + for_each_ap_queue(aq, ac) + penq_cnt += aq->pendingq_count; + spin_unlock_bh(&ap_list_lock); + return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); +} + +static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); + +static ssize_t ap_modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type); +} + +static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); + +static struct attribute *ap_card_dev_attrs[] = { + &dev_attr_hwtype.attr, + &dev_attr_raw_hwtype.attr, + &dev_attr_depth.attr, + &dev_attr_ap_functions.attr, + &dev_attr_request_count.attr, + &dev_attr_requestq_count.attr, + &dev_attr_pendingq_count.attr, + &dev_attr_modalias.attr, + NULL +}; + +static struct attribute_group ap_card_dev_attr_group = { + .attrs = ap_card_dev_attrs +}; + +static const struct attribute_group *ap_card_dev_attr_groups[] = { + &ap_card_dev_attr_group, + NULL +}; + +struct device_type ap_card_type = { + .name = "ap_card", + .groups = ap_card_dev_attr_groups, +}; + +static void ap_card_device_release(struct device *dev) +{ + kfree(to_ap_card(dev)); +} + +struct ap_card *ap_card_create(int id, int queue_depth, int device_type, + unsigned int functions) +{ + struct ap_card *ac; + + ac = kzalloc(sizeof(*ac), 
GFP_KERNEL); + if (!ac) + return NULL; + INIT_LIST_HEAD(&ac->queues); + ac->ap_dev.device.release = ap_card_device_release; + ac->ap_dev.device.type = &ap_card_type; + ac->ap_dev.device_type = device_type; + /* CEX6 toleration: map to CEX5 */ + if (device_type == AP_DEVICE_TYPE_CEX6) + ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5; + ac->raw_hwtype = device_type; + ac->queue_depth = queue_depth; + ac->functions = functions; + ac->id = id; + return ac; +} diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..78dbff842dae8142714874f1715ab8fa55067417 --- /dev/null +++ b/drivers/s390/crypto/ap_debug.h @@ -0,0 +1,28 @@ +/* + * Copyright IBM Corp. 2016 + * Author(s): Harald Freudenberger + */ +#ifndef AP_DEBUG_H +#define AP_DEBUG_H + +#include + +#define DBF_ERR 3 /* error conditions */ +#define DBF_WARN 4 /* warning conditions */ +#define DBF_INFO 5 /* informational */ +#define DBF_DEBUG 6 /* for debugging only */ + +#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) +#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) + +#define DBF_MAX_SPRINTF_ARGS 5 + +#define AP_DBF(...) \ + debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__) + +extern debug_info_t *ap_dbf_info; + +int ap_debug_init(void); +void ap_debug_exit(void); + +#endif /* AP_DEBUG_H */ diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c new file mode 100644 index 0000000000000000000000000000000000000000..b58a917dc5107318b31d5a3805f19c55771e11b7 --- /dev/null +++ b/drivers/s390/crypto/ap_queue.c @@ -0,0 +1,701 @@ +/* + * Copyright IBM Corp. 2016 + * Author(s): Martin Schwidefsky + * + * Adjunct processor bus, queue related code. + */ + +#define KMSG_COMPONENT "ap" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include + +#include "ap_bus.h" +#include "ap_asm.h" + +/** + * ap_queue_enable_interruption(): Enable interruption on an AP queue. + * @qid: The AP queue number + * @ind: the notification indicator byte + * + * Enables interruption on AP queue via ap_aqic(). Based on the return + * value it waits a while and tests the AP queue if interrupts + * have been switched on using ap_test_queue(). + */ +static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind) +{ + struct ap_queue_status status; + + status = ap_aqic(aq->qid, ind); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + case AP_RESPONSE_OTHERWISE_CHANGED: + return 0; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + case AP_RESPONSE_INVALID_ADDRESS: + pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n", + AP_QID_CARD(aq->qid), + AP_QID_QUEUE(aq->qid)); + return -EOPNOTSUPP; + case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_BUSY: + default: + return -EBUSY; + } +} + +/** + * __ap_send(): Send message to adjunct processor queue. + * @qid: The AP queue number + * @psmid: The program supplied message identifier + * @msg: The message text + * @length: The message length + * @special: Special Bit + * + * Returns AP queue status structure. + * Condition code 1 on NQAP can't happen because the L bit is 1. + * Condition code 2 on NQAP also means the send is incomplete, + * because a segment boundary was reached. The NQAP is repeated. 
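The new debug feature is registered by ap_debug_init() with the sprintf view and a default level of DBF_ERR, so only error-level records are kept until the level is raised. A minimal usage sketch, assuming a caller that already has a qid and an AP status word at hand (the function is hypothetical); recorded entries can then be read back through the s390 debug feature's debugfs view (typically under /sys/kernel/debug/s390dbf/ap/).

#include "ap_debug.h"

/* Hypothetical caller: record a bad response code in the "ap" debug area. */
static void ap_log_bad_response(ap_qid_t qid, struct ap_queue_status status)
{
	/* At most DBF_MAX_SPRINTF_ARGS arguments after the format string. */
	AP_DBF(DBF_ERR, "%s: qid=%04x response_code=%02x\n",
	       __func__, qid, status.response_code);
}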
+ */ +static inline struct ap_queue_status +__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, + unsigned int special) +{ + if (special == 1) + qid |= 0x400000UL; + return ap_nqap(qid, psmid, msg, length); +} + +int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) +{ + struct ap_queue_status status; + + status = __ap_send(qid, psmid, msg, length, 0); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + return 0; + case AP_RESPONSE_Q_FULL: + case AP_RESPONSE_RESET_IN_PROGRESS: + return -EBUSY; + case AP_RESPONSE_REQ_FAC_NOT_INST: + return -EINVAL; + default: /* Device is gone. */ + return -ENODEV; + } +} +EXPORT_SYMBOL(ap_send); + +int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) +{ + struct ap_queue_status status; + + if (msg == NULL) + return -EINVAL; + status = ap_dqap(qid, psmid, msg, length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + return 0; + case AP_RESPONSE_NO_PENDING_REPLY: + if (status.queue_empty) + return -ENOENT; + return -EBUSY; + case AP_RESPONSE_RESET_IN_PROGRESS: + return -EBUSY; + default: + return -ENODEV; + } +} +EXPORT_SYMBOL(ap_recv); + +/* State machine definitions and helpers */ + +static enum ap_wait ap_sm_nop(struct ap_queue *aq) +{ + return AP_WAIT_NONE; +} + +/** + * ap_sm_recv(): Receive pending reply messages from an AP queue but do + * not change the state of the device. + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT + */ +static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) +{ + struct ap_queue_status status; + struct ap_message *ap_msg; + + status = ap_dqap(aq->qid, &aq->reply->psmid, + aq->reply->message, aq->reply->length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + aq->queue_count--; + if (aq->queue_count > 0) + mod_timer(&aq->timeout, + jiffies + aq->request_timeout); + list_for_each_entry(ap_msg, &aq->pendingq, list) { + if (ap_msg->psmid != aq->reply->psmid) + continue; + list_del_init(&ap_msg->list); + aq->pendingq_count--; + ap_msg->receive(aq, ap_msg, aq->reply); + break; + } + case AP_RESPONSE_NO_PENDING_REPLY: + if (!status.queue_empty || aq->queue_count <= 0) + break; + /* The card shouldn't forget requests but who knows. */ + aq->queue_count = 0; + list_splice_init(&aq->pendingq, &aq->requestq); + aq->requestq_count += aq->pendingq_count; + aq->pendingq_count = 0; + break; + default: + break; + } + return status; +} + +/** + * ap_sm_read(): Receive pending reply messages from an AP queue. + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT + */ +static enum ap_wait ap_sm_read(struct ap_queue *aq) +{ + struct ap_queue_status status; + + if (!aq->reply) + return AP_WAIT_NONE; + status = ap_sm_recv(aq); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (aq->queue_count > 0) { + aq->state = AP_STATE_WORKING; + return AP_WAIT_AGAIN; + } + aq->state = AP_STATE_IDLE; + return AP_WAIT_NONE; + case AP_RESPONSE_NO_PENDING_REPLY: + if (aq->queue_count > 0) + return AP_WAIT_INTERRUPT; + aq->state = AP_STATE_IDLE; + return AP_WAIT_NONE; + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_suspend_read(): Receive pending reply messages from an AP queue + * without changing the device state in between. In suspend mode we don't + * allow sending new requests, therefore just fetch pending replies. 
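ap_send()/ap_recv() keep the old synchronous, fire-and-poll interface that the header note above warns must not be mixed with ap_queue_message(). A rough sketch of that legacy usage, with a made-up psmid and no payload formatting; real callers use the msgtype-specific message layouts and poll with a delay and timeout.

/* Illustrative only: one blocking request/reply round trip on a queue. */
static int ap_sync_roundtrip(ap_qid_t qid, void *msg, size_t len)
{
	unsigned long long psmid = 0x0102030405060708ULL;	/* placeholder */
	int rc;

	rc = ap_send(qid, psmid, msg, len);
	if (rc)
		return rc;
	do {
		/* -ENOENT/-EBUSY: reply not available yet, keep polling */
		rc = ap_recv(qid, &psmid, msg, len);
	} while (rc == -ENOENT || rc == -EBUSY);
	return rc;
}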
+ * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE or AP_WAIT_AGAIN + */ +static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq) +{ + struct ap_queue_status status; + + if (!aq->reply) + return AP_WAIT_NONE; + status = ap_sm_recv(aq); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (aq->queue_count > 0) + return AP_WAIT_AGAIN; + /* fall through */ + default: + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_write(): Send messages from the request queue to an AP queue. + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT + */ +static enum ap_wait ap_sm_write(struct ap_queue *aq) +{ + struct ap_queue_status status; + struct ap_message *ap_msg; + + if (aq->requestq_count <= 0) + return AP_WAIT_NONE; + /* Start the next request on the queue. */ + ap_msg = list_entry(aq->requestq.next, struct ap_message, list); + status = __ap_send(aq->qid, ap_msg->psmid, + ap_msg->message, ap_msg->length, ap_msg->special); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + aq->queue_count++; + if (aq->queue_count == 1) + mod_timer(&aq->timeout, jiffies + aq->request_timeout); + list_move_tail(&ap_msg->list, &aq->pendingq); + aq->requestq_count--; + aq->pendingq_count++; + if (aq->queue_count < aq->card->queue_depth) { + aq->state = AP_STATE_WORKING; + return AP_WAIT_AGAIN; + } + /* fall through */ + case AP_RESPONSE_Q_FULL: + aq->state = AP_STATE_QUEUE_FULL; + return AP_WAIT_INTERRUPT; + case AP_RESPONSE_RESET_IN_PROGRESS: + aq->state = AP_STATE_RESET_WAIT; + return AP_WAIT_TIMEOUT; + case AP_RESPONSE_MESSAGE_TOO_BIG: + case AP_RESPONSE_REQ_FAC_NOT_INST: + list_del_init(&ap_msg->list); + aq->requestq_count--; + ap_msg->rc = -EINVAL; + ap_msg->receive(aq, ap_msg, NULL); + return AP_WAIT_AGAIN; + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_read_write(): Send and receive messages to/from an AP queue. + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT + */ +static enum ap_wait ap_sm_read_write(struct ap_queue *aq) +{ + return min(ap_sm_read(aq), ap_sm_write(aq)); +} + +/** + * ap_sm_reset(): Reset an AP queue. + * @qid: The AP queue number + * + * Submit the Reset command to an AP queue. + */ +static enum ap_wait ap_sm_reset(struct ap_queue *aq) +{ + struct ap_queue_status status; + + status = ap_rapq(aq->qid); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + case AP_RESPONSE_RESET_IN_PROGRESS: + aq->state = AP_STATE_RESET_WAIT; + aq->interrupt = AP_INTR_DISABLED; + return AP_WAIT_TIMEOUT; + case AP_RESPONSE_BUSY: + return AP_WAIT_TIMEOUT; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_reset_wait(): Test queue for completion of the reset operation + * @aq: pointer to the AP queue + * + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 
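The timer armed in ap_sm_write() uses the new per-queue request_timeout instead of the old per-driver field. How that field gets populated is not part of this hunk; the sketch below assumes a device driver sets it from its probe callback before queueing work, with an arbitrary 60 second value.

/* Assumption for illustration only: a driver programming the queue timeout. */
static int example_queue_probe(struct ap_device *ap_dev)
{
	struct ap_queue *aq = to_ap_queue(&ap_dev->device);

	aq->request_timeout = msecs_to_jiffies(60 * MSEC_PER_SEC);
	return 0;
}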
+ */ +static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq) +{ + struct ap_queue_status status; + void *lsi_ptr; + + if (aq->queue_count > 0 && aq->reply) + /* Try to read a completed message and get the status */ + status = ap_sm_recv(aq); + else + /* Get the status with TAPQ */ + status = ap_tapq(aq->qid, NULL); + + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + lsi_ptr = ap_airq_ptr(); + if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0) + aq->state = AP_STATE_SETIRQ_WAIT; + else + aq->state = (aq->queue_count > 0) ? + AP_STATE_WORKING : AP_STATE_IDLE; + return AP_WAIT_AGAIN; + case AP_RESPONSE_BUSY: + case AP_RESPONSE_RESET_IN_PROGRESS: + return AP_WAIT_TIMEOUT; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_setirq_wait(): Test queue for completion of the irq enablement + * @aq: pointer to the AP queue + * + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. + */ +static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq) +{ + struct ap_queue_status status; + + if (aq->queue_count > 0 && aq->reply) + /* Try to read a completed message and get the status */ + status = ap_sm_recv(aq); + else + /* Get the status with TAPQ */ + status = ap_tapq(aq->qid, NULL); + + if (status.int_enabled == 1) { + /* Irqs are now enabled */ + aq->interrupt = AP_INTR_ENABLED; + aq->state = (aq->queue_count > 0) ? + AP_STATE_WORKING : AP_STATE_IDLE; + } + + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (aq->queue_count > 0) + return AP_WAIT_AGAIN; + /* fallthrough */ + case AP_RESPONSE_NO_PENDING_REPLY: + return AP_WAIT_TIMEOUT; + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/* + * AP state machine jump table + */ +static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { + [AP_STATE_RESET_START] = { + [AP_EVENT_POLL] = ap_sm_reset, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_RESET_WAIT] = { + [AP_EVENT_POLL] = ap_sm_reset_wait, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_SETIRQ_WAIT] = { + [AP_EVENT_POLL] = ap_sm_setirq_wait, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_IDLE] = { + [AP_EVENT_POLL] = ap_sm_write, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_WORKING] = { + [AP_EVENT_POLL] = ap_sm_read_write, + [AP_EVENT_TIMEOUT] = ap_sm_reset, + }, + [AP_STATE_QUEUE_FULL] = { + [AP_EVENT_POLL] = ap_sm_read, + [AP_EVENT_TIMEOUT] = ap_sm_reset, + }, + [AP_STATE_SUSPEND_WAIT] = { + [AP_EVENT_POLL] = ap_sm_suspend_read, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_BORKED] = { + [AP_EVENT_POLL] = ap_sm_nop, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, +}; + +enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event) +{ + return ap_jumptable[aq->state][event](aq); +} + +enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event) +{ + enum ap_wait wait; + + while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN) + ; + return wait; +} + +/* + * Power management for queue devices + */ +void ap_queue_suspend(struct ap_device *ap_dev) +{ + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + + /* Poll on the device until all requests are finished. 
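All state machine transitions are driven through ap_sm_event()/ap_sm_event_loop() with the queue lock held, and the returned ap_wait hint is passed to ap_wait() to arm the next poll. A condensed form of the pattern used by the callers added in this patch (the wrapper function itself is illustrative):

/* Condensed caller pattern: poll a queue and schedule the next wakeup. */
static void example_poll_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* run POLL transitions until the machine reports no more progress */
	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}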
*/ + spin_lock_bh(&aq->lock); + aq->state = AP_STATE_SUSPEND_WAIT; + while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE) + ; + aq->state = AP_STATE_BORKED; + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_suspend); + +void ap_queue_resume(struct ap_device *ap_dev) +{ +} +EXPORT_SYMBOL(ap_queue_resume); + +/* + * AP queue related attributes. + */ +static ssize_t ap_request_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + unsigned int req_cnt; + + spin_lock_bh(&aq->lock); + req_cnt = aq->total_request_count; + spin_unlock_bh(&aq->lock); + return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); +} + +static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); + +static ssize_t ap_requestq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + unsigned int reqq_cnt = 0; + + spin_lock_bh(&aq->lock); + reqq_cnt = aq->requestq_count; + spin_unlock_bh(&aq->lock); + return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); +} + +static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); + +static ssize_t ap_pendingq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + unsigned int penq_cnt = 0; + + spin_lock_bh(&aq->lock); + penq_cnt = aq->pendingq_count; + spin_unlock_bh(&aq->lock); + return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); +} + +static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); + +static ssize_t ap_reset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + int rc = 0; + + spin_lock_bh(&aq->lock); + switch (aq->state) { + case AP_STATE_RESET_START: + case AP_STATE_RESET_WAIT: + rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); + break; + case AP_STATE_WORKING: + case AP_STATE_QUEUE_FULL: + rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); + break; + default: + rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); + } + spin_unlock_bh(&aq->lock); + return rc; +} + +static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL); + +static ssize_t ap_interrupt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + int rc = 0; + + spin_lock_bh(&aq->lock); + if (aq->state == AP_STATE_SETIRQ_WAIT) + rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); + else if (aq->interrupt == AP_INTR_ENABLED) + rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); + else + rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); + spin_unlock_bh(&aq->lock); + return rc; +} + +static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL); + +static struct attribute *ap_queue_dev_attrs[] = { + &dev_attr_request_count.attr, + &dev_attr_requestq_count.attr, + &dev_attr_pendingq_count.attr, + &dev_attr_reset.attr, + &dev_attr_interrupt.attr, + NULL +}; + +static struct attribute_group ap_queue_dev_attr_group = { + .attrs = ap_queue_dev_attrs +}; + +static const struct attribute_group *ap_queue_dev_attr_groups[] = { + &ap_queue_dev_attr_group, + NULL +}; + +struct device_type ap_queue_type = { + .name = "ap_queue", + .groups = ap_queue_dev_attr_groups, +}; + +static void ap_queue_device_release(struct device *dev) +{ + kfree(to_ap_queue(dev)); +} + +struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) +{ + struct ap_queue *aq; + + aq = kzalloc(sizeof(*aq), GFP_KERNEL); + if (!aq) + return NULL; + 
aq->ap_dev.device.release = ap_queue_device_release; + aq->ap_dev.device.type = &ap_queue_type; + aq->ap_dev.device_type = device_type; + /* CEX6 toleration: map to CEX5 */ + if (device_type == AP_DEVICE_TYPE_CEX6) + aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5; + aq->qid = qid; + aq->state = AP_STATE_RESET_START; + aq->interrupt = AP_INTR_DISABLED; + spin_lock_init(&aq->lock); + INIT_LIST_HEAD(&aq->pendingq); + INIT_LIST_HEAD(&aq->requestq); + setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq); + + return aq; +} + +void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply) +{ + aq->reply = reply; + + spin_lock_bh(&aq->lock); + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_init_reply); + +/** + * ap_queue_message(): Queue a request to an AP device. + * @aq: The AP device to queue the message to + * @ap_msg: The message that is to be added + */ +void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) +{ + /* For asynchronous message handling a valid receive-callback + * is required. + */ + BUG_ON(!ap_msg->receive); + + spin_lock_bh(&aq->lock); + /* Queue the message. */ + list_add_tail(&ap_msg->list, &aq->requestq); + aq->requestq_count++; + aq->total_request_count++; + atomic_inc(&aq->card->total_request_count); + /* Send/receive as many request from the queue as possible. */ + ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_message); + +/** + * ap_cancel_message(): Cancel a crypto request. + * @aq: The AP device that has the message queued + * @ap_msg: The message that is to be removed + * + * Cancel a crypto request. This is done by removing the request + * from the device pending or request queue. Note that the + * request stays on the AP queue. When it finishes the message + * reply will be discarded because the psmid can't be found. + */ +void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg) +{ + struct ap_message *tmp; + + spin_lock_bh(&aq->lock); + if (!list_empty(&ap_msg->list)) { + list_for_each_entry(tmp, &aq->pendingq, list) + if (tmp->psmid == ap_msg->psmid) { + aq->pendingq_count--; + goto found; + } + aq->requestq_count--; +found: + list_del_init(&ap_msg->list); + } + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_cancel_message); + +/** + * __ap_flush_queue(): Flush requests. + * @aq: Pointer to the AP queue + * + * Flush all requests from the request/pending queue of an AP device. 
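ap_queue_message() requires a receive callback and runs it from tasklet context once the matching reply (by psmid) is dequeued. A minimal sketch of the asynchronous pattern, roughly what the zcrypt msgtype code does; the function names are made up, and the message buffer, length and psmid setup are omitted.

#include <linux/completion.h>

static void example_receive(struct ap_queue *aq, struct ap_message *msg,
			    struct ap_message *reply)
{
	/* Tasklet context; reply is NULL when msg->rc carries an error. */
	complete((struct completion *) msg->private);
}

static void example_submit(struct ap_queue *aq, struct ap_message *msg)
{
	DECLARE_COMPLETION_ONSTACK(done);

	ap_init_message(msg);		/* clear rc, psmid, special, receive */
	/* caller must also fill msg->message, msg->length and a unique psmid */
	msg->receive = example_receive;	/* mandatory, see BUG_ON() above */
	msg->private = &done;
	ap_queue_message(aq, msg);	/* queues and kicks the state machine */
	wait_for_completion(&done);
}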
+ */ +static void __ap_flush_queue(struct ap_queue *aq) +{ + struct ap_message *ap_msg, *next; + + list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) { + list_del_init(&ap_msg->list); + aq->pendingq_count--; + ap_msg->rc = -EAGAIN; + ap_msg->receive(aq, ap_msg, NULL); + } + list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) { + list_del_init(&ap_msg->list); + aq->requestq_count--; + ap_msg->rc = -EAGAIN; + ap_msg->receive(aq, ap_msg, NULL); + } +} + +void ap_flush_queue(struct ap_queue *aq) +{ + spin_lock_bh(&aq->lock); + __ap_flush_queue(aq); + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_flush_queue); + +void ap_queue_remove(struct ap_queue *aq) +{ + ap_flush_queue(aq); + del_timer_sync(&aq->timeout); +} +EXPORT_SYMBOL(ap_queue_remove); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 5d3d04c040c2be15af34d6977d17c658e35aaad5..51eece9af57778d3dc62a7b347add9855592ec44 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -36,15 +36,19 @@ #include #include #include -#include +#include #include #include #include -#include "zcrypt_debug.h" +#define CREATE_TRACE_POINTS +#include + #include "zcrypt_api.h" +#include "zcrypt_debug.h" #include "zcrypt_msgtype6.h" +#include "zcrypt_msgtype50.h" /* * Module description. @@ -54,76 +58,31 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); +/* + * zcrypt tracepoint functions + */ +EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); +EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); + static int zcrypt_hwrng_seed = 1; module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); -static DEFINE_SPINLOCK(zcrypt_device_lock); -static LIST_HEAD(zcrypt_device_list); -static int zcrypt_device_count = 0; +DEFINE_SPINLOCK(zcrypt_list_lock); +LIST_HEAD(zcrypt_card_list); +int zcrypt_device_count; + static atomic_t zcrypt_open_count = ATOMIC_INIT(0); static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); EXPORT_SYMBOL(zcrypt_rescan_req); -static int zcrypt_rng_device_add(void); -static void zcrypt_rng_device_remove(void); - -static DEFINE_SPINLOCK(zcrypt_ops_list_lock); static LIST_HEAD(zcrypt_ops_list); -static debug_info_t *zcrypt_dbf_common; -static debug_info_t *zcrypt_dbf_devices; -static struct dentry *debugfs_root; - -/* - * Device attributes common for all crypto devices. 
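Every user-visible request path in the reworked zcrypt_api.c is now bracketed by an s390_zcrypt_req/s390_zcrypt_rep tracepoint pair, so a trace shows which card and domain ended up serving a request. The shape of that instrumentation, condensed into an illustrative function:

/* Illustrative only: the req/rep tracepoint bracketing used by the ioctls. */
static long example_traced_op(void *req)
{
	unsigned int func_code = 0;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(req, TP_ICARSAMODEXPO);	/* request enters */
	/* ... select card/queue, issue the request, set qid/func_code/rc ... */
	trace_s390_zcrypt_rep(req, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}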
- */ -static ssize_t zcrypt_type_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct zcrypt_device *zdev = to_ap_dev(dev)->private; - return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string); -} - -static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL); - -static ssize_t zcrypt_online_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct zcrypt_device *zdev = to_ap_dev(dev)->private; - return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online); -} - -static ssize_t zcrypt_online_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct zcrypt_device *zdev = to_ap_dev(dev)->private; - int online; - - if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) - return -EINVAL; - zdev->online = online; - ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid, - zdev->online); - if (!online) - ap_flush_queue(zdev->ap_dev); - return count; -} - -static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store); - -static struct attribute * zcrypt_device_attrs[] = { - &dev_attr_type.attr, - &dev_attr_online.attr, - NULL, -}; - -static struct attribute_group zcrypt_device_attr_group = { - .attrs = zcrypt_device_attrs, -}; +/* Zcrypt related debug feature stuff. */ +static struct dentry *zcrypt_dbf_root; +debug_info_t *zcrypt_dbf_info; /** * Process a rescan of the transport layer. @@ -136,242 +95,34 @@ static inline int zcrypt_process_rescan(void) atomic_set(&zcrypt_rescan_req, 0); atomic_inc(&zcrypt_rescan_count); ap_bus_force_rescan(); - ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d", - atomic_inc_return(&zcrypt_rescan_count)); + ZCRYPT_DBF(DBF_INFO, "rescan count=%07d", + atomic_inc_return(&zcrypt_rescan_count)); return 1; } return 0; } -/** - * __zcrypt_increase_preference(): Increase preference of a crypto device. - * @zdev: Pointer the crypto device - * - * Move the device towards the head of the device list. - * Need to be called while holding the zcrypt device list lock. - * Note: cards with speed_rating of 0 are kept at the end of the list. - */ -static void __zcrypt_increase_preference(struct zcrypt_device *zdev) -{ - struct zcrypt_device *tmp; - struct list_head *l; - - if (zdev->speed_rating == 0) - return; - for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { - tmp = list_entry(l, struct zcrypt_device, list); - if ((tmp->request_count + 1) * tmp->speed_rating <= - (zdev->request_count + 1) * zdev->speed_rating && - tmp->speed_rating != 0) - break; - } - if (l == zdev->list.prev) - return; - /* Move zdev behind l */ - list_move(&zdev->list, l); -} - -/** - * __zcrypt_decrease_preference(): Decrease preference of a crypto device. - * @zdev: Pointer to a crypto device. - * - * Move the device towards the tail of the device list. - * Need to be called while holding the zcrypt device list lock. - * Note: cards with speed_rating of 0 are kept at the end of the list. 
- */ -static void __zcrypt_decrease_preference(struct zcrypt_device *zdev) -{ - struct zcrypt_device *tmp; - struct list_head *l; - - if (zdev->speed_rating == 0) - return; - for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) { - tmp = list_entry(l, struct zcrypt_device, list); - if ((tmp->request_count + 1) * tmp->speed_rating > - (zdev->request_count + 1) * zdev->speed_rating || - tmp->speed_rating == 0) - break; - } - if (l == zdev->list.next) - return; - /* Move zdev before l */ - list_move_tail(&zdev->list, l); -} - -static void zcrypt_device_release(struct kref *kref) -{ - struct zcrypt_device *zdev = - container_of(kref, struct zcrypt_device, refcount); - zcrypt_device_free(zdev); -} - -void zcrypt_device_get(struct zcrypt_device *zdev) -{ - kref_get(&zdev->refcount); -} -EXPORT_SYMBOL(zcrypt_device_get); - -int zcrypt_device_put(struct zcrypt_device *zdev) -{ - return kref_put(&zdev->refcount, zcrypt_device_release); -} -EXPORT_SYMBOL(zcrypt_device_put); - -struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size) -{ - struct zcrypt_device *zdev; - - zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL); - if (!zdev) - return NULL; - zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL); - if (!zdev->reply.message) - goto out_free; - zdev->reply.length = max_response_size; - spin_lock_init(&zdev->lock); - INIT_LIST_HEAD(&zdev->list); - zdev->dbf_area = zcrypt_dbf_devices; - return zdev; - -out_free: - kfree(zdev); - return NULL; -} -EXPORT_SYMBOL(zcrypt_device_alloc); - -void zcrypt_device_free(struct zcrypt_device *zdev) -{ - kfree(zdev->reply.message); - kfree(zdev); -} -EXPORT_SYMBOL(zcrypt_device_free); - -/** - * zcrypt_device_register() - Register a crypto device. - * @zdev: Pointer to a crypto device - * - * Register a crypto device. Returns 0 if successful. - */ -int zcrypt_device_register(struct zcrypt_device *zdev) -{ - int rc; - - if (!zdev->ops) - return -ENODEV; - rc = sysfs_create_group(&zdev->ap_dev->device.kobj, - &zcrypt_device_attr_group); - if (rc) - goto out; - get_device(&zdev->ap_dev->device); - kref_init(&zdev->refcount); - spin_lock_bh(&zcrypt_device_lock); - zdev->online = 1; /* New devices are online by default. */ - ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid, - zdev->online); - list_add_tail(&zdev->list, &zcrypt_device_list); - __zcrypt_increase_preference(zdev); - zcrypt_device_count++; - spin_unlock_bh(&zcrypt_device_lock); - if (zdev->ops->rng) { - rc = zcrypt_rng_device_add(); - if (rc) - goto out_unregister; - } - return 0; - -out_unregister: - spin_lock_bh(&zcrypt_device_lock); - zcrypt_device_count--; - list_del_init(&zdev->list); - spin_unlock_bh(&zcrypt_device_lock); - sysfs_remove_group(&zdev->ap_dev->device.kobj, - &zcrypt_device_attr_group); - put_device(&zdev->ap_dev->device); - zcrypt_device_put(zdev); -out: - return rc; -} -EXPORT_SYMBOL(zcrypt_device_register); - -/** - * zcrypt_device_unregister(): Unregister a crypto device. - * @zdev: Pointer to crypto device - * - * Unregister a crypto device. 
- */ -void zcrypt_device_unregister(struct zcrypt_device *zdev) -{ - if (zdev->ops->rng) - zcrypt_rng_device_remove(); - spin_lock_bh(&zcrypt_device_lock); - zcrypt_device_count--; - list_del_init(&zdev->list); - spin_unlock_bh(&zcrypt_device_lock); - sysfs_remove_group(&zdev->ap_dev->device.kobj, - &zcrypt_device_attr_group); - put_device(&zdev->ap_dev->device); - zcrypt_device_put(zdev); -} -EXPORT_SYMBOL(zcrypt_device_unregister); - void zcrypt_msgtype_register(struct zcrypt_ops *zops) { - spin_lock_bh(&zcrypt_ops_list_lock); list_add_tail(&zops->list, &zcrypt_ops_list); - spin_unlock_bh(&zcrypt_ops_list_lock); } -EXPORT_SYMBOL(zcrypt_msgtype_register); void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) { - spin_lock_bh(&zcrypt_ops_list_lock); list_del_init(&zops->list); - spin_unlock_bh(&zcrypt_ops_list_lock); } -EXPORT_SYMBOL(zcrypt_msgtype_unregister); -static inline -struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant) +struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) { struct zcrypt_ops *zops; - int found = 0; - spin_lock_bh(&zcrypt_ops_list_lock); - list_for_each_entry(zops, &zcrypt_ops_list, list) { + list_for_each_entry(zops, &zcrypt_ops_list, list) if ((zops->variant == variant) && - (!strncmp(zops->name, name, sizeof(zops->name)))) { - found = 1; - break; - } - } - if (!found || !try_module_get(zops->owner)) - zops = NULL; - - spin_unlock_bh(&zcrypt_ops_list_lock); - - return zops; -} - -struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant) -{ - struct zcrypt_ops *zops = NULL; - - zops = __ops_lookup(name, variant); - if (!zops) { - request_module("%s", name); - zops = __ops_lookup(name, variant); - } - return zops; -} -EXPORT_SYMBOL(zcrypt_msgtype_request); - -void zcrypt_msgtype_release(struct zcrypt_ops *zops) -{ - if (zops) - module_put(zops->owner); + (!strncmp(zops->name, name, sizeof(zops->name)))) + return zops; + return NULL; } -EXPORT_SYMBOL(zcrypt_msgtype_release); +EXPORT_SYMBOL(zcrypt_msgtype); /** * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 
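With this patch the msgtype ops are resolved through zcrypt_msgtype() and the EXPORT_SYMBOLs for registration are dropped, so registration happens from within the zcrypt code itself rather than from arbitrary modules. A sketch of the registration side, limited to the fields visible in this patch (name, variant, owner); everything else about the hypothetical ops is omitted.

static struct zcrypt_ops example_msgtype_ops = {
	.name	 = "example_msgtype",
	.variant = 0,
	.owner	 = THIS_MODULE,
};

static void example_register(void)
{
	zcrypt_msgtype_register(&example_msgtype_ops);
}

static struct zcrypt_ops *example_lookup(void)
{
	/* name + variant must both match, no module reference is taken here */
	return zcrypt_msgtype("example_msgtype", 0);
}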
@@ -417,16 +168,80 @@ static int zcrypt_release(struct inode *inode, struct file *filp) return 0; } +static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, + struct zcrypt_queue *zq, + unsigned int weight) +{ + if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) + return NULL; + zcrypt_queue_get(zq); + get_device(&zq->queue->ap_dev.device); + atomic_add(weight, &zc->load); + atomic_add(weight, &zq->load); + zq->request_count++; + return zq; +} + +static inline void zcrypt_drop_queue(struct zcrypt_card *zc, + struct zcrypt_queue *zq, + unsigned int weight) +{ + struct module *mod = zq->queue->ap_dev.drv->driver.owner; + + zq->request_count--; + atomic_sub(weight, &zc->load); + atomic_sub(weight, &zq->load); + put_device(&zq->queue->ap_dev.device); + zcrypt_queue_put(zq); + module_put(mod); +} + +static inline bool zcrypt_card_compare(struct zcrypt_card *zc, + struct zcrypt_card *pref_zc, + unsigned weight, unsigned pref_weight) +{ + if (!pref_zc) + return 0; + weight += atomic_read(&zc->load); + pref_weight += atomic_read(&pref_zc->load); + if (weight == pref_weight) + return atomic_read(&zc->card->total_request_count) > + atomic_read(&pref_zc->card->total_request_count); + return weight > pref_weight; +} + +static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, + struct zcrypt_queue *pref_zq, + unsigned weight, unsigned pref_weight) +{ + if (!pref_zq) + return 0; + weight += atomic_read(&zq->load); + pref_weight += atomic_read(&pref_zq->load); + if (weight == pref_weight) + return &zq->queue->total_request_count > + &pref_zq->queue->total_request_count; + return weight > pref_weight; +} + /* * zcrypt ioctls. */ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) { - struct zcrypt_device *zdev; - int rc; + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + unsigned int weight, pref_weight; + unsigned int func_code; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); + + if (mex->outputdatalength < mex->inputdatalength) { + rc = -EINVAL; + goto out; + } - if (mex->outputdatalength < mex->inputdatalength) - return -EINVAL; /* * As long as outputdatalength is big enough, we can set the * outputdatalength equal to the inputdatalength, since that is the @@ -434,44 +249,73 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) */ mex->outputdatalength = mex->inputdatalength; - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - if (!zdev->online || - !zdev->ops->rsa_modexpo || - zdev->min_mod_size > mex->inputdatalength || - zdev->max_mod_size < mex->inputdatalength) + rc = get_rsa_modex_fc(mex, &func_code); + if (rc) + goto out; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online accelarator and CCA cards */ + if (!zc->online || !(zc->card->functions & 0x18000000)) + continue; + /* Check for size limits */ + if (zc->min_mod_size > mex->inputdatalength || + zc->max_mod_size < mex->inputdatalength) + continue; + /* get weight index of the card device */ + weight = zc->speed_rating[func_code]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) continue; - zcrypt_device_get(zdev); - get_device(&zdev->ap_dev->device); - zdev->request_count++; - __zcrypt_decrease_preference(zdev); - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { - spin_unlock_bh(&zcrypt_device_lock); - rc = zdev->ops->rsa_modexpo(zdev, mex); - spin_lock_bh(&zcrypt_device_lock); - 
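Device selection now works on two levels: per request a function code indexes the card's speed_rating[] to get a weight, and the card/queue with the lowest effective load (weight plus the atomic load counters) wins; zcrypt_pick_queue() then takes the module, device and load references in one place. Note that zcrypt_queue_compare() above breaks ties by comparing the addresses of the two total_request_count fields (the leading & operators); a value comparison, as sketched below, is presumably what is intended.

/* Value-based tie break, assuming that is the intent of the hunk above. */
static inline bool example_queue_compare(struct zcrypt_queue *zq,
					 struct zcrypt_queue *pref_zq,
					 unsigned int weight,
					 unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
		       pref_zq->queue->total_request_count;
	return weight > pref_weight;
}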
module_put(zdev->ap_dev->drv->driver.owner); + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || !zq->ops->rsa_modexpo) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; } - else - rc = -EAGAIN; - zdev->request_count--; - __zcrypt_increase_preference(zdev); - put_device(&zdev->ap_dev->device); - zcrypt_device_put(zdev); - spin_unlock_bh(&zcrypt_device_lock); - return rc; } - spin_unlock_bh(&zcrypt_device_lock); - return -ENODEV; + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + + if (!pref_zq) { + rc = -ENODEV; + goto out; + } + + qid = pref_zq->queue->qid; + rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out: + trace_s390_zcrypt_rep(mex, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; } static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) { - struct zcrypt_device *zdev; - unsigned long long z1, z2, z3; - int rc, copied; + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + unsigned int weight, pref_weight; + unsigned int func_code; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(crt, TP_ICARSACRT); + + if (crt->outputdatalength < crt->inputdatalength) { + rc = -EINVAL; + goto out; + } - if (crt->outputdatalength < crt->inputdatalength) - return -EINVAL; /* * As long as outputdatalength is big enough, we can set the * outputdatalength equal to the inputdatalength, since that is the @@ -479,308 +323,445 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) */ crt->outputdatalength = crt->inputdatalength; - copied = 0; - restart: - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - if (!zdev->online || - !zdev->ops->rsa_modexpo_crt || - zdev->min_mod_size > crt->inputdatalength || - zdev->max_mod_size < crt->inputdatalength) + rc = get_rsa_crt_fc(crt, &func_code); + if (rc) + goto out; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online accelarator and CCA cards */ + if (!zc->online || !(zc->card->functions & 0x18000000)) + continue; + /* Check for size limits */ + if (zc->min_mod_size > crt->inputdatalength || + zc->max_mod_size < crt->inputdatalength) continue; - if (zdev->short_crt && crt->inputdatalength > 240) { - /* - * Check inputdata for leading zeros for cards - * that can't handle np_prime, bp_key, or - * u_mult_inv > 128 bytes. - */ - if (copied == 0) { - unsigned int len; - spin_unlock_bh(&zcrypt_device_lock); - /* len is max 256 / 2 - 120 = 8 - * For bigger device just assume len of leading - * 0s is 8 as stated in the requirements for - * ica_rsa_modexpo_crt struct in zcrypt.h. - */ - if (crt->inputdatalength <= 256) - len = crt->inputdatalength / 2 - 120; - else - len = 8; - if (len > sizeof(z1)) - return -EFAULT; - z1 = z2 = z3 = 0; - if (copy_from_user(&z1, crt->np_prime, len) || - copy_from_user(&z2, crt->bp_key, len) || - copy_from_user(&z3, crt->u_mult_inv, len)) - return -EFAULT; - z1 = z2 = z3 = 0; - copied = 1; - /* - * We have to restart device lookup - - * the device list may have changed by now. - */ - goto restart; - } - if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL) - /* The device can't handle this request. 
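From userspace nothing changes: the modexpo and CRT paths are still reached through the zcrypt ioctls. A hedged userspace sketch, assuming the conventional /dev/z90crypt node and the ICARSAMODEXPO ioctl from the zcrypt UAPI header; error handling is trimmed.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>

/* Userspace sketch: issue one RSA mod-expo request (fields set by caller). */
static int do_modexpo(struct ica_rsa_modexpo *mex)
{
	int fd, rc;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, ICARSAMODEXPO, mex);	/* handled by zcrypt_rsa_modexpo() */
	close(fd);
	return rc;
}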
*/ + /* get weight index of the card device */ + weight = zc->speed_rating[func_code]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || !zq->ops->rsa_modexpo_crt) continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; } - zcrypt_device_get(zdev); - get_device(&zdev->ap_dev->device); - zdev->request_count++; - __zcrypt_decrease_preference(zdev); - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { - spin_unlock_bh(&zcrypt_device_lock); - rc = zdev->ops->rsa_modexpo_crt(zdev, crt); - spin_lock_bh(&zcrypt_device_lock); - module_put(zdev->ap_dev->drv->driver.owner); - } - else - rc = -EAGAIN; - zdev->request_count--; - __zcrypt_increase_preference(zdev); - put_device(&zdev->ap_dev->device); - zcrypt_device_put(zdev); - spin_unlock_bh(&zcrypt_device_lock); - return rc; } - spin_unlock_bh(&zcrypt_device_lock); - return -ENODEV; + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + + if (!pref_zq) { + rc = -ENODEV; + goto out; + } + + qid = pref_zq->queue->qid; + rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out: + trace_s390_zcrypt_rep(crt, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; } static long zcrypt_send_cprb(struct ica_xcRB *xcRB) { - struct zcrypt_device *zdev; - int rc; + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + struct ap_message ap_msg; + unsigned int weight, pref_weight; + unsigned int func_code; + unsigned short *domain; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - if (!zdev->online || !zdev->ops->send_cprb || - (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) || - (xcRB->user_defined != AUTOSELECT && - AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)) + rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); + if (rc) + goto out; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online CCA cards */ + if (!zc->online || !(zc->card->functions & 0x10000000)) + continue; + /* Check for user selected CCA card */ + if (xcRB->user_defined != AUTOSELECT && + xcRB->user_defined != zc->card->id) continue; - zcrypt_device_get(zdev); - get_device(&zdev->ap_dev->device); - zdev->request_count++; - __zcrypt_decrease_preference(zdev); - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { - spin_unlock_bh(&zcrypt_device_lock); - rc = zdev->ops->send_cprb(zdev, xcRB); - spin_lock_bh(&zcrypt_device_lock); - module_put(zdev->ap_dev->drv->driver.owner); + /* get weight index of the card device */ + weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || + !zq->ops->send_cprb || + ((*domain != (unsigned short) AUTOSELECT) && + (*domain != AP_QID_QUEUE(zq->queue->qid)))) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; } - else - rc = -EAGAIN; - zdev->request_count--; - __zcrypt_increase_preference(zdev); - 
put_device(&zdev->ap_dev->device); - zcrypt_device_put(zdev); - spin_unlock_bh(&zcrypt_device_lock); - return rc; } - spin_unlock_bh(&zcrypt_device_lock); - return -ENODEV; -} + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); -struct ep11_target_dev_list { - unsigned short targets_num; - struct ep11_target_dev *targets; -}; + if (!pref_zq) { + rc = -ENODEV; + goto out; + } + + /* in case of auto select, provide the correct domain */ + qid = pref_zq->queue->qid; + if (*domain == (unsigned short) AUTOSELECT) + *domain = AP_QID_QUEUE(qid); -static bool is_desired_ep11dev(unsigned int dev_qid, - struct ep11_target_dev_list dev_list) + rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out: + trace_s390_zcrypt_rep(xcRB, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; +} + +static bool is_desired_ep11_card(unsigned int dev_id, + unsigned short target_num, + struct ep11_target_dev *targets) { - int n; + while (target_num-- > 0) { + if (dev_id == targets->ap_id) + return true; + targets++; + } + return false; +} - for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) { - if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) && - (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) { +static bool is_desired_ep11_queue(unsigned int dev_qid, + unsigned short target_num, + struct ep11_target_dev *targets) +{ + while (target_num-- > 0) { + if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) return true; - } + targets++; } return false; } static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) { - struct zcrypt_device *zdev; - bool autoselect = false; - int rc; - struct ep11_target_dev_list ep11_dev_list = { - .targets_num = 0x00, - .targets = NULL, - }; + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + struct ep11_target_dev *targets; + unsigned short target_num; + unsigned int weight, pref_weight; + unsigned int func_code; + struct ap_message ap_msg; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); - ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num; + target_num = (unsigned short) xcrb->targets_num; /* empty list indicates autoselect (all available targets) */ - if (ep11_dev_list.targets_num == 0) - autoselect = true; - else { - ep11_dev_list.targets = kcalloc((unsigned short) - xcrb->targets_num, - sizeof(struct ep11_target_dev), - GFP_KERNEL); - if (!ep11_dev_list.targets) - return -ENOMEM; + targets = NULL; + if (target_num != 0) { + struct ep11_target_dev __user *uptr; - if (copy_from_user(ep11_dev_list.targets, - (struct ep11_target_dev __force __user *) - xcrb->targets, xcrb->targets_num * - sizeof(struct ep11_target_dev))) - return -EFAULT; + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); + if (!targets) { + rc = -ENOMEM; + goto out; + } + + uptr = (struct ep11_target_dev __force __user *) xcrb->targets; + if (copy_from_user(targets, uptr, + target_num * sizeof(*targets))) { + rc = -EFAULT; + goto out; + } } - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - /* check if device is eligible */ - if (!zdev->online || - zdev->ops->variant != MSGTYPE06_VARIANT_EP11) - continue; + rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); + if (rc) + goto out_free; - /* check if device is selected as valid target */ - if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) && - 
!autoselect) + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online EP11 cards */ + if (!zc->online || !(zc->card->functions & 0x04000000)) + continue; + /* Check for user selected EP11 card */ + if (targets && + !is_desired_ep11_card(zc->card->id, target_num, targets)) continue; + /* get weight index of the card device */ + weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || + !zq->ops->send_ep11_cprb || + (targets && + !is_desired_ep11_queue(zq->queue->qid, + target_num, targets))) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; + } + } + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); - zcrypt_device_get(zdev); - get_device(&zdev->ap_dev->device); - zdev->request_count++; - __zcrypt_decrease_preference(zdev); - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { - spin_unlock_bh(&zcrypt_device_lock); - rc = zdev->ops->send_ep11_cprb(zdev, xcrb); - spin_lock_bh(&zcrypt_device_lock); - module_put(zdev->ap_dev->drv->driver.owner); - } else { - rc = -EAGAIN; - } - zdev->request_count--; - __zcrypt_increase_preference(zdev); - put_device(&zdev->ap_dev->device); - zcrypt_device_put(zdev); - spin_unlock_bh(&zcrypt_device_lock); - return rc; + if (!pref_zq) { + rc = -ENODEV; + goto out_free; } - spin_unlock_bh(&zcrypt_device_lock); - return -ENODEV; + + qid = pref_zq->queue->qid; + rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out_free: + kfree(targets); +out: + trace_s390_zcrypt_rep(xcrb, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; } static long zcrypt_rng(char *buffer) { - struct zcrypt_device *zdev; - int rc; + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + unsigned int weight, pref_weight; + unsigned int func_code; + struct ap_message ap_msg; + unsigned int domain; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - if (!zdev->online || !zdev->ops->rng) + rc = get_rng_fc(&ap_msg, &func_code, &domain); + if (rc) + goto out; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online CCA cards */ + if (!zc->online || !(zc->card->functions & 0x10000000)) continue; - zcrypt_device_get(zdev); - get_device(&zdev->ap_dev->device); - zdev->request_count++; - __zcrypt_decrease_preference(zdev); - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { - spin_unlock_bh(&zcrypt_device_lock); - rc = zdev->ops->rng(zdev, buffer); - spin_lock_bh(&zcrypt_device_lock); - module_put(zdev->ap_dev->drv->driver.owner); - } else - rc = -EAGAIN; - zdev->request_count--; - __zcrypt_increase_preference(zdev); - put_device(&zdev->ap_dev->device); - zcrypt_device_put(zdev); - spin_unlock_bh(&zcrypt_device_lock); - return rc; + /* get weight index of the card device */ + weight = zc->speed_rating[func_code]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online 
|| !zq->ops->rng) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; + } + } + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + + if (!pref_zq) + return -ENODEV; + + qid = pref_zq->queue->qid; + rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out: + trace_s390_zcrypt_rep(buffer, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; +} + +static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + struct zcrypt_device_status *stat; + + memset(matrix, 0, sizeof(*matrix)); + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + stat = matrix->device; + stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS; + stat += AP_QID_QUEUE(zq->queue->qid); + stat->hwtype = zc->card->ap_dev.device_type; + stat->functions = zc->card->functions >> 26; + stat->qid = zq->queue->qid; + stat->online = zq->online ? 0x01 : 0x00; + } } - spin_unlock_bh(&zcrypt_device_lock); - return -ENODEV; + spin_unlock(&zcrypt_list_lock); } +EXPORT_SYMBOL(zcrypt_device_status_mask); static void zcrypt_status_mask(char status[AP_DEVICES]) { - struct zcrypt_device *zdev; + struct zcrypt_card *zc; + struct zcrypt_queue *zq; memset(status, 0, sizeof(char) * AP_DEVICES); - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) - status[AP_QID_DEVICE(zdev->ap_dev->qid)] = - zdev->online ? zdev->user_space_type : 0x0d; - spin_unlock_bh(&zcrypt_device_lock); + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + status[AP_QID_CARD(zq->queue->qid)] = + zc->online ? 
zc->user_space_type : 0x0d; + } + } + spin_unlock(&zcrypt_list_lock); } static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) { - struct zcrypt_device *zdev; + struct zcrypt_card *zc; + struct zcrypt_queue *zq; memset(qdepth, 0, sizeof(char) * AP_DEVICES); - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - spin_lock(&zdev->ap_dev->lock); - qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = - zdev->ap_dev->pendingq_count + - zdev->ap_dev->requestq_count; - spin_unlock(&zdev->ap_dev->lock); + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + spin_lock(&zq->queue->lock); + qdepth[AP_QID_CARD(zq->queue->qid)] = + zq->queue->pendingq_count + + zq->queue->requestq_count; + spin_unlock(&zq->queue->lock); + } } - spin_unlock_bh(&zcrypt_device_lock); + spin_unlock(&zcrypt_list_lock); } static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) { - struct zcrypt_device *zdev; + struct zcrypt_card *zc; + struct zcrypt_queue *zq; memset(reqcnt, 0, sizeof(int) * AP_DEVICES); - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - spin_lock(&zdev->ap_dev->lock); - reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = - zdev->ap_dev->total_request_count; - spin_unlock(&zdev->ap_dev->lock); + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + spin_lock(&zq->queue->lock); + reqcnt[AP_QID_CARD(zq->queue->qid)] = + zq->queue->total_request_count; + spin_unlock(&zq->queue->lock); + } } - spin_unlock_bh(&zcrypt_device_lock); + spin_unlock(&zcrypt_list_lock); } static int zcrypt_pendingq_count(void) { - struct zcrypt_device *zdev; - int pendingq_count = 0; - - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - spin_lock(&zdev->ap_dev->lock); - pendingq_count += zdev->ap_dev->pendingq_count; - spin_unlock(&zdev->ap_dev->lock); + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int pendingq_count; + + pendingq_count = 0; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + spin_lock(&zq->queue->lock); + pendingq_count += zq->queue->pendingq_count; + spin_unlock(&zq->queue->lock); + } } - spin_unlock_bh(&zcrypt_device_lock); + spin_unlock(&zcrypt_list_lock); return pendingq_count; } static int zcrypt_requestq_count(void) { - struct zcrypt_device *zdev; - int requestq_count = 0; - - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) { - spin_lock(&zdev->ap_dev->lock); - requestq_count += zdev->ap_dev->requestq_count; - spin_unlock(&zdev->ap_dev->lock); + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int requestq_count; + + requestq_count = 0; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + spin_lock(&zq->queue->lock); + requestq_count += zq->queue->requestq_count; + spin_unlock(&zq->queue->lock); + } } - spin_unlock_bh(&zcrypt_device_lock); + spin_unlock(&zcrypt_list_lock); return requestq_count; } static int zcrypt_count_type(int type) { - struct zcrypt_device *zdev; - int device_count = 0; - - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) - if (zdev->user_space_type == 
type) + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int device_count; + + device_count = 0; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + if (zc->card->id != type) + continue; + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; device_count++; - spin_unlock_bh(&zcrypt_device_lock); + } + } + spin_unlock(&zcrypt_list_lock); return device_count; } @@ -887,6 +868,25 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; return rc; } + case ZDEVICESTATUS: { + struct zcrypt_device_matrix *device_status; + + device_status = kzalloc(sizeof(struct zcrypt_device_matrix), + GFP_KERNEL); + if (!device_status) + return -ENOMEM; + + zcrypt_device_status_mask(device_status); + + if (copy_to_user((char __user *) arg, device_status, + sizeof(struct zcrypt_device_matrix))) { + kfree(device_status); + return -EFAULT; + } + + kfree(device_status); + return 0; + } case Z90STAT_STATUS_MASK: { char status[AP_DEVICES]; zcrypt_status_mask(status); @@ -1249,29 +1249,36 @@ static int zcrypt_proc_open(struct inode *inode, struct file *file) static void zcrypt_disable_card(int index) { - struct zcrypt_device *zdev; + struct zcrypt_card *zc; + struct zcrypt_queue *zq; - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) - if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { - zdev->online = 0; - ap_flush_queue(zdev->ap_dev); - break; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + zq->online = 0; + ap_flush_queue(zq->queue); } - spin_unlock_bh(&zcrypt_device_lock); + } + spin_unlock(&zcrypt_list_lock); } static void zcrypt_enable_card(int index) { - struct zcrypt_device *zdev; + struct zcrypt_card *zc; + struct zcrypt_queue *zq; - spin_lock_bh(&zcrypt_device_lock); - list_for_each_entry(zdev, &zcrypt_device_list, list) - if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { - zdev->online = 1; - break; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + zq->online = 1; + ap_flush_queue(zq->queue); } - spin_unlock_bh(&zcrypt_device_lock); + } + spin_unlock(&zcrypt_list_lock); } static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, @@ -1369,7 +1376,7 @@ static struct hwrng zcrypt_rng_dev = { .quality = 990, }; -static int zcrypt_rng_device_add(void) +int zcrypt_rng_device_add(void) { int rc = 0; @@ -1399,7 +1406,7 @@ static int zcrypt_rng_device_add(void) return rc; } -static void zcrypt_rng_device_remove(void) +void zcrypt_rng_device_remove(void) { mutex_lock(&zcrypt_rng_mutex); zcrypt_rng_device_count--; @@ -1412,24 +1419,19 @@ static void zcrypt_rng_device_remove(void) int __init zcrypt_debug_init(void) { - debugfs_root = debugfs_create_dir("zcrypt", NULL); - - zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16); - debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view); - debug_set_level(zcrypt_dbf_common, DBF_ERR); - - zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16); - debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view); - debug_set_level(zcrypt_dbf_devices, DBF_ERR); + zcrypt_dbf_root = debugfs_create_dir("zcrypt", NULL); + zcrypt_dbf_info = debug_register("zcrypt", 1, 1, + DBF_MAX_SPRINTF_ARGS * sizeof(long)); + debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); + 
debug_set_level(zcrypt_dbf_info, DBF_ERR); return 0; } void zcrypt_debug_exit(void) { - debugfs_remove(debugfs_root); - debug_unregister(zcrypt_dbf_common); - debug_unregister(zcrypt_dbf_devices); + debugfs_remove(zcrypt_dbf_root); + debug_unregister(zcrypt_dbf_info); } /** @@ -1453,12 +1455,15 @@ int __init zcrypt_api_init(void) goto out; /* Set up the proc file system */ - zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops); + zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, + &zcrypt_proc_fops); if (!zcrypt_entry) { rc = -ENOMEM; goto out_misc; } + zcrypt_msgtype6_init(); + zcrypt_msgtype50_init(); return 0; out_misc: @@ -1472,10 +1477,12 @@ int __init zcrypt_api_init(void) * * The module termination code. */ -void zcrypt_api_exit(void) +void __exit zcrypt_api_exit(void) { remove_proc_entry("driver/z90crypt", NULL); misc_deregister(&zcrypt_misc_device); + zcrypt_msgtype6_exit(); + zcrypt_msgtype50_exit(); zcrypt_debug_exit(); } diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 38618f05ad924c340d324500d9d3b887798c7cd8..274a590515347094b5b27a79efd6607dd50d085b 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -84,57 +84,110 @@ struct ica_z90_status { */ #define ZCRYPT_RNG_BUFFER_SIZE 4096 -struct zcrypt_device; +/* + * Identifier for Crypto Request Performance Index + */ +enum crypto_ops { + MEX_1K, + MEX_2K, + MEX_4K, + CRT_1K, + CRT_2K, + CRT_4K, + HWRNG, + SECKEY, + NUM_OPS +}; + +struct zcrypt_queue; struct zcrypt_ops { - long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *); - long (*rsa_modexpo_crt)(struct zcrypt_device *, + long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *); + long (*rsa_modexpo_crt)(struct zcrypt_queue *, struct ica_rsa_modexpo_crt *); - long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); - long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *); - long (*rng)(struct zcrypt_device *, char *); + long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *, + struct ap_message *); + long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *, + struct ap_message *); + long (*rng)(struct zcrypt_queue *, char *, struct ap_message *); struct list_head list; /* zcrypt ops list. */ struct module *owner; int variant; char name[128]; }; -struct zcrypt_device { +struct zcrypt_card { struct list_head list; /* Device list. */ - spinlock_t lock; /* Per device lock. */ + struct list_head zqueues; /* List of zcrypt queues */ struct kref refcount; /* device refcounting */ - struct ap_device *ap_dev; /* The "real" ap device. */ - struct zcrypt_ops *ops; /* Crypto operations. */ + struct ap_card *card; /* The "real" ap card device. */ int online; /* User online/offline */ int user_space_type; /* User space device id. */ char *type_string; /* User space device name. */ int min_mod_size; /* Min number of bits. */ int max_mod_size; /* Max number of bits. */ - int short_crt; /* Card has crt length restriction. */ - int speed_rating; /* Speed of the crypto device. */ + int max_exp_bit_length; + int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */ + atomic_t load; /* Utilization of the crypto device */ int request_count; /* # current requests. */ +}; - struct ap_message reply; /* Per-device reply structure. */ - int max_exp_bit_length; +struct zcrypt_queue { + struct list_head list; /* Device list. 
*/ + struct kref refcount; /* device refcounting */ + struct zcrypt_card *zcard; + struct zcrypt_ops *ops; /* Crypto operations. */ + struct ap_queue *queue; /* The "real" ap queue device. */ + int online; /* User online/offline */ + + atomic_t load; /* Utilization of the crypto device */ - debug_info_t *dbf_area; /* debugging */ + int request_count; /* # current requests. */ + + struct ap_message reply; /* Per-device reply structure. */ }; /* transport layer rescanning */ extern atomic_t zcrypt_rescan_req; -struct zcrypt_device *zcrypt_device_alloc(size_t); -void zcrypt_device_free(struct zcrypt_device *); -void zcrypt_device_get(struct zcrypt_device *); -int zcrypt_device_put(struct zcrypt_device *); -int zcrypt_device_register(struct zcrypt_device *); -void zcrypt_device_unregister(struct zcrypt_device *); +extern spinlock_t zcrypt_list_lock; +extern int zcrypt_device_count; +extern struct list_head zcrypt_card_list; + +#define for_each_zcrypt_card(_zc) \ + list_for_each_entry(_zc, &zcrypt_card_list, list) + +#define for_each_zcrypt_queue(_zq, _zc) \ + list_for_each_entry(_zq, &(_zc)->zqueues, list) + +struct zcrypt_card *zcrypt_card_alloc(void); +void zcrypt_card_free(struct zcrypt_card *); +void zcrypt_card_get(struct zcrypt_card *); +int zcrypt_card_put(struct zcrypt_card *); +int zcrypt_card_register(struct zcrypt_card *); +void zcrypt_card_unregister(struct zcrypt_card *); +struct zcrypt_card *zcrypt_card_get_best(unsigned int *, + unsigned int, unsigned int); +void zcrypt_card_put_best(struct zcrypt_card *, unsigned int); + +struct zcrypt_queue *zcrypt_queue_alloc(size_t); +void zcrypt_queue_free(struct zcrypt_queue *); +void zcrypt_queue_get(struct zcrypt_queue *); +int zcrypt_queue_put(struct zcrypt_queue *); +int zcrypt_queue_register(struct zcrypt_queue *); +void zcrypt_queue_unregister(struct zcrypt_queue *); +void zcrypt_queue_force_online(struct zcrypt_queue *, int); +struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int); +void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int); + +int zcrypt_rng_device_add(void); +void zcrypt_rng_device_remove(void); + void zcrypt_msgtype_register(struct zcrypt_ops *); void zcrypt_msgtype_unregister(struct zcrypt_ops *); -struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int); -void zcrypt_msgtype_release(struct zcrypt_ops *); +struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); int zcrypt_api_init(void); void zcrypt_api_exit(void); diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c new file mode 100644 index 0000000000000000000000000000000000000000..53436ea522305dfd27fa3e89d216d6b1d910ae9e --- /dev/null +++ b/drivers/s390/crypto/zcrypt_card.c @@ -0,0 +1,187 @@ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * Cornelia Huck + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky + * Ralph Wuerthner + * MSGTYPE restruct: Holger Dengler + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "zcrypt_debug.h" +#include "zcrypt_api.h" + +#include "zcrypt_msgtype6.h" +#include "zcrypt_msgtype50.h" + +/* + * Device attributes common for all crypto card devices. + */ + +static ssize_t zcrypt_card_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zcrypt_card *zc = to_ap_card(dev)->private; + + return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string); +} + +static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL); + +static ssize_t zcrypt_card_online_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zcrypt_card *zc = to_ap_card(dev)->private; + + return snprintf(buf, PAGE_SIZE, "%d\n", zc->online); +} + +static ssize_t zcrypt_card_online_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zcrypt_card *zc = to_ap_card(dev)->private; + struct zcrypt_queue *zq; + int online, id; + + if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) + return -EINVAL; + + zc->online = online; + id = zc->card->id; + + ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online); + + spin_lock(&zcrypt_list_lock); + list_for_each_entry(zq, &zc->zqueues, list) + zcrypt_queue_force_online(zq, online); + spin_unlock(&zcrypt_list_lock); + return count; +} + +static DEVICE_ATTR(online, 0644, zcrypt_card_online_show, + zcrypt_card_online_store); + +static struct attribute *zcrypt_card_attrs[] = { + &dev_attr_type.attr, + &dev_attr_online.attr, + NULL, +}; + +static struct attribute_group zcrypt_card_attr_group = { + .attrs = zcrypt_card_attrs, +}; + +struct zcrypt_card *zcrypt_card_alloc(void) +{ + struct zcrypt_card *zc; + + zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL); + if (!zc) + return NULL; + INIT_LIST_HEAD(&zc->list); + INIT_LIST_HEAD(&zc->zqueues); + kref_init(&zc->refcount); + return zc; +} +EXPORT_SYMBOL(zcrypt_card_alloc); + +void zcrypt_card_free(struct zcrypt_card *zc) +{ + kfree(zc); +} +EXPORT_SYMBOL(zcrypt_card_free); + +static void zcrypt_card_release(struct kref *kref) +{ + struct zcrypt_card *zdev = + container_of(kref, struct zcrypt_card, refcount); + zcrypt_card_free(zdev); +} + +void zcrypt_card_get(struct zcrypt_card *zc) +{ + kref_get(&zc->refcount); +} +EXPORT_SYMBOL(zcrypt_card_get); + +int zcrypt_card_put(struct zcrypt_card *zc) +{ + return kref_put(&zc->refcount, zcrypt_card_release); +} +EXPORT_SYMBOL(zcrypt_card_put); + +/** + * zcrypt_card_register() - Register a crypto card device. + * @zc: Pointer to a crypto card device + * + * Register a crypto card device. Returns 0 if successful. + */ +int zcrypt_card_register(struct zcrypt_card *zc) +{ + int rc; + + rc = sysfs_create_group(&zc->card->ap_dev.device.kobj, + &zcrypt_card_attr_group); + if (rc) + return rc; + + spin_lock(&zcrypt_list_lock); + list_add_tail(&zc->list, &zcrypt_card_list); + spin_unlock(&zcrypt_list_lock); + + zc->online = 1; + + ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id); + + return rc; +} +EXPORT_SYMBOL(zcrypt_card_register); + +/** + * zcrypt_card_unregister(): Unregister a crypto card device. + * @zc: Pointer to crypto card device + * + * Unregister a crypto card device. 
+ */ +void zcrypt_card_unregister(struct zcrypt_card *zc) +{ + ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id); + + spin_lock(&zcrypt_list_lock); + list_del_init(&zc->list); + spin_unlock(&zcrypt_list_lock); + sysfs_remove_group(&zc->card->ap_dev.device.kobj, + &zcrypt_card_attr_group); +} +EXPORT_SYMBOL(zcrypt_card_unregister); diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 15104aaa075a41db8728f6bd412ff44636e6f6ee..b97c5d5ee5a4aba9e70f88f791b674046092a6f7 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -30,7 +30,8 @@ #include #include #include -#include +#include +#include #include "ap_bus.h" #include "zcrypt_api.h" @@ -43,9 +44,6 @@ #define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE #define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */ -#define CEX2A_SPEED_RATING 970 -#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */ - #define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ #define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ @@ -57,107 +55,195 @@ #define CEX2A_CLEANUP_TIME (15*HZ) #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME -static struct ap_device_id zcrypt_cex2a_ids[] = { - { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, - { AP_DEVICE(AP_DEVICE_TYPE_CEX3A) }, - { /* end of list */ }, -}; - -MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); -static int zcrypt_cex2a_probe(struct ap_device *ap_dev); -static void zcrypt_cex2a_remove(struct ap_device *ap_dev); +static struct ap_device_id zcrypt_cex2a_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX2A, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX3A, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { /* end of list */ }, +}; + +MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids); -static struct ap_driver zcrypt_cex2a_driver = { - .probe = zcrypt_cex2a_probe, - .remove = zcrypt_cex2a_remove, - .ids = zcrypt_cex2a_ids, - .request_timeout = CEX2A_CLEANUP_TIME, +static struct ap_device_id zcrypt_cex2a_queue_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX2A, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX3A, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { /* end of list */ }, }; +MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids); + /** - * Probe function for CEX2A cards. It always accepts the AP device - * since the bus_match already checked the hardware type. + * Probe function for CEX2A card devices. It always accepts the AP device + * since the bus_match already checked the card type. * @ap_dev: pointer to the AP device. 
*/ -static int zcrypt_cex2a_probe(struct ap_device *ap_dev) +static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev) { - struct zcrypt_device *zdev = NULL; + /* + * Normalized speed ratings per crypto adapter + * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY + */ + static const int CEX2A_SPEED_IDX[] = { + 800, 1000, 2000, 900, 1200, 2400, 0, 0}; + static const int CEX3A_SPEED_IDX[] = { + 400, 500, 1000, 450, 550, 1200, 0, 0}; + + struct ap_card *ac = to_ap_card(&ap_dev->device); + struct zcrypt_card *zc; int rc = 0; + zc = zcrypt_card_alloc(); + if (!zc) + return -ENOMEM; + zc->card = ac; + ac->private = zc; + + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) { + zc->min_mod_size = CEX2A_MIN_MOD_SIZE; + zc->max_mod_size = CEX2A_MAX_MOD_SIZE; + memcpy(zc->speed_rating, CEX2A_SPEED_IDX, + sizeof(CEX2A_SPEED_IDX)); + zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; + zc->type_string = "CEX2A"; + zc->user_space_type = ZCRYPT_CEX2A; + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) { + zc->min_mod_size = CEX2A_MIN_MOD_SIZE; + zc->max_mod_size = CEX2A_MAX_MOD_SIZE; + zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; + if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && + ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { + zc->max_mod_size = CEX3A_MAX_MOD_SIZE; + zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; + } + memcpy(zc->speed_rating, CEX3A_SPEED_IDX, + sizeof(CEX3A_SPEED_IDX)); + zc->type_string = "CEX3A"; + zc->user_space_type = ZCRYPT_CEX3A; + } else { + zcrypt_card_free(zc); + return -ENODEV; + } + zc->online = 1; + + rc = zcrypt_card_register(zc); + if (rc) { + ac->private = NULL; + zcrypt_card_free(zc); + } + + return rc; +} + +/** + * This is called to remove the CEX2A card driver information + * if an AP card device is removed. + */ +static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev) +{ + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; + + if (zc) + zcrypt_card_unregister(zc); +} + +static struct ap_driver zcrypt_cex2a_card_driver = { + .probe = zcrypt_cex2a_card_probe, + .remove = zcrypt_cex2a_card_remove, + .ids = zcrypt_cex2a_card_ids, +}; + +/** + * Probe function for CEX2A queue devices. It always accepts the AP device + * since the bus_match already checked the queue type. + * @ap_dev: pointer to the AP device. 
+ */ +static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) +{ + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + struct zcrypt_queue *zq = NULL; + int rc; + switch (ap_dev->device_type) { case AP_DEVICE_TYPE_CEX2A: - zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE); - if (!zdev) + zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE); + if (!zq) return -ENOMEM; - zdev->user_space_type = ZCRYPT_CEX2A; - zdev->type_string = "CEX2A"; - zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; - zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; - zdev->short_crt = 1; - zdev->speed_rating = CEX2A_SPEED_RATING; - zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; break; case AP_DEVICE_TYPE_CEX3A: - zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); - if (!zdev) + zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE); + if (!zq) return -ENOMEM; - zdev->user_space_type = ZCRYPT_CEX3A; - zdev->type_string = "CEX3A"; - zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; - zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; - zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; - if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) && - ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) { - zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; - zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; - } - zdev->short_crt = 1; - zdev->speed_rating = CEX3A_SPEED_RATING; break; } - if (!zdev) + if (!zq) return -ENODEV; - zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME, - MSGTYPE50_VARIANT_DEFAULT); - zdev->ap_dev = ap_dev; - zdev->online = 1; - ap_device_init_reply(ap_dev, &zdev->reply); - ap_dev->private = zdev; - rc = zcrypt_device_register(zdev); + zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT); + zq->queue = aq; + zq->online = 1; + atomic_set(&zq->load, 0); + ap_queue_init_reply(aq, &zq->reply); + aq->request_timeout = CEX2A_CLEANUP_TIME, + aq->private = zq; + rc = zcrypt_queue_register(zq); if (rc) { - ap_dev->private = NULL; - zcrypt_msgtype_release(zdev->ops); - zcrypt_device_free(zdev); + aq->private = NULL; + zcrypt_queue_free(zq); } + return rc; } /** - * This is called to remove the extended CEX2A driver information - * if an AP device is removed. + * This is called to remove the CEX2A queue driver information + * if an AP queue device is removed. 
*/ -static void zcrypt_cex2a_remove(struct ap_device *ap_dev) +static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev) { - struct zcrypt_device *zdev = ap_dev->private; - struct zcrypt_ops *zops = zdev->ops; + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + struct zcrypt_queue *zq = aq->private; - zcrypt_device_unregister(zdev); - zcrypt_msgtype_release(zops); + ap_queue_remove(aq); + if (zq) + zcrypt_queue_unregister(zq); } +static struct ap_driver zcrypt_cex2a_queue_driver = { + .probe = zcrypt_cex2a_queue_probe, + .remove = zcrypt_cex2a_queue_remove, + .suspend = ap_queue_suspend, + .resume = ap_queue_resume, + .ids = zcrypt_cex2a_queue_ids, +}; + int __init zcrypt_cex2a_init(void) { - return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a"); + int rc; + + rc = ap_driver_register(&zcrypt_cex2a_card_driver, + THIS_MODULE, "cex2acard"); + if (rc) + return rc; + + rc = ap_driver_register(&zcrypt_cex2a_queue_driver, + THIS_MODULE, "cex2aqueue"); + if (rc) + ap_driver_unregister(&zcrypt_cex2a_card_driver); + + return rc; } void __exit zcrypt_cex2a_exit(void) { - ap_driver_unregister(&zcrypt_cex2a_driver); + ap_driver_unregister(&zcrypt_cex2a_queue_driver); + ap_driver_unregister(&zcrypt_cex2a_card_driver); } module_init(zcrypt_cex2a_init); diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index ccb2e78ebf0e811e91c79ce72d7667a10b5030a5..4e91163d70a6eb1341bb2306ca9a6b1137aa159a 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "ap_bus.h" #include "zcrypt_api.h" @@ -24,13 +25,6 @@ #define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */ #define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */ -#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */ -#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */ -#define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */ -#define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */ -#define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */ -#define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */ - #define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE #define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE @@ -41,147 +35,246 @@ */ #define CEX4_CLEANUP_TIME (900*HZ) -static struct ap_device_id zcrypt_cex4_ids[] = { - { AP_DEVICE(AP_DEVICE_TYPE_CEX4) }, - { AP_DEVICE(AP_DEVICE_TYPE_CEX5) }, - { /* end of list */ }, -}; - -MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \ "Copyright IBM Corp. 
2012"); MODULE_LICENSE("GPL"); -static int zcrypt_cex4_probe(struct ap_device *ap_dev); -static void zcrypt_cex4_remove(struct ap_device *ap_dev); +static struct ap_device_id zcrypt_cex4_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX5, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { /* end of list */ }, +}; -static struct ap_driver zcrypt_cex4_driver = { - .probe = zcrypt_cex4_probe, - .remove = zcrypt_cex4_remove, - .ids = zcrypt_cex4_ids, - .request_timeout = CEX4_CLEANUP_TIME, +MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids); + +static struct ap_device_id zcrypt_cex4_queue_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX5, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { /* end of list */ }, }; +MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids); + /** - * Probe function for CEX4 cards. It always accepts the AP device + * Probe function for CEX4 card device. It always accepts the AP device * since the bus_match already checked the hardware type. * @ap_dev: pointer to the AP device. */ -static int zcrypt_cex4_probe(struct ap_device *ap_dev) +static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) { - struct zcrypt_device *zdev = NULL; + /* + * Normalized speed ratings per crypto adapter + * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY + */ + static const int CEX4A_SPEED_IDX[] = { + 5, 6, 59, 20, 115, 581, 0, 0}; + static const int CEX5A_SPEED_IDX[] = { + 3, 3, 6, 8, 32, 218, 0, 0}; + static const int CEX4C_SPEED_IDX[] = { + 24, 25, 82, 41, 138, 1111, 79, 8}; + static const int CEX5C_SPEED_IDX[] = { + 10, 14, 23, 17, 45, 242, 63, 4}; + static const int CEX4P_SPEED_IDX[] = { + 142, 198, 1852, 203, 331, 1563, 0, 8}; + static const int CEX5P_SPEED_IDX[] = { + 49, 67, 131, 52, 85, 287, 0, 4}; + + struct ap_card *ac = to_ap_card(&ap_dev->device); + struct zcrypt_card *zc; int rc = 0; - switch (ap_dev->device_type) { - case AP_DEVICE_TYPE_CEX4: - case AP_DEVICE_TYPE_CEX5: - if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) { - zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE); - if (!zdev) - return -ENOMEM; - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { - zdev->type_string = "CEX4A"; - zdev->speed_rating = CEX4A_SPEED_RATING; - } else { - zdev->type_string = "CEX5A"; - zdev->speed_rating = CEX5A_SPEED_RATING; - } - zdev->user_space_type = ZCRYPT_CEX3A; - zdev->min_mod_size = CEX4A_MIN_MOD_SIZE; - if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) && - ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) { - zdev->max_mod_size = - CEX4A_MAX_MOD_SIZE_4K; - zdev->max_exp_bit_length = - CEX4A_MAX_MOD_SIZE_4K; - } else { - zdev->max_mod_size = - CEX4A_MAX_MOD_SIZE_2K; - zdev->max_exp_bit_length = - CEX4A_MAX_MOD_SIZE_2K; - } - zdev->short_crt = 1; - zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME, - MSGTYPE50_VARIANT_DEFAULT); - } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) { - zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); - if (!zdev) - return -ENOMEM; - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { - zdev->type_string = "CEX4C"; - zdev->speed_rating = CEX4C_SPEED_RATING; - } else { - zdev->type_string = "CEX5C"; - zdev->speed_rating = CEX5C_SPEED_RATING; - } - zdev->user_space_type = ZCRYPT_CEX3C; - zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; - zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; - zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; - zdev->short_crt = 0; - zdev->ops = 
zcrypt_msgtype_request(MSGTYPE06_NAME, - MSGTYPE06_VARIANT_DEFAULT); - } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) { - zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); - if (!zdev) - return -ENOMEM; - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { - zdev->type_string = "CEX4P"; - zdev->speed_rating = CEX4P_SPEED_RATING; - } else { - zdev->type_string = "CEX5P"; - zdev->speed_rating = CEX5P_SPEED_RATING; - } - zdev->user_space_type = ZCRYPT_CEX4; - zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; - zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; - zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; - zdev->short_crt = 0; - zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, - MSGTYPE06_VARIANT_EP11); + zc = zcrypt_card_alloc(); + if (!zc) + return -ENOMEM; + zc->card = ac; + ac->private = zc; + if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) { + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { + zc->type_string = "CEX4A"; + zc->user_space_type = ZCRYPT_CEX4; + memcpy(zc->speed_rating, CEX4A_SPEED_IDX, + sizeof(CEX4A_SPEED_IDX)); + } else { + zc->type_string = "CEX5A"; + zc->user_space_type = ZCRYPT_CEX5; + memcpy(zc->speed_rating, CEX5A_SPEED_IDX, + sizeof(CEX5A_SPEED_IDX)); } - break; - } - if (!zdev) + zc->min_mod_size = CEX4A_MIN_MOD_SIZE; + if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && + ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { + zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K; + zc->max_exp_bit_length = + CEX4A_MAX_MOD_SIZE_4K; + } else { + zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K; + zc->max_exp_bit_length = + CEX4A_MAX_MOD_SIZE_2K; + } + } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { + zc->type_string = "CEX4C"; + /* wrong user space type, must be CEX4 + * just keep it for cca compatibility + */ + zc->user_space_type = ZCRYPT_CEX3C; + memcpy(zc->speed_rating, CEX4C_SPEED_IDX, + sizeof(CEX4C_SPEED_IDX)); + } else { + zc->type_string = "CEX5C"; + /* wrong user space type, must be CEX5 + * just keep it for cca compatibility + */ + zc->user_space_type = ZCRYPT_CEX3C; + memcpy(zc->speed_rating, CEX5C_SPEED_IDX, + sizeof(CEX5C_SPEED_IDX)); + } + zc->min_mod_size = CEX4C_MIN_MOD_SIZE; + zc->max_mod_size = CEX4C_MAX_MOD_SIZE; + zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; + } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) { + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { + zc->type_string = "CEX4P"; + zc->user_space_type = ZCRYPT_CEX4; + memcpy(zc->speed_rating, CEX4P_SPEED_IDX, + sizeof(CEX4P_SPEED_IDX)); + } else { + zc->type_string = "CEX5P"; + zc->user_space_type = ZCRYPT_CEX5; + memcpy(zc->speed_rating, CEX5P_SPEED_IDX, + sizeof(CEX5P_SPEED_IDX)); + } + zc->min_mod_size = CEX4C_MIN_MOD_SIZE; + zc->max_mod_size = CEX4C_MAX_MOD_SIZE; + zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; + } else { + zcrypt_card_free(zc); return -ENODEV; - zdev->ap_dev = ap_dev; - zdev->online = 1; - ap_device_init_reply(ap_dev, &zdev->reply); - ap_dev->private = zdev; - rc = zcrypt_device_register(zdev); + } + zc->online = 1; + + rc = zcrypt_card_register(zc); if (rc) { - zcrypt_msgtype_release(zdev->ops); - ap_dev->private = NULL; - zcrypt_device_free(zdev); + ac->private = NULL; + zcrypt_card_free(zc); } + return rc; } /** - * This is called to remove the extended CEX4 driver information - * if an AP device is removed. + * This is called to remove the CEX4 card driver information + * if an AP card device is removed. 
+ */ +static void zcrypt_cex4_card_remove(struct ap_device *ap_dev) +{ + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; + + if (zc) + zcrypt_card_unregister(zc); +} + +static struct ap_driver zcrypt_cex4_card_driver = { + .probe = zcrypt_cex4_card_probe, + .remove = zcrypt_cex4_card_remove, + .ids = zcrypt_cex4_card_ids, +}; + +/** + * Probe function for CEX4 queue device. It always accepts the AP device + * since the bus_match already checked the hardware type. + * @ap_dev: pointer to the AP device. */ -static void zcrypt_cex4_remove(struct ap_device *ap_dev) +static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) { - struct zcrypt_device *zdev = ap_dev->private; - struct zcrypt_ops *zops; + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + struct zcrypt_queue *zq; + int rc; - if (zdev) { - zops = zdev->ops; - zcrypt_device_unregister(zdev); - zcrypt_msgtype_release(zops); + if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) { + zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE); + if (!zq) + return -ENOMEM; + zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, + MSGTYPE50_VARIANT_DEFAULT); + } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) { + zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE); + if (!zq) + return -ENOMEM; + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_DEFAULT); + } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) { + zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE); + if (!zq) + return -ENOMEM; + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_EP11); + } else { + return -ENODEV; } + zq->queue = aq; + zq->online = 1; + atomic_set(&zq->load, 0); + ap_queue_init_reply(aq, &zq->reply); + aq->request_timeout = CEX4_CLEANUP_TIME, + aq->private = zq; + rc = zcrypt_queue_register(zq); + if (rc) { + aq->private = NULL; + zcrypt_queue_free(zq); + } + + return rc; +} + +/** + * This is called to remove the CEX4 queue driver information + * if an AP queue device is removed. + */ +static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) +{ + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + struct zcrypt_queue *zq = aq->private; + + ap_queue_remove(aq); + if (zq) + zcrypt_queue_unregister(zq); } +static struct ap_driver zcrypt_cex4_queue_driver = { + .probe = zcrypt_cex4_queue_probe, + .remove = zcrypt_cex4_queue_remove, + .suspend = ap_queue_suspend, + .resume = ap_queue_resume, + .ids = zcrypt_cex4_queue_ids, +}; + int __init zcrypt_cex4_init(void) { - return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4"); + int rc; + + rc = ap_driver_register(&zcrypt_cex4_card_driver, + THIS_MODULE, "cex4card"); + if (rc) + return rc; + + rc = ap_driver_register(&zcrypt_cex4_queue_driver, + THIS_MODULE, "cex4queue"); + if (rc) + ap_driver_unregister(&zcrypt_cex4_card_driver); + + return rc; } void __exit zcrypt_cex4_exit(void) { - ap_driver_unregister(&zcrypt_cex4_driver); + ap_driver_unregister(&zcrypt_cex4_queue_driver); + ap_driver_unregister(&zcrypt_cex4_card_driver); } module_init(zcrypt_cex4_init); diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h index 28d9349de1adf77cf20dbe5f938d6a27afd3eaeb..13e38defb6b8d954ec6587a2f6bf1d9ae418b28b 100644 --- a/drivers/s390/crypto/zcrypt_debug.h +++ b/drivers/s390/crypto/zcrypt_debug.h @@ -1,51 +1,27 @@ /* - * Copyright IBM Corp. 2012 + * Copyright IBM Corp. 
2016 * Author(s): Holger Dengler (hd@linux.vnet.ibm.com) + * Harald Freudenberger */ #ifndef ZCRYPT_DEBUG_H #define ZCRYPT_DEBUG_H #include -#include "zcrypt_api.h" -/* that gives us 15 characters in the text event views */ -#define ZCRYPT_DBF_LEN 16 - -#define DBF_ERR 3 /* error conditions */ -#define DBF_WARN 4 /* warning conditions */ -#define DBF_INFO 6 /* informational */ +#define DBF_ERR 3 /* error conditions */ +#define DBF_WARN 4 /* warning conditions */ +#define DBF_INFO 5 /* informational */ +#define DBF_DEBUG 6 /* for debugging only */ +#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) -#define ZCRYPT_DBF_COMMON(level, text...) \ - do { \ - if (debug_level_enabled(zcrypt_dbf_common, level)) { \ - char debug_buffer[ZCRYPT_DBF_LEN]; \ - snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ - debug_text_event(zcrypt_dbf_common, level, \ - debug_buffer); \ - } \ - } while (0) - -#define ZCRYPT_DBF_DEVICES(level, text...) \ - do { \ - if (debug_level_enabled(zcrypt_dbf_devices, level)) { \ - char debug_buffer[ZCRYPT_DBF_LEN]; \ - snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ - debug_text_event(zcrypt_dbf_devices, level, \ - debug_buffer); \ - } \ - } while (0) - -#define ZCRYPT_DBF_DEV(level, device, text...) \ - do { \ - if (debug_level_enabled(device->dbf_area, level)) { \ - char debug_buffer[ZCRYPT_DBF_LEN]; \ - snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ - debug_text_event(device->dbf_area, level, \ - debug_buffer); \ - } \ - } while (0) +#define DBF_MAX_SPRINTF_ARGS 5 + +#define ZCRYPT_DBF(...) \ + debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__) + +extern debug_info_t *zcrypt_dbf_info; int zcrypt_debug_init(void); void zcrypt_debug_exit(void); diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index de1b6c1d172c8c0866dc0fd8fb709d032bdef820..13df60209ed33a05604e34e381d40a50b1c40904 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -55,52 +55,61 @@ struct error_hdr { #define TYPE82_RSP_CODE 0x82 #define TYPE88_RSP_CODE 0x88 -#define REP82_ERROR_MACHINE_FAILURE 0x10 -#define REP82_ERROR_PREEMPT_FAILURE 0x12 -#define REP82_ERROR_CHECKPT_FAILURE 0x14 -#define REP82_ERROR_MESSAGE_TYPE 0x20 -#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */ -#define REP82_ERROR_INVALID_MSG_LEN 0x23 -#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */ -#define REP82_ERROR_FORMAT_FIELD 0x29 -#define REP82_ERROR_INVALID_COMMAND 0x30 -#define REP82_ERROR_MALFORMED_MSG 0x40 -#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */ -#define REP82_ERROR_WORD_ALIGNMENT 0x60 -#define REP82_ERROR_MESSAGE_LENGTH 0x80 -#define REP82_ERROR_OPERAND_INVALID 0x82 -#define REP82_ERROR_OPERAND_SIZE 0x84 -#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 -#define REP82_ERROR_RESERVED_FIELD 0x88 -#define REP82_ERROR_TRANSPORT_FAIL 0x90 -#define REP82_ERROR_PACKET_TRUNCATED 0xA0 -#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 +#define REP82_ERROR_MACHINE_FAILURE 0x10 +#define REP82_ERROR_PREEMPT_FAILURE 0x12 +#define REP82_ERROR_CHECKPT_FAILURE 0x14 +#define REP82_ERROR_MESSAGE_TYPE 0x20 +#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */ +#define REP82_ERROR_INVALID_MSG_LEN 0x23 +#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */ +#define REP82_ERROR_FORMAT_FIELD 0x29 +#define REP82_ERROR_INVALID_COMMAND 0x30 +#define REP82_ERROR_MALFORMED_MSG 0x40 +#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42 +#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */ +#define REP82_ERROR_WORD_ALIGNMENT 0x60 
+#define REP82_ERROR_MESSAGE_LENGTH 0x80 +#define REP82_ERROR_OPERAND_INVALID 0x82 +#define REP82_ERROR_OPERAND_SIZE 0x84 +#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 +#define REP82_ERROR_RESERVED_FIELD 0x88 +#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A +#define REP82_ERROR_TRANSPORT_FAIL 0x90 +#define REP82_ERROR_PACKET_TRUNCATED 0xA0 +#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 -#define REP88_ERROR_MODULE_FAILURE 0x10 +#define REP88_ERROR_MODULE_FAILURE 0x10 -#define REP88_ERROR_MESSAGE_TYPE 0x20 -#define REP88_ERROR_MESSAGE_MALFORMD 0x22 -#define REP88_ERROR_MESSAGE_LENGTH 0x23 -#define REP88_ERROR_RESERVED_FIELD 0x24 -#define REP88_ERROR_KEY_TYPE 0x34 -#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */ -#define REP88_ERROR_OPERAND 0x84 /* CEX2A */ -#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */ +#define REP88_ERROR_MESSAGE_TYPE 0x20 +#define REP88_ERROR_MESSAGE_MALFORMD 0x22 +#define REP88_ERROR_MESSAGE_LENGTH 0x23 +#define REP88_ERROR_RESERVED_FIELD 0x24 +#define REP88_ERROR_KEY_TYPE 0x34 +#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */ +#define REP88_ERROR_OPERAND 0x84 /* CEX2A */ +#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */ -static inline int convert_error(struct zcrypt_device *zdev, +static inline int convert_error(struct zcrypt_queue *zq, struct ap_message *reply) { struct error_hdr *ehdr = reply->message; + int card = AP_QID_CARD(zq->queue->qid); + int queue = AP_QID_QUEUE(zq->queue->qid); switch (ehdr->reply_code) { case REP82_ERROR_OPERAND_INVALID: case REP82_ERROR_OPERAND_SIZE: case REP82_ERROR_EVEN_MOD_IN_OPND: case REP88_ERROR_MESSAGE_MALFORMD: + case REP82_ERROR_INVALID_DOMAIN_PRECHECK: + case REP82_ERROR_INVALID_DOMAIN_PENDING: // REP88_ERROR_INVALID_KEY // '82' CEX2A // REP88_ERROR_OPERAND // '84' CEX2A // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A /* Invalid input data. */ + ZCRYPT_DBF(DBF_WARN, + "device=%02x.%04x reply=0x%02x => rc=EINVAL\n", + card, queue, ehdr->reply_code); return -EINVAL; case REP82_ERROR_MESSAGE_TYPE: // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A @@ -110,32 +119,32 @@ static inline int convert_error(struct zcrypt_device *zdev, * and then repeat the request. */ atomic_set(&zcrypt_rescan_req, 1); - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, - ehdr->reply_code); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + card, queue); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", + card, queue, ehdr->reply_code); return -EAGAIN; case REP82_ERROR_TRANSPORT_FAIL: case REP82_ERROR_MACHINE_FAILURE: // REP88_ERROR_MODULE_FAILURE // '10' CEX2A /* If a card fails disable it and repeat the request. 
*/ atomic_set(&zcrypt_rescan_req, 1); - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, - ehdr->reply_code); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + card, queue); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", + card, queue, ehdr->reply_code); return -EAGAIN; default: - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, - ehdr->reply_code); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + card, queue); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", + card, queue, ehdr->reply_code); return -EAGAIN; /* repeat the request on a different device. */ } } diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index eedfaa2cf715f698db447c1a79e857f170727dc5..6dd5d7c58dd08eae4f9d27074328069050f1b179 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -53,9 +53,6 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); -static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *, - struct ap_message *); - /** * The type 50 message family is associated with a CEX2A card. * @@ -173,16 +170,48 @@ struct type80_hdr { unsigned char reserved3[8]; } __packed; +unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) +{ + + if (!mex->inputdatalength) + return -EINVAL; + + if (mex->inputdatalength <= 128) /* 1024 bit */ + *fcode = MEX_1K; + else if (mex->inputdatalength <= 256) /* 2048 bit */ + *fcode = MEX_2K; + else /* 4096 bit */ + *fcode = MEX_4K; + + return 0; +} + +unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode) +{ + + if (!crt->inputdatalength) + return -EINVAL; + + if (crt->inputdatalength <= 128) /* 1024 bit */ + *fcode = CRT_1K; + else if (crt->inputdatalength <= 256) /* 2048 bit */ + *fcode = CRT_2K; + else /* 4096 bit */ + *fcode = CRT_4K; + + return 0; +} + /** * Convert a ICAMEX message to a type50 MEX message. * - * @zdev: crypto device pointer - * @zreq: crypto request pointer + * @zq: crypto queue pointer + * @ap_msg: crypto request pointer * @mex: pointer to user input data * * Returns 0 on success or -EFAULT. */ -static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, +static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq, struct ap_message *ap_msg, struct ica_rsa_modexpo *mex) { @@ -234,13 +263,13 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, /** * Convert a ICACRT message to a type50 CRT message. * - * @zdev: crypto device pointer - * @zreq: crypto request pointer + * @zq: crypto queue pointer + * @ap_msg: crypto request pointer * @crt: pointer to user input data * * Returns 0 on success or -EFAULT. 
*/ -static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, +static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq, struct ap_message *ap_msg, struct ica_rsa_modexpo_crt *crt) { @@ -283,7 +312,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, u = crb2->u + sizeof(crb2->u) - short_len; inp = crb2->message + sizeof(crb2->message) - mod_len; } else if ((mod_len <= 512) && /* up to 4096 bit key size */ - (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */ + (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) { struct type50_crb3_msg *crb3 = ap_msg->message; memset(crb3, 0, sizeof(*crb3)); ap_msg->length = sizeof(*crb3); @@ -317,14 +346,14 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, /** * Copy results from a type 80 reply message back to user space. * - * @zdev: crypto device pointer + * @zq: crypto device pointer * @reply: reply AP message. * @data: pointer to user output data * @length: size of user output data * * Returns 0 on success or -EFAULT. */ -static int convert_type80(struct zcrypt_device *zdev, +static int convert_type80(struct zcrypt_queue *zq, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) @@ -334,16 +363,18 @@ static int convert_type80(struct zcrypt_device *zdev, if (t80h->len < sizeof(*t80h) + outputdatalength) { /* The result is too short, the CEX2A card may not do that.. */ - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", - AP_QID_DEVICE(zdev->ap_dev->qid), - zdev->online, t80h->code); - + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + t80h->code); return -EAGAIN; /* repeat the request on a different device. */ } - if (zdev->user_space_type == ZCRYPT_CEX2A) + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); else BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); @@ -353,25 +384,31 @@ static int convert_type80(struct zcrypt_device *zdev, return 0; } -static int convert_response(struct zcrypt_device *zdev, +static int convert_response(struct zcrypt_queue *zq, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) { /* Response type byte is the second byte in the response. 
*/ - switch (((unsigned char *) reply->message)[1]) { + unsigned char rtype = ((unsigned char *) reply->message)[1]; + + switch (rtype) { case TYPE82_RSP_CODE: case TYPE88_RSP_CODE: - return convert_error(zdev, reply); + return convert_error(zq, reply); case TYPE80_RSP_CODE: - return convert_type80(zdev, reply, + return convert_type80(zq, reply, outputdata, outputdatalength); default: /* Unknown response type, this should NEVER EVER happen */ - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (unsigned int) rtype); return -EAGAIN; /* repeat the request on a different device. */ } } @@ -380,11 +417,11 @@ static int convert_response(struct zcrypt_device *zdev, * This function is called from the AP bus code after a crypto request * "msg" has finished with the reply message "reply". * It is called from tasklet context. - * @ap_dev: pointer to the AP device + * @aq: pointer to the AP device * @msg: pointer to the AP message * @reply: pointer to the AP reply message */ -static void zcrypt_cex2a_receive(struct ap_device *ap_dev, +static void zcrypt_cex2a_receive(struct ap_queue *aq, struct ap_message *msg, struct ap_message *reply) { @@ -400,7 +437,7 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev, goto out; /* ap_msg->rc indicates the error */ t80h = reply->message; if (t80h->type == TYPE80_RSP_CODE) { - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A) + if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) length = min_t(int, CEX2A_MAX_RESPONSE_SIZE, t80h->len); else @@ -418,11 +455,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); /** * The request distributor calls this function if it picked the CEX2A * device to handle a modexpo request. - * @zdev: pointer to zcrypt_device structure that identifies the + * @zq: pointer to zcrypt_queue structure that identifies the * CEX2A device to the request distributor * @mex: pointer to the modexpo request buffer */ -static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, +static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex) { struct ap_message ap_msg; @@ -430,7 +467,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, int rc; ap_init_message(&ap_msg); - if (zdev->user_space_type == ZCRYPT_CEX2A) + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL); else @@ -442,20 +479,20 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg.private = &work; - rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex); + rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex); if (rc) goto out_free; init_completion(&work); - ap_queue_message(zdev->ap_dev, &ap_msg); + ap_queue_message(zq->queue, &ap_msg); rc = wait_for_completion_interruptible(&work); if (rc == 0) { rc = ap_msg.rc; if (rc == 0) - rc = convert_response(zdev, &ap_msg, mex->outputdata, + rc = convert_response(zq, &ap_msg, mex->outputdata, mex->outputdatalength); } else /* Signal pending. 
*/ - ap_cancel_message(zdev->ap_dev, &ap_msg); + ap_cancel_message(zq->queue, &ap_msg); out_free: kfree(ap_msg.message); return rc; @@ -464,11 +501,11 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, /** * The request distributor calls this function if it picked the CEX2A * device to handle a modexpo_crt request. - * @zdev: pointer to zcrypt_device structure that identifies the + * @zq: pointer to zcrypt_queue structure that identifies the * CEX2A device to the request distributor * @crt: pointer to the modexpoc_crt request buffer */ -static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, +static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt) { struct ap_message ap_msg; @@ -476,7 +513,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, int rc; ap_init_message(&ap_msg); - if (zdev->user_space_type == ZCRYPT_CEX2A) + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL); else @@ -488,20 +525,20 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg.private = &work; - rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt); + rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt); if (rc) goto out_free; init_completion(&work); - ap_queue_message(zdev->ap_dev, &ap_msg); + ap_queue_message(zq->queue, &ap_msg); rc = wait_for_completion_interruptible(&work); if (rc == 0) { rc = ap_msg.rc; if (rc == 0) - rc = convert_response(zdev, &ap_msg, crt->outputdata, + rc = convert_response(zq, &ap_msg, crt->outputdata, crt->outputdatalength); } else /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); + ap_cancel_message(zq->queue, &ap_msg); out_free: kfree(ap_msg.message); return rc; @@ -518,16 +555,12 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = { .variant = MSGTYPE50_VARIANT_DEFAULT, }; -int __init zcrypt_msgtype50_init(void) +void __init zcrypt_msgtype50_init(void) { zcrypt_msgtype_register(&zcrypt_msgtype50_ops); - return 0; } void __exit zcrypt_msgtype50_exit(void) { zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops); } - -module_init(zcrypt_msgtype50_init); -module_exit(zcrypt_msgtype50_exit); diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h index 0a66e4aeeb5047fb1c4cb3bfad63e665da8736f1..5cc280318ee705e9e64fe035b9ccb32c8aa82a40 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.h +++ b/drivers/s390/crypto/zcrypt_msgtype50.h @@ -35,7 +35,10 @@ #define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/ -int zcrypt_msgtype50_init(void); +unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *); +unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *); + +void zcrypt_msgtype50_init(void); void zcrypt_msgtype50_exit(void); #endif /* _ZCRYPT_MSGTYPE50_H_ */ diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 21959719daef94a00bd3f40282a2022077ec1344..e5563ffeb8398158485e2842502479199608ebff 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -60,9 +60,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); -static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *, - struct ap_message *); - /** * CPRB * Note that all shorts, ints and longs are little-endian. 
@@ -149,16 +146,122 @@ static struct CPRBX static_cprbx = { .func_id = {0x54, 0x32}, }; +int speed_idx_cca(int req_type) +{ + switch (req_type) { + case 0x4142: + case 0x4149: + case 0x414D: + case 0x4341: + case 0x4344: + case 0x4354: + case 0x4358: + case 0x444B: + case 0x4558: + case 0x4643: + case 0x4651: + case 0x4C47: + case 0x4C4B: + case 0x4C51: + case 0x4F48: + case 0x504F: + case 0x5053: + case 0x5058: + case 0x5343: + case 0x5344: + case 0x5345: + case 0x5350: + return LOW; + case 0x414B: + case 0x4345: + case 0x4349: + case 0x434D: + case 0x4847: + case 0x4849: + case 0x484D: + case 0x4850: + case 0x4851: + case 0x4954: + case 0x4958: + case 0x4B43: + case 0x4B44: + case 0x4B45: + case 0x4B47: + case 0x4B48: + case 0x4B49: + case 0x4B4E: + case 0x4B50: + case 0x4B52: + case 0x4B54: + case 0x4B58: + case 0x4D50: + case 0x4D53: + case 0x4D56: + case 0x4D58: + case 0x5044: + case 0x5045: + case 0x5046: + case 0x5047: + case 0x5049: + case 0x504B: + case 0x504D: + case 0x5254: + case 0x5347: + case 0x5349: + case 0x534B: + case 0x534D: + case 0x5356: + case 0x5358: + case 0x5443: + case 0x544B: + case 0x5647: + return HIGH; + default: + return MEDIUM; + } +} + +int speed_idx_ep11(int req_type) +{ + switch (req_type) { + case 1: + case 2: + case 36: + case 37: + case 38: + case 39: + case 40: + return LOW; + case 17: + case 18: + case 19: + case 20: + case 21: + case 22: + case 26: + case 30: + case 31: + case 32: + case 33: + case 34: + case 35: + return HIGH; + default: + return MEDIUM; + } +} + + /** * Convert a ICAMEX message to a type6 MEX message. * - * @zdev: crypto device pointer + * @zq: crypto device pointer * @ap_msg: pointer to AP message * @mex: pointer to user input data * * Returns 0 on success or -EFAULT. */ -static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, +static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, struct ap_message *ap_msg, struct ica_rsa_modexpo *mex) { @@ -173,11 +276,6 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, .ulen = 10, .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '} }; - static struct function_and_rules_block static_pke_fnr_MCL2 = { - .function_code = {'P', 'K'}, - .ulen = 10, - .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} - }; struct { struct type6_hdr hdr; struct CPRBX cprbx; @@ -204,11 +302,10 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); msg->cprbx = static_cprbx; - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; - msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? - static_pke_fnr_MCL2 : static_pke_fnr; + msg->fr = static_pke_fnr; msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); @@ -219,13 +316,13 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, /** * Convert a ICACRT message to a type6 CRT message. * - * @zdev: crypto device pointer + * @zq: crypto device pointer * @ap_msg: pointer to AP message * @crt: pointer to user input data * * Returns 0 on success or -EFAULT. 
*/ -static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, +static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, struct ap_message *ap_msg, struct ica_rsa_modexpo_crt *crt) { @@ -241,11 +338,6 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} }; - static struct function_and_rules_block static_pkd_fnr_MCL2 = { - .function_code = {'P', 'D'}, - .ulen = 10, - .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'} - }; struct { struct type6_hdr hdr; struct CPRBX cprbx; @@ -272,12 +364,11 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); msg->cprbx = static_cprbx; - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = size - sizeof(msg->hdr) - sizeof(msg->cprbx); - msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? - static_pkd_fnr_MCL2 : static_pkd_fnr; + msg->fr = static_pkd_fnr; ap_msg->length = size; return 0; @@ -286,7 +377,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, /** * Convert a XCRB message to a type6 CPRB message. * - * @zdev: crypto device pointer + * @zq: crypto device pointer * @ap_msg: pointer to AP message * @xcRB: pointer to user input data * @@ -297,9 +388,10 @@ struct type86_fmt2_msg { struct type86_fmt2_ext fmt2; } __packed; -static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, - struct ap_message *ap_msg, - struct ica_xcRB *xcRB) +static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg, + struct ica_xcRB *xcRB, + unsigned int *fcode, + unsigned short **dom) { static struct type6_hdr static_type6_hdrX = { .type = 0x06, @@ -379,6 +471,9 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); + *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1]; + *dom = (unsigned short *)&msg->cprbx.domain; + if (memcmp(function_code, "US", 2) == 0) ap_msg->special = 1; else @@ -389,15 +484,15 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, copy_from_user(req_data, xcRB->request_data_address, xcRB->request_data_length)) return -EFAULT; + return 0; } -static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, - struct ap_message *ap_msg, - struct ep11_urb *xcRB) +static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg, + struct ep11_urb *xcRB, + unsigned int *fcode) { unsigned int lfmt; - static struct type6_hdr static_type6_ep11_hdr = { .type = 0x06, .rqid = {0x00, 0x01}, @@ -421,7 +516,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, unsigned char dom_tag; /* fixed value 0x4 */ unsigned char dom_len; /* fixed value 0x4 */ unsigned int dom_val; /* domain id */ - } __packed * payload_hdr; + } __packed * payload_hdr = NULL; if (CEIL4(xcRB->req_len) < xcRB->req_len) return -EINVAL; /* overflow after alignment*/ @@ -450,43 +545,30 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, return -EFAULT; } - /* - The target domain field within the cprb body/payload block will be - replaced by the usage domain for non-management commands only. - Therefore we check the first bit of the 'flags' parameter for - management command indication. 
- 0 - non management command - 1 - management command - */ - if (!((msg->cprbx.flags & 0x80) == 0x80)) { - msg->cprbx.target_id = (unsigned int) - AP_QID_QUEUE(zdev->ap_dev->qid); - - if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ - switch (msg->pld_lenfmt & 0x03) { - case 1: - lfmt = 2; - break; - case 2: - lfmt = 3; - break; - default: - return -EINVAL; - } - } else { - lfmt = 1; /* length format #1 */ - } - payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); - payload_hdr->dom_val = (unsigned int) - AP_QID_QUEUE(zdev->ap_dev->qid); + if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ + switch (msg->pld_lenfmt & 0x03) { + case 1: + lfmt = 2; + break; + case 2: + lfmt = 3; + break; + default: + return -EINVAL; + } + } else { + lfmt = 1; /* length format #1 */ } + payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); + *fcode = payload_hdr->func_val & 0xFFFF; + return 0; } /** * Copy results from a type 86 ICA reply message back to user space. * - * @zdev: crypto device pointer + * @zq: crypto device pointer * @reply: reply AP message. * @data: pointer to user output data * @length: size of user output data @@ -508,7 +590,7 @@ struct type86_ep11_reply { struct ep11_cprb cprbx; } __packed; -static int convert_type86_ica(struct zcrypt_device *zdev, +static int convert_type86_ica(struct zcrypt_queue *zq, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) @@ -556,26 +638,37 @@ static int convert_type86_ica(struct zcrypt_device *zdev, service_rc = msg->cprbx.ccp_rtcode; if (unlikely(service_rc != 0)) { service_rs = msg->cprbx.ccp_rscode; - if (service_rc == 8 && service_rs == 66) - return -EINVAL; - if (service_rc == 8 && service_rs == 65) - return -EINVAL; - if (service_rc == 8 && service_rs == 770) + if ((service_rc == 8 && service_rs == 66) || + (service_rc == 8 && service_rs == 65) || + (service_rc == 8 && service_rs == 72) || + (service_rc == 8 && service_rs == 770) || + (service_rc == 12 && service_rs == 769)) { + ZCRYPT_DBF(DBF_DEBUG, + "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); return -EINVAL; + } if (service_rc == 8 && service_rs == 783) { - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; + zq->zcard->min_mod_size = + PCIXCC_MIN_MOD_SIZE_OLD; + ZCRYPT_DBF(DBF_DEBUG, + "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); return -EAGAIN; } - if (service_rc == 12 && service_rs == 769) - return -EINVAL; - if (service_rc == 8 && service_rs == 72) - return -EINVAL; - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, - msg->hdr.reply_code); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); return -EAGAIN; /* repeat the request on a different device. */ } data = msg->text; @@ -611,13 +704,13 @@ static int convert_type86_ica(struct zcrypt_device *zdev, /** * Copy results from a type 86 XCRB reply message back to user space. 
* - * @zdev: crypto device pointer + * @zq: crypto device pointer * @reply: reply AP message. * @xcRB: pointer to XCRB * * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. */ -static int convert_type86_xcrb(struct zcrypt_device *zdev, +static int convert_type86_xcrb(struct zcrypt_queue *zq, struct ap_message *reply, struct ica_xcRB *xcRB) { @@ -642,13 +735,13 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev, /** * Copy results from a type 86 EP11 XCRB reply message back to user space. * - * @zdev: crypto device pointer + * @zq: crypto device pointer * @reply: reply AP message. * @xcRB: pointer to EP11 user request block * * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. */ -static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, +static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq, struct ap_message *reply, struct ep11_urb *xcRB) { @@ -666,7 +759,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, return 0; } -static int convert_type86_rng(struct zcrypt_device *zdev, +static int convert_type86_rng(struct zcrypt_queue *zq, struct ap_message *reply, char *buffer) { @@ -683,104 +776,113 @@ static int convert_type86_rng(struct zcrypt_device *zdev, return msg->fmt2.count2; } -static int convert_response_ica(struct zcrypt_device *zdev, +static int convert_response_ica(struct zcrypt_queue *zq, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) { struct type86x_reply *msg = reply->message; - /* Response type byte is the second byte in the response. */ - switch (((unsigned char *) reply->message)[1]) { + switch (msg->hdr.type) { case TYPE82_RSP_CODE: case TYPE88_RSP_CODE: - return convert_error(zdev, reply); + return convert_error(zq, reply); case TYPE86_RSP_CODE: if (msg->cprbx.ccp_rtcode && (msg->cprbx.ccp_rscode == 0x14f) && (outputdatalength > 256)) { - if (zdev->max_exp_bit_length <= 17) { - zdev->max_exp_bit_length = 17; + if (zq->zcard->max_exp_bit_length <= 17) { + zq->zcard->max_exp_bit_length = 17; return -EAGAIN; } else return -EINVAL; } if (msg->hdr.reply_code) - return convert_error(zdev, reply); + return convert_error(zq, reply); if (msg->cprbx.cprb_ver_id == 0x02) - return convert_type86_ica(zdev, reply, + return convert_type86_ica(zq, reply, outputdata, outputdatalength); /* Fall through, no break, incorrect cprb version is an unknown * response */ default: /* Unknown response type, this should NEVER EVER happen */ - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) msg->hdr.type); return -EAGAIN; /* repeat the request on a different device. */ } } -static int convert_response_xcrb(struct zcrypt_device *zdev, +static int convert_response_xcrb(struct zcrypt_queue *zq, struct ap_message *reply, struct ica_xcRB *xcRB) { struct type86x_reply *msg = reply->message; - /* Response type byte is the second byte in the response. 
*/ - switch (((unsigned char *) reply->message)[1]) { + switch (msg->hdr.type) { case TYPE82_RSP_CODE: case TYPE88_RSP_CODE: xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ - return convert_error(zdev, reply); + return convert_error(zq, reply); case TYPE86_RSP_CODE: if (msg->hdr.reply_code) { memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); - return convert_error(zdev, reply); + return convert_error(zq, reply); } if (msg->cprbx.cprb_ver_id == 0x02) - return convert_type86_xcrb(zdev, reply, xcRB); + return convert_type86_xcrb(zq, reply, xcRB); /* Fall through, no break, incorrect cprb version is an unknown * response */ default: /* Unknown response type, this should NEVER EVER happen */ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) msg->hdr.type); return -EAGAIN; /* repeat the request on a different device. */ } } -static int convert_response_ep11_xcrb(struct zcrypt_device *zdev, +static int convert_response_ep11_xcrb(struct zcrypt_queue *zq, struct ap_message *reply, struct ep11_urb *xcRB) { struct type86_ep11_reply *msg = reply->message; - /* Response type byte is the second byte in the response. */ - switch (((unsigned char *)reply->message)[1]) { + switch (msg->hdr.type) { case TYPE82_RSP_CODE: case TYPE87_RSP_CODE: - return convert_error(zdev, reply); + return convert_error(zq, reply); case TYPE86_RSP_CODE: if (msg->hdr.reply_code) - return convert_error(zdev, reply); + return convert_error(zq, reply); if (msg->cprbx.cprb_ver_id == 0x04) - return convert_type86_ep11_xcrb(zdev, reply, xcRB); + return convert_type86_ep11_xcrb(zq, reply, xcRB); /* Fall through, no break, incorrect cprb version is an unknown resp.*/ default: /* Unknown response type, this should NEVER EVER happen */ - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) msg->hdr.type); return -EAGAIN; /* repeat the request on a different device. 
*/ } } -static int convert_response_rng(struct zcrypt_device *zdev, +static int convert_response_rng(struct zcrypt_queue *zq, struct ap_message *reply, char *data) { @@ -794,15 +896,19 @@ static int convert_response_rng(struct zcrypt_device *zdev, if (msg->hdr.reply_code) return -EINVAL; if (msg->cprbx.cprb_ver_id == 0x02) - return convert_type86_rng(zdev, reply, data); + return convert_type86_rng(zq, reply, data); /* Fall through, no break, incorrect cprb version is an unknown * response */ default: /* Unknown response type, this should NEVER EVER happen */ - zdev->online = 0; - pr_err("Cryptographic device %x failed and was set offline\n", - AP_QID_DEVICE(zdev->ap_dev->qid)); - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) msg->hdr.type); return -EAGAIN; /* repeat the request on a different device. */ } } @@ -811,11 +917,11 @@ static int convert_response_rng(struct zcrypt_device *zdev, * This function is called from the AP bus code after a crypto request * "msg" has finished with the reply message "reply". * It is called from tasklet context. - * @ap_dev: pointer to the AP device + * @aq: pointer to the AP queue * @msg: pointer to the AP message * @reply: pointer to the AP reply message */ -static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, +static void zcrypt_msgtype6_receive(struct ap_queue *aq, struct ap_message *msg, struct ap_message *reply) { @@ -860,11 +966,11 @@ static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, * This function is called from the AP bus code after a crypto request * "msg" has finished with the reply message "reply". * It is called from tasklet context. - * @ap_dev: pointer to the AP device + * @aq: pointer to the AP queue * @msg: pointer to the AP message * @reply: pointer to the AP reply message */ -static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, +static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, struct ap_message *msg, struct ap_message *reply) { @@ -904,11 +1010,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); /** * The request distributor calls this function if it picked the PCIXCC/CEX2C * device to handle a modexpo request. 
- * @zdev: pointer to zcrypt_device structure that identifies the + * @zq: pointer to zcrypt_queue structure that identifies the * PCIXCC/CEX2C device to the request distributor * @mex: pointer to the modexpo request buffer */ -static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, +static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex) { struct ap_message ap_msg; @@ -925,21 +1031,21 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg.private = &resp_type; - rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex); + rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex); if (rc) goto out_free; init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); + ap_queue_message(zq->queue, &ap_msg); rc = wait_for_completion_interruptible(&resp_type.work); if (rc == 0) { rc = ap_msg.rc; if (rc == 0) - rc = convert_response_ica(zdev, &ap_msg, + rc = convert_response_ica(zq, &ap_msg, mex->outputdata, mex->outputdatalength); } else /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); + ap_cancel_message(zq->queue, &ap_msg); out_free: free_page((unsigned long) ap_msg.message); return rc; @@ -948,11 +1054,11 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, /** * The request distributor calls this function if it picked the PCIXCC/CEX2C * device to handle a modexpo_crt request. - * @zdev: pointer to zcrypt_device structure that identifies the + * @zq: pointer to zcrypt_queue structure that identifies the * PCIXCC/CEX2C device to the request distributor * @crt: pointer to the modexpoc_crt request buffer */ -static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev, +static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt) { struct ap_message ap_msg; @@ -969,148 +1075,258 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev, ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg.private = &resp_type; - rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt); + rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt); if (rc) goto out_free; init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); + ap_queue_message(zq->queue, &ap_msg); rc = wait_for_completion_interruptible(&resp_type.work); if (rc == 0) { rc = ap_msg.rc; if (rc == 0) - rc = convert_response_ica(zdev, &ap_msg, + rc = convert_response_ica(zq, &ap_msg, crt->outputdata, crt->outputdatalength); - } else + } else { /* Signal pending. 
*/ - ap_cancel_message(zdev->ap_dev, &ap_msg); + ap_cancel_message(zq->queue, &ap_msg); + } out_free: free_page((unsigned long) ap_msg.message); return rc; } +unsigned int get_cprb_fc(struct ica_xcRB *xcRB, + struct ap_message *ap_msg, + unsigned int *func_code, unsigned short **dom) +{ + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_XCRB, + }; + int rc; + + ap_init_message(ap_msg); + ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg->message) + return -ENOMEM; + ap_msg->receive = zcrypt_msgtype6_receive; + ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL); + if (!ap_msg->private) { + kzfree(ap_msg->message); + return -ENOMEM; + } + memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); + rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom); + if (rc) { + kzfree(ap_msg->message); + kzfree(ap_msg->private); + } + return rc; +} + /** * The request distributor calls this function if it picked the PCIXCC/CEX2C * device to handle a send_cprb request. - * @zdev: pointer to zcrypt_device structure that identifies the + * @zq: pointer to zcrypt_queue structure that identifies the * PCIXCC/CEX2C device to the request distributor * @xcRB: pointer to the send_cprb request buffer */ -static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev, - struct ica_xcRB *xcRB) +static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq, + struct ica_xcRB *xcRB, + struct ap_message *ap_msg) { - struct ap_message ap_msg; - struct response_type resp_type = { - .type = PCIXCC_RESPONSE_TYPE_XCRB, - }; int rc; + struct response_type *rtype = (struct response_type *)(ap_msg->private); - ap_init_message(&ap_msg); - ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.receive = zcrypt_msgtype6_receive; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &resp_type; - rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB); - if (rc) - goto out_free; - init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&resp_type.work); + init_completion(&rtype->work); + ap_queue_message(zq->queue, ap_msg); + rc = wait_for_completion_interruptible(&rtype->work); if (rc == 0) { - rc = ap_msg.rc; + rc = ap_msg->rc; if (rc == 0) - rc = convert_response_xcrb(zdev, &ap_msg, xcRB); + rc = convert_response_xcrb(zq, ap_msg, xcRB); } else /* Signal pending. 
*/ - ap_cancel_message(zdev->ap_dev, &ap_msg); -out_free: - kzfree(ap_msg.message); + ap_cancel_message(zq->queue, ap_msg); + + kzfree(ap_msg->message); + kzfree(ap_msg->private); + return rc; +} + +unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb, + struct ap_message *ap_msg, + unsigned int *func_code) +{ + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_EP11, + }; + int rc; + + ap_init_message(ap_msg); + ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg->message) + return -ENOMEM; + ap_msg->receive = zcrypt_msgtype6_receive_ep11; + ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL); + if (!ap_msg->private) { + kzfree(ap_msg->message); + return -ENOMEM; + } + memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); + rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code); + if (rc) { + kzfree(ap_msg->message); + kzfree(ap_msg->private); + } return rc; } /** * The request distributor calls this function if it picked the CEX4P * device to handle a send_ep11_cprb request. - * @zdev: pointer to zcrypt_device structure that identifies the + * @zq: pointer to zcrypt_queue structure that identifies the * CEX4P device to the request distributor * @xcRB: pointer to the ep11 user request block */ -static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev, - struct ep11_urb *xcrb) +static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq, + struct ep11_urb *xcrb, + struct ap_message *ap_msg) { - struct ap_message ap_msg; - struct response_type resp_type = { - .type = PCIXCC_RESPONSE_TYPE_EP11, - }; int rc; + unsigned int lfmt; + struct response_type *rtype = (struct response_type *)(ap_msg->private); + struct { + struct type6_hdr hdr; + struct ep11_cprb cprbx; + unsigned char pld_tag; /* fixed value 0x30 */ + unsigned char pld_lenfmt; /* payload length format */ + } __packed * msg = ap_msg->message; + struct pld_hdr { + unsigned char func_tag; /* fixed value 0x4 */ + unsigned char func_len; /* fixed value 0x4 */ + unsigned int func_val; /* function ID */ + unsigned char dom_tag; /* fixed value 0x4 */ + unsigned char dom_len; /* fixed value 0x4 */ + unsigned int dom_val; /* domain id */ + } __packed * payload_hdr = NULL; - ap_init_message(&ap_msg); - ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.receive = zcrypt_msgtype6_receive_ep11; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &resp_type; - rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb); - if (rc) - goto out_free; - init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&resp_type.work); + + /** + * The target domain field within the cprb body/payload block will be + * replaced by the usage domain for non-management commands only. + * Therefore we check the first bit of the 'flags' parameter for + * management command indication. 
+ * 0 - non management command + * 1 - management command + */ + if (!((msg->cprbx.flags & 0x80) == 0x80)) { + msg->cprbx.target_id = (unsigned int) + AP_QID_QUEUE(zq->queue->qid); + + if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ + switch (msg->pld_lenfmt & 0x03) { + case 1: + lfmt = 2; + break; + case 2: + lfmt = 3; + break; + default: + return -EINVAL; + } + } else { + lfmt = 1; /* length format #1 */ + } + payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); + payload_hdr->dom_val = (unsigned int) + AP_QID_QUEUE(zq->queue->qid); + } + + init_completion(&rtype->work); + ap_queue_message(zq->queue, ap_msg); + rc = wait_for_completion_interruptible(&rtype->work); if (rc == 0) { - rc = ap_msg.rc; + rc = ap_msg->rc; if (rc == 0) - rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); + rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb); } else /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); + ap_cancel_message(zq->queue, ap_msg); -out_free: - kzfree(ap_msg.message); + kzfree(ap_msg->message); + kzfree(ap_msg->private); return rc; } +unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code, + unsigned int *domain) +{ + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_XCRB, + }; + + ap_init_message(ap_msg); + ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg->message) + return -ENOMEM; + ap_msg->receive = zcrypt_msgtype6_receive; + ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL); + if (!ap_msg->private) { + kzfree(ap_msg->message); + return -ENOMEM; + } + memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); + + rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); + + *func_code = HWRNG; + return 0; +} + /** * The request distributor calls this function if it picked the PCIXCC/CEX2C * device to generate random data. 
- * @zdev: pointer to zcrypt_device structure that identifies the + * @zq: pointer to zcrypt_queue structure that identifies the * PCIXCC/CEX2C device to the request distributor * @buffer: pointer to a memory page to return random data */ - -static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev, - char *buffer) +static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, + char *buffer, struct ap_message *ap_msg) { - struct ap_message ap_msg; - struct response_type resp_type = { - .type = PCIXCC_RESPONSE_TYPE_XCRB, - }; + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + char function_code[2]; + short int rule_length; + char rule[8]; + short int verb_length; + short int key_length; + } __packed * msg = ap_msg->message; + struct response_type *rtype = (struct response_type *)(ap_msg->private); int rc; - ap_init_message(&ap_msg); - ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.receive = zcrypt_msgtype6_receive; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &resp_type; - rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE); - init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&resp_type.work); + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); + + init_completion(&rtype->work); + ap_queue_message(zq->queue, ap_msg); + rc = wait_for_completion_interruptible(&rtype->work); if (rc == 0) { - rc = ap_msg.rc; + rc = ap_msg->rc; if (rc == 0) - rc = convert_response_rng(zdev, &ap_msg, buffer); + rc = convert_response_rng(zq, ap_msg, buffer); } else /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); - kfree(ap_msg.message); + ap_cancel_message(zq->queue, ap_msg); + + kzfree(ap_msg->message); + kzfree(ap_msg->private); return rc; } @@ -1145,12 +1361,11 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb, }; -int __init zcrypt_msgtype6_init(void) +void __init zcrypt_msgtype6_init(void) { zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops); zcrypt_msgtype_register(&zcrypt_msgtype6_ops); zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops); - return 0; } void __exit zcrypt_msgtype6_exit(void) @@ -1159,6 +1374,3 @@ void __exit zcrypt_msgtype6_exit(void) zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops); zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops); } - -module_init(zcrypt_msgtype6_init); -module_exit(zcrypt_msgtype6_exit); diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h index 207247570623709c946f3a7708d3e3bfc7a0313e..7a0d5b57821f07868c9af78da9873c1eca997c2e 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.h +++ b/drivers/s390/crypto/zcrypt_msgtype6.h @@ -116,15 +116,28 @@ struct type86_fmt2_ext { unsigned int offset4; /* 0x00000000 */ } __packed; +unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *, + unsigned int *, unsigned short **); +unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *, + unsigned int *); +unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *); + +#define LOW 10 +#define MEDIUM 100 +#define HIGH 500 + +int speed_idx_cca(int); +int speed_idx_ep11(int); + /** * Prepare a type6 CPRB message for random number generation * * @ap_dev: AP device pointer * @ap_msg: pointer to AP message */ -static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev, - struct ap_message *ap_msg, - unsigned random_number_length) 
+static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg, + unsigned int random_number_length, + unsigned int *domain) { struct { struct type6_hdr hdr; @@ -156,16 +169,16 @@ static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev, msg->hdr.FromCardLen2 = random_number_length, msg->cprbx = local_cprbx; msg->cprbx.rpl_datal = random_number_length, - msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid); memcpy(msg->function_code, msg->hdr.function_code, 0x02); msg->rule_length = 0x0a; memcpy(msg->rule, "RANDOM ", 8); msg->verb_length = 0x02; msg->key_length = 0x02; ap_msg->length = sizeof(*msg); + *domain = (unsigned short)msg->cprbx.domain; } -int zcrypt_msgtype6_init(void); +void zcrypt_msgtype6_init(void); void zcrypt_msgtype6_exit(void); #endif /* _ZCRYPT_MSGTYPE6_H_ */ diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index df8f0c4dacb7c49bc3e15670ad8bac976b989670..600604782b65e972705d01984568134949e61039 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -31,7 +31,8 @@ #include #include #include -#include +#include +#include #include "ap_bus.h" #include "zcrypt_api.h" @@ -46,11 +47,6 @@ #define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE #define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ -#define PCIXCC_MCL2_SPEED_RATING 7870 -#define PCIXCC_MCL3_SPEED_RATING 7870 -#define CEX2C_SPEED_RATING 7000 -#define CEX3C_SPEED_RATING 6500 - #define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ @@ -67,142 +63,34 @@ struct response_type { #define PCIXCC_RESPONSE_TYPE_ICA 0 #define PCIXCC_RESPONSE_TYPE_XCRB 1 -static struct ap_device_id zcrypt_pcixcc_ids[] = { - { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, - { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, - { AP_DEVICE(AP_DEVICE_TYPE_CEX3C) }, - { /* end of list */ }, -}; - -MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); -static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); -static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); - -static struct ap_driver zcrypt_pcixcc_driver = { - .probe = zcrypt_pcixcc_probe, - .remove = zcrypt_pcixcc_remove, - .ids = zcrypt_pcixcc_ids, - .request_timeout = PCIXCC_CLEANUP_TIME, +static struct ap_device_id zcrypt_pcixcc_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_PCIXCC, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX2C, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX3C, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { /* end of list */ }, }; -/** - * Micro-code detection function. Its sends a message to a pcixcc card - * to find out the microcode level. - * @ap_dev: pointer to the AP device. 
- */ -static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev) -{ - static unsigned char msg[] = { - 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00, - 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8, - 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A, - 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20, - 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05, - 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D, - 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55, - 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD, - 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA, - 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22, - 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB, - 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54, - 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00, - 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00, - 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40, - 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C, - 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF, - 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9, - 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63, - 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5, - 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A, - 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01, - 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28, - 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91, - 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5, - 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C, - 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98, - 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96, - 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19, - 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47, - 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36, - 0xF1,0x3D,0x93,0x53 - }; - unsigned long long psmid; - struct CPRBX *cprbx; - char *reply; - int rc, i; - - reply = (void *) get_zeroed_page(GFP_KERNEL); - if (!reply) - return -ENOMEM; +MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids); - rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg)); - if (rc) - goto out_free; - - /* Wait for the test message to complete. */ - for (i = 0; i < 6; i++) { - msleep(300); - rc = ap_recv(ap_dev->qid, &psmid, reply, 4096); - if (rc == 0 && psmid == 0x0102030405060708ULL) - break; - } - - if (i >= 6) { - /* Got no answer. 
*/ - rc = -ENODEV; - goto out_free; - } +static struct ap_device_id zcrypt_pcixcc_queue_ids[] = { + { .dev_type = AP_DEVICE_TYPE_PCIXCC, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX2C, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX3C, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { /* end of list */ }, +}; - cprbx = (struct CPRBX *) (reply + 48); - if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33) - rc = ZCRYPT_PCIXCC_MCL2; - else - rc = ZCRYPT_PCIXCC_MCL3; -out_free: - free_page((unsigned long) reply); - return rc; -} +MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids); /** * Large random number detection function. Its sends a message to a pcixcc @@ -211,15 +99,25 @@ static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev) * * Returns 1 if large random numbers are supported, 0 if not and < 0 on error. */ -static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) +static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq) { struct ap_message ap_msg; unsigned long long psmid; + unsigned int domain; struct { struct type86_hdr hdr; struct type86_fmt2_ext fmt2; struct CPRBX cprbx; } __attribute__((packed)) *reply; + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + char function_code[2]; + short int rule_length; + char rule[8]; + short int verb_length; + short int key_length; + } __packed * msg; int rc, i; ap_init_message(&ap_msg); @@ -227,8 +125,12 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) if (!ap_msg.message) return -ENOMEM; - rng_type6CPRB_msgX(ap_dev, &ap_msg, 4); - rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message, + rng_type6CPRB_msgX(&ap_msg, 4, &domain); + + msg = ap_msg.message; + msg->cprbx.domain = AP_QID_QUEUE(aq->qid); + + rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message, ap_msg.length); if (rc) goto out_free; @@ -236,7 +138,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) /* Wait for the test message to complete. */ for (i = 0; i < 2 * HZ; i++) { msleep(1000 / HZ); - rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096); + rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096); if (rc == 0 && psmid == 0x0102030405060708ULL) break; } @@ -258,110 +160,168 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) } /** - * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device - * since the bus_match already checked the hardware type. The PCIXCC - * cards come in two flavours: micro code level 2 and micro code level 3. - * This is checked by sending a test message to the device. - * @ap_dev: pointer to the AP device. + * Probe function for PCIXCC/CEX2C card devices. It always accepts the + * AP device since the bus_match already checked the hardware type. The + * PCIXCC cards come in two flavours: micro code level 2 and micro code + * level 3. This is checked by sending a test message to the device. + * @ap_dev: pointer to the AP card device. 
*/ -static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) +static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev) { - struct zcrypt_device *zdev; + /* + * Normalized speed ratings per crypto adapter + * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY + */ + static const int CEX2C_SPEED_IDX[] = { + 1000, 1400, 2400, 1100, 1500, 2600, 100, 12}; + static const int CEX3C_SPEED_IDX[] = { + 500, 700, 1400, 550, 800, 1500, 80, 10}; + + struct ap_card *ac = to_ap_card(&ap_dev->device); + struct zcrypt_card *zc; int rc = 0; - zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); - if (!zdev) + zc = zcrypt_card_alloc(); + if (!zc) return -ENOMEM; - zdev->ap_dev = ap_dev; - zdev->online = 1; - switch (ap_dev->device_type) { - case AP_DEVICE_TYPE_PCIXCC: - rc = zcrypt_pcixcc_mcl(ap_dev); - if (rc < 0) { - zcrypt_device_free(zdev); - return rc; - } - zdev->user_space_type = rc; - if (rc == ZCRYPT_PCIXCC_MCL2) { - zdev->type_string = "PCIXCC_MCL2"; - zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING; - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; - zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; - zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; - } else { - zdev->type_string = "PCIXCC_MCL3"; - zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING; - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; - zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; - zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; - } - break; + zc->card = ac; + ac->private = zc; + switch (ac->ap_dev.device_type) { case AP_DEVICE_TYPE_CEX2C: - zdev->user_space_type = ZCRYPT_CEX2C; - zdev->type_string = "CEX2C"; - zdev->speed_rating = CEX2C_SPEED_RATING; - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; - zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; - zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; + zc->user_space_type = ZCRYPT_CEX2C; + zc->type_string = "CEX2C"; + memcpy(zc->speed_rating, CEX2C_SPEED_IDX, + sizeof(CEX2C_SPEED_IDX)); + zc->min_mod_size = PCIXCC_MIN_MOD_SIZE; + zc->max_mod_size = PCIXCC_MAX_MOD_SIZE; + zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; break; case AP_DEVICE_TYPE_CEX3C: - zdev->user_space_type = ZCRYPT_CEX3C; - zdev->type_string = "CEX3C"; - zdev->speed_rating = CEX3C_SPEED_RATING; - zdev->min_mod_size = CEX3C_MIN_MOD_SIZE; - zdev->max_mod_size = CEX3C_MAX_MOD_SIZE; - zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; + zc->user_space_type = ZCRYPT_CEX3C; + zc->type_string = "CEX3C"; + memcpy(zc->speed_rating, CEX3C_SPEED_IDX, + sizeof(CEX3C_SPEED_IDX)); + zc->min_mod_size = CEX3C_MIN_MOD_SIZE; + zc->max_mod_size = CEX3C_MAX_MOD_SIZE; + zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; break; default: - goto out_free; + zcrypt_card_free(zc); + return -ENODEV; } + zc->online = 1; + + rc = zcrypt_card_register(zc); + if (rc) { + ac->private = NULL; + zcrypt_card_free(zc); + } + + return rc; +} - rc = zcrypt_pcixcc_rng_supported(ap_dev); +/** + * This is called to remove the PCIXCC/CEX2C card driver information + * if an AP card device is removed. + */ +static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev) +{ + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; + + if (zc) + zcrypt_card_unregister(zc); +} + +static struct ap_driver zcrypt_pcixcc_card_driver = { + .probe = zcrypt_pcixcc_card_probe, + .remove = zcrypt_pcixcc_card_remove, + .ids = zcrypt_pcixcc_card_ids, +}; + +/** + * Probe function for PCIXCC/CEX2C queue devices. It always accepts the + * AP device since the bus_match already checked the hardware type. The + * PCIXCC cards come in two flavours: micro code level 2 and micro code + * level 3. 
This is checked by sending a test message to the device. + * @ap_dev: pointer to the AP card device. + */ +static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev) +{ + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + struct zcrypt_queue *zq; + int rc; + + zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); + if (!zq) + return -ENOMEM; + zq->queue = aq; + zq->online = 1; + atomic_set(&zq->load, 0); + rc = zcrypt_pcixcc_rng_supported(aq); if (rc < 0) { - zcrypt_device_free(zdev); + zcrypt_queue_free(zq); return rc; } if (rc) - zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, - MSGTYPE06_VARIANT_DEFAULT); + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_DEFAULT); else - zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, - MSGTYPE06_VARIANT_NORNG); - ap_device_init_reply(ap_dev, &zdev->reply); - ap_dev->private = zdev; - rc = zcrypt_device_register(zdev); - if (rc) - goto out_free; - return 0; - - out_free: - ap_dev->private = NULL; - zcrypt_msgtype_release(zdev->ops); - zcrypt_device_free(zdev); + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_NORNG); + ap_queue_init_reply(aq, &zq->reply); + aq->request_timeout = PCIXCC_CLEANUP_TIME, + aq->private = zq; + rc = zcrypt_queue_register(zq); + if (rc) { + aq->private = NULL; + zcrypt_queue_free(zq); + } return rc; } /** - * This is called to remove the extended PCIXCC/CEX2C driver information - * if an AP device is removed. + * This is called to remove the PCIXCC/CEX2C queue driver information + * if an AP queue device is removed. */ -static void zcrypt_pcixcc_remove(struct ap_device *ap_dev) +static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev) { - struct zcrypt_device *zdev = ap_dev->private; - struct zcrypt_ops *zops = zdev->ops; + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + struct zcrypt_queue *zq = aq->private; - zcrypt_device_unregister(zdev); - zcrypt_msgtype_release(zops); + ap_queue_remove(aq); + if (zq) + zcrypt_queue_unregister(zq); } +static struct ap_driver zcrypt_pcixcc_queue_driver = { + .probe = zcrypt_pcixcc_queue_probe, + .remove = zcrypt_pcixcc_queue_remove, + .suspend = ap_queue_suspend, + .resume = ap_queue_resume, + .ids = zcrypt_pcixcc_queue_ids, +}; + int __init zcrypt_pcixcc_init(void) { - return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc"); + int rc; + + rc = ap_driver_register(&zcrypt_pcixcc_card_driver, + THIS_MODULE, "pcixcccard"); + if (rc) + return rc; + + rc = ap_driver_register(&zcrypt_pcixcc_queue_driver, + THIS_MODULE, "pcixccqueue"); + if (rc) + ap_driver_unregister(&zcrypt_pcixcc_card_driver); + + return rc; } void zcrypt_pcixcc_exit(void) { - ap_driver_unregister(&zcrypt_pcixcc_driver); + ap_driver_unregister(&zcrypt_pcixcc_queue_driver); + ap_driver_unregister(&zcrypt_pcixcc_card_driver); } module_init(zcrypt_pcixcc_init); diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c new file mode 100644 index 0000000000000000000000000000000000000000..a303f3b2c328624ed3bd299372cbb258f02e409a --- /dev/null +++ b/drivers/s390/crypto/zcrypt_queue.c @@ -0,0 +1,226 @@ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 
2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * Cornelia Huck + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky + * Ralph Wuerthner + * MSGTYPE restruct: Holger Dengler + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "zcrypt_debug.h" +#include "zcrypt_api.h" + +#include "zcrypt_msgtype6.h" +#include "zcrypt_msgtype50.h" + +/* + * Device attributes common for all crypto queue devices. + */ + +static ssize_t zcrypt_queue_online_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zcrypt_queue *zq = to_ap_queue(dev)->private; + + return snprintf(buf, PAGE_SIZE, "%d\n", zq->online); +} + +static ssize_t zcrypt_queue_online_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zcrypt_queue *zq = to_ap_queue(dev)->private; + struct zcrypt_card *zc = zq->zcard; + int online; + + if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) + return -EINVAL; + + if (online && !zc->online) + return -EINVAL; + zq->online = online; + + ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + online); + + if (!online) + ap_flush_queue(zq->queue); + return count; +} + +static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show, + zcrypt_queue_online_store); + +static struct attribute *zcrypt_queue_attrs[] = { + &dev_attr_online.attr, + NULL, +}; + +static struct attribute_group zcrypt_queue_attr_group = { + .attrs = zcrypt_queue_attrs, +}; + +void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online) +{ + zq->online = online; + if (!online) + ap_flush_queue(zq->queue); +} + +struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size) +{ + struct zcrypt_queue *zq; + + zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL); + if (!zq) + return NULL; + zq->reply.message = kmalloc(max_response_size, GFP_KERNEL); + if (!zq->reply.message) + goto out_free; + zq->reply.length = max_response_size; + INIT_LIST_HEAD(&zq->list); + kref_init(&zq->refcount); + return zq; + +out_free: + kfree(zq); + return NULL; +} +EXPORT_SYMBOL(zcrypt_queue_alloc); + +void zcrypt_queue_free(struct zcrypt_queue *zq) +{ + kfree(zq->reply.message); + kfree(zq); +} +EXPORT_SYMBOL(zcrypt_queue_free); + +static void zcrypt_queue_release(struct kref *kref) +{ + struct zcrypt_queue *zq = + container_of(kref, struct zcrypt_queue, refcount); + zcrypt_queue_free(zq); +} + +void zcrypt_queue_get(struct zcrypt_queue *zq) +{ + kref_get(&zq->refcount); +} +EXPORT_SYMBOL(zcrypt_queue_get); + +int zcrypt_queue_put(struct zcrypt_queue *zq) +{ + return kref_put(&zq->refcount, zcrypt_queue_release); +} +EXPORT_SYMBOL(zcrypt_queue_put); + +/** + * zcrypt_queue_register() - Register a crypto queue device. + * @zq: Pointer to a crypto queue device + * + * Register a crypto queue device. 
Returns 0 if successful. + */ +int zcrypt_queue_register(struct zcrypt_queue *zq) +{ + struct zcrypt_card *zc; + int rc; + + spin_lock(&zcrypt_list_lock); + zc = zq->queue->card->private; + zcrypt_card_get(zc); + zq->zcard = zc; + zq->online = 1; /* New devices are online by default. */ + + ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n", + AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid)); + + list_add_tail(&zq->list, &zc->zqueues); + zcrypt_device_count++; + spin_unlock(&zcrypt_list_lock); + + rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj, + &zcrypt_queue_attr_group); + if (rc) + goto out; + get_device(&zq->queue->ap_dev.device); + + if (zq->ops->rng) { + rc = zcrypt_rng_device_add(); + if (rc) + goto out_unregister; + } + return 0; + +out_unregister: + sysfs_remove_group(&zq->queue->ap_dev.device.kobj, + &zcrypt_queue_attr_group); + put_device(&zq->queue->ap_dev.device); +out: + spin_lock(&zcrypt_list_lock); + list_del_init(&zq->list); + spin_unlock(&zcrypt_list_lock); + zcrypt_card_put(zc); + return rc; +} +EXPORT_SYMBOL(zcrypt_queue_register); + +/** + * zcrypt_queue_unregister(): Unregister a crypto queue device. + * @zq: Pointer to crypto queue device + * + * Unregister a crypto queue device. + */ +void zcrypt_queue_unregister(struct zcrypt_queue *zq) +{ + struct zcrypt_card *zc; + + ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n", + AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid)); + + zc = zq->zcard; + spin_lock(&zcrypt_list_lock); + list_del_init(&zq->list); + zcrypt_device_count--; + spin_unlock(&zcrypt_list_lock); + zcrypt_card_put(zc); + if (zq->ops->rng) + zcrypt_rng_device_remove(); + sysfs_remove_group(&zq->queue->ap_dev.device.kobj, + &zcrypt_queue_attr_group); + put_device(&zq->queue->ap_dev.device); + zcrypt_queue_put(zq); +} +EXPORT_SYMBOL(zcrypt_queue_unregister); diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index ad17fc5883f61f61bc509f6d95754c881fca1c24..ac65f12bcd43c9dca411940ecabbf315e1b8585f 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1032,9 +1032,6 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu) struct ctcm_priv *priv; int max_bufsize; - if (new_mtu < 576 || new_mtu > 65527) - return -EINVAL; - priv = dev->ml_priv; max_bufsize = priv->channel[CTCM_READ]->max_bufsize; @@ -1123,6 +1120,8 @@ void static ctcm_dev_setup(struct net_device *dev) dev->type = ARPHRD_SLIP; dev->tx_queue_len = 100; dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->min_mtu = 576; + dev->max_mtu = 65527; } /* diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 251db0a02e73c81c275594d0b72b091ced13137b..211b31d9f157dfeb16441d6852b85b319083e095 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1888,7 +1888,7 @@ lcs_stop_device(struct net_device *dev) rc = lcs_stopcard(card); if (rc) dev_err(&card->dev->dev, - " Shutting down the LCS device failed\n "); + " Shutting down the LCS device failed\n"); return rc; } diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index b0e8ffdf864b049c1d72797c039b91a6e29d373e..3f85b97ab8d2704820ba1f6f038a99534a14461a 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -62,7 +62,7 @@ #include #include -#include +#include #include #include @@ -302,8 +302,7 @@ static char *netiucv_printuser(struct iucv_connection *conn) if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) { tmp_uid[8] = '\0'; tmp_udat[16] = '\0'; - memcpy(tmp_uid, conn->userid, 8); - memcpy(tmp_uid, 
netiucv_printname(tmp_uid, 8), 8); + memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8); memcpy(tmp_udat, conn->userdata, 16); EBCASC(tmp_udat, 16); memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16); @@ -1429,27 +1428,6 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev) return &priv->stats; } -/** - * netiucv_change_mtu - * @dev: Pointer to interface struct. - * @new_mtu: The new MTU to use for this interface. - * - * Sets MTU of an interface. - * - * Returns 0 on success, -EINVAL if MTU is out of valid range. - * (valid range is 576 .. NETIUCV_MTU_MAX). - */ -static int netiucv_change_mtu(struct net_device * dev, int new_mtu) -{ - IUCV_DBF_TEXT(trace, 3, __func__); - if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) { - IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); - return -EINVAL; - } - dev->mtu = new_mtu; - return 0; -} - /* * attributes in sysfs */ @@ -1564,21 +1542,21 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, { struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->conn->netdev; - char *e; - int bs1; + unsigned int bs1; + int rc; IUCV_DBF_TEXT(trace, 3, __func__); if (count >= 39) return -EINVAL; - bs1 = simple_strtoul(buf, &e, 0); + rc = kstrtouint(buf, 0, &bs1); - if (e && (!isspace(*e))) { - IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n", - *e); + if (rc == -EINVAL) { + IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n", + buf); return -EINVAL; } - if (bs1 > NETIUCV_BUFSIZE_MAX) { + if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) { IUCV_DBF_TEXT_(setup, 2, "buffer_write: buffer size %d too large\n", bs1); @@ -1987,12 +1965,13 @@ static const struct net_device_ops netiucv_netdev_ops = { .ndo_stop = netiucv_close, .ndo_get_stats = netiucv_stats, .ndo_start_xmit = netiucv_tx, - .ndo_change_mtu = netiucv_change_mtu, }; static void netiucv_setup_netdevice(struct net_device *dev) { dev->mtu = NETIUCV_MTU_DEFAULT; + dev->min_mtu = 576; + dev->max_mtu = NETIUCV_MTU_MAX; dev->destructor = netiucv_free_netdevice; dev->hard_header_len = NETIUCV_HDRLEN; dev->addr_len = 0; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 20cf29613043a7f87a29c444ff2be88d87ae3a53..e33558313834dc5e118d478fb5595452f2f6c9ed 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4202,10 +4202,6 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu) sprintf(dbf_text, "%8x", new_mtu); QETH_CARD_TEXT(card, 4, dbf_text); - if (new_mtu < 64) - return -EINVAL; - if (new_mtu > 65535) - return -EINVAL; if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) && (!qeth_mtu_is_valid(card, new_mtu))) return -EINVAL; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index bb27058fa9f089bdb61a7f4727148b2d8a9bdeed..9c921c2833f16ea29113bd6cc017d9d2ae01b72d 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1107,6 +1107,8 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->ml_priv = card; card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->mtu = card->info.initial_mtu; + card->dev->min_mtu = 64; + card->dev->max_mtu = ETH_MAX_MTU; card->dev->netdev_ops = &qeth_l2_netdev_ops; card->dev->ethtool_ops = (card->info.type != QETH_CARD_TYPE_OSN) ? 
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 272d9e7419be31f5f19297839f04f8fce88f1eac..ac37d050e765bad4e96a72336c41a9ae18236313 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3140,6 +3140,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->ml_priv = card; card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->mtu = card->info.initial_mtu; + card->dev->min_mtu = 64; + card->dev->max_mtu = ETH_MAX_MTU; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 581001989937ce1e0aaab11c26136d5e11b4fa4d..d5bf36ec8a751326062e47abe80ddd1f61a5f43b 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, /** - * zfcp_dbf_rec_run - trace event related to running recovery + * zfcp_dbf_rec_run_lvl - trace event related to running recovery + * @level: trace level to be used for event * @tag: identifier for event * @erp: erp_action running */ -void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) +void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp) { struct zfcp_dbf *dbf = erp->adapter->dbf; struct zfcp_dbf_rec *rec = &dbf->rec_buf; @@ -319,10 +320,20 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) else rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter); - debug_event(dbf->rec, 1, rec, sizeof(*rec)); + debug_event(dbf->rec, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->rec_lock, flags); } +/** + * zfcp_dbf_rec_run - trace event related to running recovery + * @tag: identifier for event + * @erp: erp_action running + */ +void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) +{ + zfcp_dbf_rec_run_lvl(1, tag, erp); +} + /** * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery * @tag: identifier for event diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 36d07584271d569d27ec2eeb3706235d6459e026..db186d44cfafb6036a7e452e68e2c8f078cff6d1 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -2,7 +2,7 @@ * zfcp device driver * debug feature declarations * - * Copyright IBM Corp. 2008, 2015 + * Copyright IBM Corp. 2008, 2016 */ #ifndef ZFCP_DBF_H @@ -283,6 +283,30 @@ struct zfcp_dbf { struct zfcp_dbf_scsi scsi_buf; }; +/** + * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default + * @req: request that has been completed + * + * Returns true if FCP response with only benign residual under count. 
+ */ +static inline +bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req) +{ + struct fsf_qtcb *qtcb = req->qtcb; + u32 fsf_stat = qtcb->header.fsf_status; + struct fcp_resp *fcp_rsp; + u8 rsp_flags, fr_status; + + if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND) + return false; /* not an FCP response */ + fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp; + rsp_flags = fcp_rsp->fr_flags; + fr_status = fcp_rsp->fr_status; + return (fsf_stat == FSF_FCP_RSP_AVAILABLE) && + (rsp_flags == FCP_RESID_UNDER) && + (fr_status == SAM_STAT_GOOD); +} + static inline void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) { @@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); } else if (qtcb->header.fsf_status != FSF_GOOD) { - zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req); + zfcp_dbf_hba_fsf_resp("fs_ferr", + zfcp_dbf_hba_fsf_resp_suppress(req) + ? 5 : 1, req); } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || (req->fsf_command == FSF_QTCB_OPEN_LUN)) { @@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL); } +/** + * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset. + * @scmnd: SCSI command that was NULLified. + * @fsf_req: request that owned @scmnd. + */ +static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd, + struct zfcp_fsf_req *fsf_req) +{ + _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req); +} + #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index a59d678125bd0e0ad0bd1ca74b0d42985abb25d8..7ccfce55903423f5e998fe3d93f6942a5879376f 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -3,7 +3,7 @@ * * Error Recovery Procedures (ERP). * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 
2002, 2016 */ #define KMSG_COMPONENT "zfcp" @@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) } } +/** + * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery + * @port: zfcp_port whose fc_rport we should try to unblock + */ +static void zfcp_erp_try_rport_unblock(struct zfcp_port *port) +{ + unsigned long flags; + struct zfcp_adapter *adapter = port->adapter; + int port_status; + struct Scsi_Host *shost = adapter->scsi_host; + struct scsi_device *sdev; + + write_lock_irqsave(&adapter->erp_lock, flags); + port_status = atomic_read(&port->status); + if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 || + (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE | + ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) { + /* new ERP of severity >= port triggered elsewhere meanwhile or + * local link down (adapter erp_failed but not clear unblock) + */ + zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action); + write_unlock_irqrestore(&adapter->erp_lock, flags); + return; + } + spin_lock(shost->host_lock); + __shost_for_each_device(sdev, shost) { + struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); + int lun_status; + + if (zsdev->port != port) + continue; + /* LUN under port of interest */ + lun_status = atomic_read(&zsdev->status); + if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0) + continue; /* unblock rport despite failed LUNs */ + /* LUN recovery not given up yet [maybe follow-up pending] */ + if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 || + (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) { + /* LUN blocked: + * not yet unblocked [LUN recovery pending] + * or meanwhile blocked [new LUN recovery triggered] + */ + zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action); + spin_unlock(shost->host_lock); + write_unlock_irqrestore(&adapter->erp_lock, flags); + return; + } + } + /* now port has no child or all children have completed recovery, + * and no ERP of severity >= port was meanwhile triggered elsewhere + */ + zfcp_scsi_schedule_rport_register(port); + spin_unlock(shost->host_lock); + write_unlock_irqrestore(&adapter->erp_lock, flags); +} + static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) { struct zfcp_adapter *adapter = act->adapter; @@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) case ZFCP_ERP_ACTION_REOPEN_LUN: if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) scsi_device_put(sdev); + zfcp_erp_try_rport_unblock(port); break; case ZFCP_ERP_ACTION_REOPEN_PORT: @@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) */ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED) if (result == ZFCP_ERP_SUCCEEDED) - zfcp_scsi_schedule_rport_register(port); + zfcp_erp_try_rport_unblock(port); /* fall through */ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: put_device(&port->dev); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index c8fed9fa1cca3680015913162ce3ddfee50c85b1..9afdbc32b23f6386a7dce2b51e4cf67ad562d215 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -3,7 +3,7 @@ * * External function declarations. * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 
2002, 2016 */ #ifndef ZFCP_EXT_H @@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, struct zfcp_port *, struct scsi_device *, u8, u8); extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); +extern void zfcp_dbf_rec_run_lvl(int level, char *tag, + struct zfcp_erp_action *erp); extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64); extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *); @@ -84,8 +86,8 @@ extern void zfcp_fc_link_test_work(struct work_struct *); extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *); extern int zfcp_fc_gs_setup(struct zfcp_adapter *); extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); -extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); -extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); +extern int zfcp_fc_exec_bsg_job(struct bsg_job *); +extern int zfcp_fc_timeout_bsg_job(struct bsg_job *); extern void zfcp_fc_sym_name_update(struct work_struct *); extern unsigned int zfcp_fc_port_scan_backoff(void); extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 237688af179b947b2b23098687b047ae8ae1e8fb..7331eea67435c28d2f96a70ce99b864544a69bf2 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include "zfcp_ext.h" @@ -885,26 +886,30 @@ void zfcp_fc_sym_name_update(struct work_struct *work) static void zfcp_fc_ct_els_job_handler(void *data) { - struct fc_bsg_job *job = data; + struct bsg_job *job = data; struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; struct fc_bsg_reply *jr = job->reply; jr->reply_payload_rcv_len = job->reply_payload.payload_len; jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; jr->result = zfcp_ct_els->status ? -EIO : 0; - job->job_done(job); + bsg_job_done(job, jr->result, jr->reply_payload_rcv_len); } -static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job) +static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job) { u32 preamble_word1; u8 gs_type; struct zfcp_adapter *adapter; + struct fc_bsg_request *bsg_request = job->request; + struct fc_rport *rport = fc_bsg_to_rport(job); + struct Scsi_Host *shost; - preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; + preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1; gs_type = (preamble_word1 & 0xff000000) >> 24; - adapter = (struct zfcp_adapter *) job->shost->hostdata[0]; + shost = rport ? 
rport_to_shost(rport) : fc_bsg_to_shost(job); + adapter = (struct zfcp_adapter *) shost->hostdata[0]; switch (gs_type) { case FC_FST_ALIAS: @@ -924,7 +929,7 @@ static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job) static void zfcp_fc_ct_job_handler(void *data) { - struct fc_bsg_job *job = data; + struct bsg_job *job = data; struct zfcp_fc_wka_port *wka_port; wka_port = zfcp_fc_job_wka_port(job); @@ -933,11 +938,12 @@ static void zfcp_fc_ct_job_handler(void *data) zfcp_fc_ct_els_job_handler(data); } -static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, +static int zfcp_fc_exec_els_job(struct bsg_job *job, struct zfcp_adapter *adapter) { struct zfcp_fsf_ct_els *els = job->dd_data; - struct fc_rport *rport = job->rport; + struct fc_rport *rport = fc_bsg_to_rport(job); + struct fc_bsg_request *bsg_request = job->request; struct zfcp_port *port; u32 d_id; @@ -949,13 +955,13 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, d_id = port->d_id; put_device(&port->dev); } else - d_id = ntoh24(job->request->rqst_data.h_els.port_id); + d_id = ntoh24(bsg_request->rqst_data.h_els.port_id); els->handler = zfcp_fc_ct_els_job_handler; return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ); } -static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, +static int zfcp_fc_exec_ct_job(struct bsg_job *job, struct zfcp_adapter *adapter) { int ret; @@ -978,13 +984,15 @@ static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, return ret; } -int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job) +int zfcp_fc_exec_bsg_job(struct bsg_job *job) { struct Scsi_Host *shost; struct zfcp_adapter *adapter; struct zfcp_fsf_ct_els *ct_els = job->dd_data; + struct fc_bsg_request *bsg_request = job->request; + struct fc_rport *rport = fc_bsg_to_rport(job); - shost = job->rport ? rport_to_shost(job->rport) : job->shost; + shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job); adapter = (struct zfcp_adapter *)shost->hostdata[0]; if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) @@ -994,7 +1002,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job) ct_els->resp = job->reply_payload.sg_list; ct_els->handler_data = job; - switch (job->request->msgcode) { + switch (bsg_request->msgcode) { case FC_BSG_RPT_ELS: case FC_BSG_HST_ELS_NOLOGIN: return zfcp_fc_exec_els_job(job, adapter); @@ -1006,7 +1014,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job) } } -int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job) +int zfcp_fc_timeout_bsg_job(struct bsg_job *job) { /* hardware tracks timeout, reset bsg timeout to not interfere */ return -EAGAIN; diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index be1c04b334c51f678d643e4c488173f8fd6be0ee..ea3c76ac0de14dc8ea9ad147d1f5b9ab80cc1172 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -3,7 +3,7 @@ * * Interface to the FSF support functions. * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 
2002, 2016 */ #ifndef FSF_H @@ -78,6 +78,7 @@ #define FSF_APP_TAG_CHECK_FAILURE 0x00000082 #define FSF_REF_TAG_CHECK_FAILURE 0x00000083 #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD +#define FSF_FCP_RSP_AVAILABLE 0x000000AF #define FSF_UNKNOWN_COMMAND 0x000000E2 #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 #define FSF_INVALID_COMMAND_OPTION 0x000000E5 diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h index 7c2c6194dfca58e1eb2fe6466abf64fd928deb96..703fce59befef0be884449e00addad4a49981d61 100644 --- a/drivers/s390/scsi/zfcp_reqlist.h +++ b/drivers/s390/scsi/zfcp_reqlist.h @@ -4,7 +4,7 @@ * Data structure and helper functions for tracking pending FSF * requests. * - * Copyright IBM Corp. 2009 + * Copyright IBM Corp. 2009, 2016 */ #ifndef ZFCP_REQLIST_H @@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl, spin_unlock_irqrestore(&rl->lock, flags); } +/** + * zfcp_reqlist_apply_for_all() - apply a function to every request. + * @rl: the requestlist that contains the target requests. + * @f: the function to apply to each request; the first parameter of the + * function will be the target-request; the second parameter is the same + * pointer as given with the argument @data. + * @data: freely chosen argument; passed through to @f as second parameter. + * + * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash- + * table (not a 'safe' variant, so don't modify the list). + * + * Holds @rl->lock over the entire request-iteration. + */ +static inline void +zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl, + void (*f)(struct zfcp_fsf_req *, void *), void *data) +{ + struct zfcp_fsf_req *req; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&rl->lock, flags); + for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++) + list_for_each_entry(req, &rl->buckets[i], list) + f(req, data); + spin_unlock_irqrestore(&rl->lock, flags); +} + #endif /* ZFCP_REQLIST_H */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 9069f98a18172e754c943010654de65e91c2fb7c..07ffdbb5107f732082e88c94b0362b845e7e17d6 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -3,7 +3,7 @@ * * Interface to Linux SCSI midlayer. * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 
2002, 2016 */ #define KMSG_COMPONENT "zfcp" @@ -88,9 +88,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) } if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { - /* This could be either - * open LUN pending: this is temporary, will result in - * open LUN or ERP_FAILED, so retry command + /* This could be * call to rport_delete pending: mimic retry from * fc_remote_port_chkready until rport is BLOCKED */ @@ -209,6 +207,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) return retval; } +struct zfcp_scsi_req_filter { + u8 tmf_scope; + u32 lun_handle; + u32 port_handle; +}; + +static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data) +{ + struct zfcp_scsi_req_filter *filter = + (struct zfcp_scsi_req_filter *)data; + + /* already aborted - prevent side-effects - or not a SCSI command */ + if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND) + return; + + /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */ + if (old_req->qtcb->header.port_handle != filter->port_handle) + return; + + if (filter->tmf_scope == FCP_TMF_LUN_RESET && + old_req->qtcb->header.lun_handle != filter->lun_handle) + return; + + zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req); + old_req->data = NULL; +} + +static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags) +{ + struct zfcp_adapter *adapter = zsdev->port->adapter; + struct zfcp_scsi_req_filter filter = { + .tmf_scope = FCP_TMF_TGT_RESET, + .port_handle = zsdev->port->handle, + }; + unsigned long flags; + + if (tm_flags == FCP_TMF_LUN_RESET) { + filter.tmf_scope = FCP_TMF_LUN_RESET; + filter.lun_handle = zsdev->lun_handle; + } + + /* + * abort_lock secures against other processings - in the abort-function + * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data + */ + write_lock_irqsave(&adapter->abort_lock, flags); + zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd, + &filter); + write_unlock_irqrestore(&adapter->abort_lock, flags); +} + static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); @@ -241,8 +290,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); retval = FAILED; - } else + } else { zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); + zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags); + } zfcp_fsf_req_free(fsf_req); return retval; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 8688ad4c825f0e3a0bc2644ee9f5784b030ed36b..639ed4e6afd19b46d1e8fb3ec5669b217da8d3c4 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include @@ -235,16 +235,6 @@ static struct airq_info *new_airq_info(void) return info; } -static void destroy_airq_info(struct airq_info *info) -{ - if (!info) - return; - - unregister_adapter_interrupt(&info->airq); - airq_iv_release(info->aiv); - kfree(info); -} - static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, u64 *first, void **airq_info) { @@ -1294,7 +1284,6 @@ static struct ccw_device_id virtio_ids[] = { { CCW_DEVICE(0x3832, 0) }, {}, }; -MODULE_DEVICE_TABLE(ccw, virtio_ids); static struct ccw_driver virtio_ccw_driver = { .driver = { @@ -1406,14 +1395,4 @@ static int __init 
virtio_ccw_init(void) no_auto_parse(); return ccw_driver_register(&virtio_ccw_driver); } -module_init(virtio_ccw_init); - -static void __exit virtio_ccw_exit(void) -{ - int i; - - ccw_driver_unregister(&virtio_ccw_driver); - for (i = 0; i < MAX_AIRQ_AREAS; i++) - destroy_airq_info(airq_areas[i]); -} -module_exit(virtio_ccw_exit); +device_initcall(virtio_ccw_init); diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 33fbe8249fd5c16b25d83d29c0e4f86ea0e8608c..04efed171c88981e23e23bf6ac8438c42ebae69a 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -17,7 +17,7 @@ #include #include #include -#include /* put_/get_user */ +#include /* put_/get_user */ #include #include diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 5609b602c54d31077f4f6136129eda7ffda310e1..56e962a014939e31c7ad74687263ecd69558ef7b 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -29,7 +29,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index 206ef4232adf3a03933b4b28ca490c6181ee9aab..216f923161d1007866130c22872ef22f604197d9 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index a40ee1e37486bd9b05062074507fd2a64d472677..6ff61dad5e21570f27ed68474bbe60f9e7095948 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index 4612691c6619e6ed19bd29790028e5c94e32fe00..2c2e6a3b4c7e51b6d61981b93a7cb3c7b19b16fa 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include #ifdef CONFIG_PCI #include diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index a56a7b243e91fae96b05cae0118d96e9d284dd7b..00e7968a1d70f6b120aa0309087d196f77f5f145 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -1,8 +1,8 @@ /* 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux. - Written By: Adam Radford - Modifications By: Tom Couch + Written By: Adam Radford + Modifications By: Tom Couch Copyright (C) 2004-2009 Applied Micro Circuits Corporation. Copyright (C) 2010 LSI Corporation. @@ -41,10 +41,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Bugs/Comments/Suggestions should be mailed to: - linuxraid@lsi.com - - For more information, goto: - http://www.lsi.com + aradford@gmail.com Note: This version of the driver does not contain a bundled firmware image. @@ -95,7 +92,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h index 0fdc83cfa0e1a28a42757ae52f434523238075b3..b6c208cc474f0e4ae5d37b0c7465c99129121cc7 100644 --- a/drivers/scsi/3w-9xxx.h +++ b/drivers/scsi/3w-9xxx.h @@ -1,8 +1,8 @@ /* 3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux. - Written By: Adam Radford - Modifications By: Tom Couch + Written By: Adam Radford + Modifications By: Tom Couch Copyright (C) 2004-2009 Applied Micro Circuits Corporation. Copyright (C) 2010 LSI Corporation. 
@@ -41,10 +41,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Bugs/Comments/Suggestions should be mailed to: - linuxraid@lsi.com - - For more information, goto: - http://www.lsi.com + aradford@gmail.com */ #ifndef _3W_9XXX_H diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index f8374850f714dd09c53aa1eb25a38bbcb7d0ee8d..b150e131b2e76a172aa13bbf84949ec0cc269cd4 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1,7 +1,7 @@ /* 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. - Written By: Adam Radford + Written By: Adam Radford Copyright (C) 2009 LSI Corporation. @@ -43,10 +43,7 @@ LSI 3ware 9750 6Gb/s SAS/SATA-RAID Bugs/Comments/Suggestions should be mailed to: - linuxraid@lsi.com - - For more information, goto: - http://www.lsi.com + aradford@gmail.com History ------- @@ -67,7 +64,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h index fec6449c7595132f706439277cd396b4dc66c0a8..05e77d84c16d95254544750c4e297a35887fc5dc 100644 --- a/drivers/scsi/3w-sas.h +++ b/drivers/scsi/3w-sas.h @@ -1,7 +1,7 @@ /* 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. - Written By: Adam Radford + Written By: Adam Radford Copyright (C) 2009 LSI Corporation. @@ -39,10 +39,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Bugs/Comments/Suggestions should be mailed to: - linuxraid@lsi.com - - For more information, goto: - http://www.lsi.com + aradford@gmail.com */ #ifndef _3W_SAS_H diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 25aba1613e2157f7a2e468007c202a16015b3f40..33261b690774a8deeb8ec20835d0b24100a926b0 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -1,7 +1,7 @@ /* 3w-xxxx.c -- 3ware Storage Controller device driver for Linux. - Written By: Adam Radford + Written By: Adam Radford Modifications By: Joel Jacobson Arnaldo Carvalho de Melo Brad Strand @@ -47,10 +47,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Bugs/Comments/Suggestions should be mailed to: - linuxraid@lsi.com - For more information, goto: - http://www.lsi.com + aradford@gmail.com + History ------- @@ -211,7 +210,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h index 6f65e663d3932108edaed6b75eba328d469f4fce..69e80c1ed1ca642806698177f0c345e2d600b2c5 100644 --- a/drivers/scsi/3w-xxxx.h +++ b/drivers/scsi/3w-xxxx.h @@ -1,7 +1,7 @@ /* 3w-xxxx.h -- 3ware Storage Controller device driver for Linux. - Written By: Adam Radford + Written By: Adam Radford Modifications By: Joel Jacobson Arnaldo Carvalho de Melo Brad Strand @@ -45,7 +45,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Bugs/Comments/Suggestions should be mailed to: - linuxraid@lsi.com + + aradford@gmail.com For more information, goto: http://www.lsi.com diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 3e2bdb90813ced106e899decc3b2cd672b3df2d5..a4f6b0d955159cde292b9c7f09811d5d542c3fa8 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -263,6 +263,7 @@ config SCSI_SPI_ATTRS config SCSI_FC_ATTRS tristate "FiberChannel Transport Attributes" depends on SCSI && NET + select BLK_DEV_BSGLIB select SCSI_NETLINK help If you wish to export transport-specific information about @@ -743,40 +744,18 @@ config SCSI_ISCI control unit found in the Intel(R) C600 series chipset. 
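The new "select BLK_DEV_BSGLIB" under SCSI_FC_ATTRS above goes hand in hand with the zfcp_fc.c hunks earlier in this series, which switch the FC passthrough path from struct fc_bsg_job to the generic struct bsg_job and complete jobs via bsg_job_done() instead of job->job_done(). A minimal sketch of the new completion convention follows; the handler name my_fc_ct_els_done is hypothetical, while bsg_job_done(), struct bsg_job and struct fc_bsg_reply are the real bsg-lib/FC transport interfaces used by the patch.

#include <linux/bsg-lib.h>
#include <scsi/scsi_transport_fc.h>

/* Hypothetical LLD completion callback following the bsg-lib convention. */
static void my_fc_ct_els_done(struct bsg_job *job, int status)
{
	struct fc_bsg_reply *reply = job->reply;

	reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	reply->result = status ? -EIO : 0;

	/* bsg_job_done() replaces the old job->job_done(job) callback. */
	bsg_job_done(job, reply->result, reply->reply_payload_rcv_len);
}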
config SCSI_GENERIC_NCR5380 - tristate "Generic NCR5380/53c400 SCSI PIO support" - depends on ISA && SCSI + tristate "Generic NCR5380/53c400 SCSI ISA card support" + depends on ISA && SCSI && HAS_IOPORT_MAP select SCSI_SPI_ATTRS ---help--- - This is a driver for the old NCR 53c80 series of SCSI controllers - on boards using PIO. Most boards such as the Trantor T130 fit this - category, along with a large number of ISA 8bit controllers shipped - for free with SCSI scanners. If you have a PAS16, T128 or DMX3191 - you should select the specific driver for that card rather than - generic 5380 support. - - It is explained in section 3.8 of the SCSI-HOWTO, available from - . If it doesn't work out - of the box, you may have to change some settings in - . + This is a driver for old ISA card SCSI controllers based on a + NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device. + Most boards such as the Trantor T130 fit this category, as do + various 8-bit and 16-bit ISA cards bundled with SCSI scanners. To compile this driver as a module, choose M here: the module will be called g_NCR5380. -config SCSI_GENERIC_NCR5380_MMIO - tristate "Generic NCR5380/53c400 SCSI MMIO support" - depends on ISA && SCSI - select SCSI_SPI_ATTRS - ---help--- - This is a driver for the old NCR 53c80 series of SCSI controllers - on boards using memory mapped I/O. - It is explained in section 3.8 of the SCSI-HOWTO, available from - . If it doesn't work out - of the box, you may have to change some settings in - . - - To compile this driver as a module, choose M here: the - module will be called g_NCR5380_mmio. - config SCSI_IPS tristate "IBM ServeRAID support" depends on PCI && SCSI @@ -1254,6 +1233,7 @@ config SCSI_QLOGICPTI source "drivers/scsi/qla2xxx/Kconfig" source "drivers/scsi/qla4xxx/Kconfig" +source "drivers/scsi/qedi/Kconfig" config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 38d938d7fe679cd89541e0390a3b6513d20c7c0f..736b77414a4baae3fe9520dc1eea00df591b4d7f 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -74,7 +74,6 @@ obj-$(CONFIG_SCSI_ISCI) += isci/ obj-$(CONFIG_SCSI_IPS) += ips.o obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o -obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o @@ -132,6 +131,7 @@ obj-$(CONFIG_PS3_ROM) += ps3rom.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ +obj-$(CONFIG_QEDI) += libiscsi.o qedi/ obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ obj-$(CONFIG_SCSI_ESAS2R) += esas2r/ obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o @@ -173,6 +173,7 @@ hv_storvsc-y := storvsc_drv.o sd_mod-objs := sd.o sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o +sd_mod-$(CONFIG_BLK_DEV_ZONED) += sd_zbc.o sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 790babc5ef660334c86ecb096e405a15d50bceee..4f5ca794bb71507a90879af23f0f4ea998fea9ec 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -97,9 +97,6 @@ * and macros and include this file in your driver. * * These macros control options : - * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be - * defined. 
- * * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * @@ -121,14 +118,13 @@ * * Either real DMA *or* pseudo DMA may be implemented * - * NCR5380_dma_write_setup(instance, src, count) - initialize - * NCR5380_dma_read_setup(instance, dst, count) - initialize - * NCR5380_dma_residual(instance); - residual count + * NCR5380_dma_xfer_len - determine size of DMA/PDMA transfer + * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380 + * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory + * NCR5380_dma_residual - residual byte count * * The generic driver is initialized by calling NCR5380_init(instance), - * after setting the appropriate host specific fields and ID. If the - * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, - * possible) function may be used. + * after setting the appropriate host specific fields and ID. */ #ifndef NCR5380_io_delay @@ -178,7 +174,7 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd) /** * NCR5380_poll_politely2 - wait for two chip register values - * @instance: controller to poll + * @hostdata: host private data * @reg1: 5380 register to poll * @bit1: Bitmask to check * @val1: Expected value @@ -195,18 +191,14 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd) * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. */ -static int NCR5380_poll_politely2(struct Scsi_Host *instance, - int reg1, int bit1, int val1, - int reg2, int bit2, int val2, int wait) +static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata, + unsigned int reg1, u8 bit1, u8 val1, + unsigned int reg2, u8 bit2, u8 val2, + unsigned long wait) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned long n = hostdata->poll_loops; unsigned long deadline = jiffies + wait; - unsigned long n; - /* Busy-wait for up to 10 ms */ - n = min(10000U, jiffies_to_usecs(wait)); - n *= hostdata->accesses_per_ms; - n /= 2000; do { if ((NCR5380_read(reg1) & bit1) == val1) return 0; @@ -288,6 +280,7 @@ mrs[] = { static void NCR5380_print(struct Scsi_Host *instance) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char status, data, basr, mr, icr, i; data = NCR5380_read(CURRENT_SCSI_DATA_REG); @@ -337,6 +330,7 @@ static struct { static void NCR5380_print_phase(struct Scsi_Host *instance) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char status; int i; @@ -352,76 +346,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) } #endif - -static int probe_irq; - -/** - * probe_intr - helper for IRQ autoprobe - * @irq: interrupt number - * @dev_id: unused - * @regs: unused - * - * Set a flag to indicate the IRQ in question was received. This is - * used by the IRQ probe code. - */ - -static irqreturn_t probe_intr(int irq, void *dev_id) -{ - probe_irq = irq; - return IRQ_HANDLED; -} - -/** - * NCR5380_probe_irq - find the IRQ of an NCR5380 - * @instance: NCR5380 controller - * @possible: bitmask of ISA IRQ lines - * - * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ - * and then looking to see what interrupt actually turned up. 
- */ - -static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, - int possible) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned long timeout; - int trying_irqs, i, mask; - - for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1) - if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0)) - trying_irqs |= mask; - - timeout = jiffies + msecs_to_jiffies(250); - probe_irq = NO_IRQ; - - /* - * A interrupt is triggered whenever BSY = false, SEL = true - * and a bit set in the SELECT_ENABLE_REG is asserted on the - * SCSI bus. - * - * Note that the bus is only driven when the phase control signals - * (I/O, C/D, and MSG) match those in the TCR, so we must reset that - * to zero. - */ - - NCR5380_write(TARGET_COMMAND_REG, 0); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL); - - while (probe_irq == NO_IRQ && time_before(jiffies, timeout)) - schedule_timeout_uninterruptible(1); - - NCR5380_write(SELECT_ENABLE_REG, 0); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - for (i = 1, mask = 2; i < 16; ++i, mask <<= 1) - if (trying_irqs & mask) - free_irq(i, NULL); - - return probe_irq; -} - /** * NCR58380_info - report driver and host information * @instance: relevant scsi host instance @@ -441,14 +365,14 @@ static void prepare_info(struct Scsi_Host *instance) struct NCR5380_hostdata *hostdata = shost_priv(instance); snprintf(hostdata->info, sizeof(hostdata->info), - "%s, io_port 0x%lx, n_io_port %d, " - "base 0x%lx, irq %d, " + "%s, irq %d, " + "io_port 0x%lx, base 0x%lx, " "can_queue %d, cmd_per_lun %d, " "sg_tablesize %d, this_id %d, " "flags { %s%s%s}, " "options { %s} ", - instance->hostt->name, instance->io_port, instance->n_io_port, - instance->base, instance->irq, + instance->hostt->name, instance->irq, + hostdata->io_port, hostdata->base, instance->can_queue, instance->cmd_per_lun, instance->sg_tablesize, instance->this_id, hostdata->flags & FLAG_DMA_FIXUP ? 
"DMA_FIXUP " : "", @@ -482,6 +406,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) struct NCR5380_hostdata *hostdata = shost_priv(instance); int i; unsigned long deadline; + unsigned long accesses_per_ms; instance->max_lun = 7; @@ -530,7 +455,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) ++i; cpu_relax(); } while (time_is_after_jiffies(deadline)); - hostdata->accesses_per_ms = i / 256; + accesses_per_ms = i / 256; + hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2; return 0; } @@ -560,7 +486,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance) case 3: case 5: shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n"); - NCR5380_poll_politely(instance, + NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, 0, 5 * HZ); break; case 2: @@ -871,7 +797,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_read(RESET_PARITY_INTERRUPT_REG); - transferred = hostdata->dma_len - NCR5380_dma_residual(instance); + transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata); hostdata->dma_len = 0; data = (unsigned char **)&hostdata->connected->SCp.ptr; @@ -994,7 +920,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) } handled = 1; } else { - shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); + dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n"); #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif @@ -1075,7 +1001,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, */ spin_unlock_irq(&hostdata->lock); - err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0, + err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0, INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, ICR_ARBITRATION_PROGRESS, HZ); spin_lock_irq(&hostdata->lock); @@ -1201,7 +1127,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, * selection. 
*/ - err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY, + err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY, msecs_to_jiffies(250)); if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { @@ -1247,7 +1173,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, /* Wait for start of REQ/ACK handshake */ - err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); + err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ); spin_lock_irq(&hostdata->lock); if (err < 0) { shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); @@ -1318,6 +1244,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char p = *phase, tmp; int c = *count; unsigned char *d = *data; @@ -1336,7 +1263,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, * valid */ - if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0) + if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0) break; dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); @@ -1381,7 +1308,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } - if (NCR5380_poll_politely(instance, + if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 5 * HZ) < 0) break; @@ -1440,6 +1367,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, static void do_reset(struct Scsi_Host *instance) { + struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance); unsigned long flags; local_irq_save(flags); @@ -1462,6 +1390,7 @@ static void do_reset(struct Scsi_Host *instance) static int do_abort(struct Scsi_Host *instance) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char *msgptr, phase, tmp; int len; int rc; @@ -1479,7 +1408,7 @@ static int do_abort(struct Scsi_Host *instance) * the target sees, so we just handshake. */ - rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ); + rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ); if (rc < 0) goto timeout; @@ -1490,7 +1419,7 @@ static int do_abort(struct Scsi_Host *instance) if (tmp != PHASE_MSGOUT) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); - rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ); + rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ); if (rc < 0) goto timeout; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); @@ -1575,9 +1504,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * starting the NCR. This is also the cleaner way for the TT. */ if (p & SR_IO) - result = NCR5380_dma_recv_setup(instance, d, c); + result = NCR5380_dma_recv_setup(hostdata, d, c); else - result = NCR5380_dma_send_setup(instance, d, c); + result = NCR5380_dma_send_setup(hostdata, d, c); } /* @@ -1609,9 +1538,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * NCR access, else the DMA setup gets trashed! */ if (p & SR_IO) - result = NCR5380_dma_recv_setup(instance, d, c); + result = NCR5380_dma_recv_setup(hostdata, d, c); else - result = NCR5380_dma_send_setup(instance, d, c); + result = NCR5380_dma_send_setup(hostdata, d, c); } /* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */ @@ -1678,12 +1607,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * byte. 
*/ - if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, + if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, HZ) < 0) { result = -1; shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); } - if (NCR5380_poll_politely(instance, STATUS_REG, + if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, HZ) < 0) { result = -1; shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); @@ -1694,7 +1623,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * Wait for the last byte to be sent. If REQ is being asserted for * the byte we're interested, we'll ACK it and it will go false. */ - if (NCR5380_poll_politely2(instance, + if (NCR5380_poll_politely2(hostdata, BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) { result = -1; @@ -1751,22 +1680,26 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } #ifdef CONFIG_SUN3 - if (phase == PHASE_CMDOUT) { - void *d; - unsigned long count; + if (phase == PHASE_CMDOUT && + sun3_dma_setup_done != cmd) { + int count; if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { - count = cmd->SCp.buffer->length; - d = sg_virt(cmd->SCp.buffer); - } else { - count = cmd->SCp.this_residual; - d = cmd->SCp.ptr; + ++cmd->SCp.buffer; + --cmd->SCp.buffers_residual; + cmd->SCp.this_residual = cmd->SCp.buffer->length; + cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); } - if (sun3_dma_setup_done != cmd && - sun3scsi_dma_xfer_len(count, cmd) > 0) { - sun3scsi_dma_setup(instance, d, count, - rq_data_dir(cmd->request)); + count = sun3scsi_dma_xfer_len(hostdata, cmd); + + if (count > 0) { + if (rq_data_dir(cmd->request)) + sun3scsi_dma_send_setup(hostdata, + cmd->SCp.ptr, count); + else + sun3scsi_dma_recv_setup(hostdata, + cmd->SCp.ptr, count); sun3_dma_setup_done = cmd; } #ifdef SUN3_SCSI_VME @@ -1827,7 +1760,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) transfersize = 0; if (!cmd->device->borken) - transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); + transfersize = NCR5380_dma_xfer_len(hostdata, cmd); if (transfersize > 0) { len = transfersize; @@ -2073,7 +2006,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) } /* switch(phase) */ } else { spin_unlock_irq(&hostdata->lock); - NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); + NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ); spin_lock_irq(&hostdata->lock); } } @@ -2119,7 +2052,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); - if (NCR5380_poll_politely(instance, + if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return; @@ -2130,7 +2063,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) * Wait for target to go into MSGIN. 
*/ - if (NCR5380_poll_politely(instance, + if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) { do_abort(instance); return; @@ -2204,22 +2137,25 @@ static void NCR5380_reselect(struct Scsi_Host *instance) } #ifdef CONFIG_SUN3 - { - void *d; - unsigned long count; + if (sun3_dma_setup_done != tmp) { + int count; if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { - count = tmp->SCp.buffer->length; - d = sg_virt(tmp->SCp.buffer); - } else { - count = tmp->SCp.this_residual; - d = tmp->SCp.ptr; + ++tmp->SCp.buffer; + --tmp->SCp.buffers_residual; + tmp->SCp.this_residual = tmp->SCp.buffer->length; + tmp->SCp.ptr = sg_virt(tmp->SCp.buffer); } - if (sun3_dma_setup_done != tmp && - sun3scsi_dma_xfer_len(count, tmp) > 0) { - sun3scsi_dma_setup(instance, d, count, - rq_data_dir(tmp->request)); + count = sun3scsi_dma_xfer_len(hostdata, tmp); + + if (count > 0) { + if (rq_data_dir(tmp->request)) + sun3scsi_dma_send_setup(hostdata, + tmp->SCp.ptr, count); + else + sun3scsi_dma_recv_setup(hostdata, + tmp->SCp.ptr, count); sun3_dma_setup_done = tmp; } } diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 965d92339455c3e613ad106c615796a47afc0acb..51a3567a6fb2a36a3eb44fbf94edf5536e333fe4 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -199,16 +199,6 @@ #define PHASE_SR_TO_TCR(phase) ((phase) >> 2) -/* - * These are "special" values for the irq and dma_channel fields of the - * Scsi_Host structure - */ - -#define DMA_NONE 255 -#define IRQ_AUTO 254 -#define DMA_AUTO 254 -#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */ - #ifndef NO_IRQ #define NO_IRQ 0 #endif @@ -219,27 +209,32 @@ #define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */ struct NCR5380_hostdata { - NCR5380_implementation_fields; /* implementation specific */ - struct Scsi_Host *host; /* Host backpointer */ - unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */ - unsigned char busy[8]; /* index = target, bit = lun */ - int dma_len; /* requested length of DMA */ - unsigned char last_message; /* last message OUT */ - struct scsi_cmnd *connected; /* currently connected cmnd */ - struct scsi_cmnd *selecting; /* cmnd to be connected */ - struct list_head unissued; /* waiting to be issued */ - struct list_head autosense; /* priority issue queue */ - struct list_head disconnected; /* waiting for reconnect */ - spinlock_t lock; /* protects this struct */ - int flags; - struct scsi_eh_save ses; - struct scsi_cmnd *sensing; + NCR5380_implementation_fields; /* Board-specific data */ + u8 __iomem *io; /* Remapped 5380 address */ + u8 __iomem *pdma_io; /* Remapped PDMA address */ + unsigned long poll_loops; /* Register polling limit */ + spinlock_t lock; /* Protects this struct */ + struct scsi_cmnd *connected; /* Currently connected cmnd */ + struct list_head disconnected; /* Waiting for reconnect */ + struct Scsi_Host *host; /* SCSI host backpointer */ + struct workqueue_struct *work_q; /* SCSI host work queue */ + struct work_struct main_task; /* Work item for main loop */ + int flags; /* Board-specific quirks */ + int dma_len; /* Requested length of DMA */ + int read_overruns; /* Transfer size reduction for DMA erratum */ + unsigned long io_port; /* Device IO port */ + unsigned long base; /* Device base address */ + struct list_head unissued; /* Waiting to be issued */ + struct scsi_cmnd *selecting; /* Cmnd to be connected */ + struct list_head autosense; /* Priority cmnd queue */ + struct scsi_cmnd *sensing; /* Cmnd needing autosense */ + struct 
scsi_eh_save ses; /* Cmnd state saved for EH */ + unsigned char busy[8]; /* Index = target, bit = lun */ + unsigned char id_mask; /* 1 << Host ID */ + unsigned char id_higher_mask; /* All bits above id_mask */ + unsigned char last_message; /* Last Message Out */ + unsigned long region_size; /* Size of address/port range */ char info[256]; - int read_overruns; /* number of bytes to cut from a - * transfer to handle chip overruns */ - struct work_struct main_task; - struct workqueue_struct *work_q; - unsigned long accesses_per_ms; /* chip register accesses per ms */ }; #ifdef __KERNEL__ @@ -252,6 +247,9 @@ struct NCR5380_cmd { #define NCR5380_PIO_CHUNK_SIZE 256 +/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */ +#define NCR5380_REG_POLL_TIME 15 + static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr) { return ((struct scsi_cmnd *)ncmd_ptr) - 1; @@ -282,7 +280,6 @@ static void NCR5380_print(struct Scsi_Host *instance); #define NCR5380_dprint_phase(flg, arg) do {} while (0) #endif -static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); static int NCR5380_init(struct Scsi_Host *instance, int flags); static int NCR5380_maybe_reset_bus(struct Scsi_Host *); static void NCR5380_exit(struct Scsi_Host *instance); @@ -294,14 +291,45 @@ static void NCR5380_reselect(struct Scsi_Host *instance); static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); -static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int); +static int NCR5380_poll_politely2(struct NCR5380_hostdata *, + unsigned int, u8, u8, + unsigned int, u8, u8, unsigned long); -static inline int NCR5380_poll_politely(struct Scsi_Host *instance, - int reg, int bit, int val, int wait) +static inline int NCR5380_poll_politely(struct NCR5380_hostdata *hostdata, + unsigned int reg, u8 bit, u8 val, + unsigned long wait) { - return NCR5380_poll_politely2(instance, reg, bit, val, + if ((NCR5380_read(reg) & bit) == val) + return 0; + + return NCR5380_poll_politely2(hostdata, reg, bit, val, reg, bit, val, wait); } +static int NCR5380_dma_xfer_len(struct NCR5380_hostdata *, + struct scsi_cmnd *); +static int NCR5380_dma_send_setup(struct NCR5380_hostdata *, + unsigned char *, int); +static int NCR5380_dma_recv_setup(struct NCR5380_hostdata *, + unsigned char *, int); +static int NCR5380_dma_residual(struct NCR5380_hostdata *); + +static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) +{ + return 0; +} + +static inline int NCR5380_dma_setup_none(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return 0; +} + +static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata) +{ + return 0; +} + #endif /* __KERNEL__ */ #endif /* NCR5380_H */ diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 6678d1fd897bec099a0ac4fe147d3c11af01e614..1ee7c654f7b80a44b395715fb5675aa3cef3940d 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include /* For flush_kernel_dcache_page */ #include diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 
969c312de1be38b9f27f5c5746c0eaf8e93865d0..f059c14efa0c7cb0c094451ef13e41a0f6d6bee4 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1246,7 +1246,6 @@ struct aac_dev u32 max_msix; /* max. MSI-X vectors */ u32 vector_cap; /* MSI-X vector capab.*/ int msi_enabled; /* MSI/MSI-X enabled */ - struct msix_entry msixentry[AAC_MAX_MSIX]; struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */ u8 adapter_shutdown; u32 handle_pci_error; diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 5648b715fed9c2d4e448c9f477953ecb83e94641..e1daff230c7dab05b794cf44a8675d9cad04fcaa 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -41,7 +41,7 @@ #include /* ssleep prototype */ #include #include -#include +#include #include #include "aacraid.h" diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 341ea327ae7946f758bbcc8b9a518ef2051bf7e9..4f56b1003cc7d9d8c56dcb98ead00b0fb5348788 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -378,16 +378,12 @@ void aac_define_int_mode(struct aac_dev *dev) if (msi_count > AAC_MAX_MSIX) msi_count = AAC_MAX_MSIX; - for (i = 0; i < msi_count; i++) - dev->msixentry[i].entry = i; - if (msi_count > 1 && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { min_msix = 2; - i = pci_enable_msix_range(dev->pdev, - dev->msixentry, - min_msix, - msi_count); + i = pci_alloc_irq_vectors(dev->pdev, + min_msix, msi_count, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (i > 0) { dev->msi_enabled = 1; msi_count = i; diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 0aeecec1f5eafa6390a020eb0174cb2582519524..9e7551fe4b19cc542219cea5c0c854c3ff456d3e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -2043,30 +2043,22 @@ int aac_acquire_irq(struct aac_dev *dev) int i; int j; int ret = 0; - int cpu; - cpu = cpumask_first(cpu_online_mask); if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) { for (i = 0; i < dev->max_msix; i++) { dev->aac_msix[i].vector_no = i; dev->aac_msix[i].dev = dev; - if (request_irq(dev->msixentry[i].vector, + if (request_irq(pci_irq_vector(dev->pdev, i), dev->a_ops.adapter_intr, 0, "aacraid", &(dev->aac_msix[i]))) { printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n", dev->name, dev->id, i); for (j = 0 ; j < i ; j++) - free_irq(dev->msixentry[j].vector, + free_irq(pci_irq_vector(dev->pdev, j), &(dev->aac_msix[j])); pci_disable_msix(dev->pdev); ret = -1; } - if (irq_set_affinity_hint(dev->msixentry[i].vector, - get_cpu_mask(cpu))) { - printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n", - dev->name, dev->id, cpu); - } - cpu = cpumask_next(cpu, cpu_online_mask); } } else { dev->aac_msix[0].vector_no = 0; @@ -2096,16 +2088,9 @@ void aac_free_irq(struct aac_dev *dev) dev->pdev->device == PMC_DEVICE_S8 || dev->pdev->device == PMC_DEVICE_S9) { if (dev->max_msix > 1) { - for (i = 0; i < dev->max_msix; i++) { - if (irq_set_affinity_hint( - dev->msixentry[i].vector, NULL)) { - printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n", - dev->name, dev->id, cpu); - } - cpu = cpumask_next(cpu, cpu_online_mask); - free_irq(dev->msixentry[i].vector, - &(dev->aac_msix[i])); - } + for (i = 0; i < dev->max_msix; i++) + free_irq(pci_irq_vector(dev->pdev, i), + &(dev->aac_msix[i])); } else { free_irq(dev->pdev->irq, &(dev->aac_msix[0])); } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 
79871f3519ffc923ed9c41848d70b17120c37844..3ecbf20ca29f96b970cd4b14c38eb3fdbb3b1511 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = { { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */ { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */ { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */ - { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */ { 0,} }; MODULE_DEVICE_TABLE(pci, aac_pci_tbl); @@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = { { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */ - { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */ }; /** @@ -1071,7 +1069,6 @@ static struct scsi_host_template aac_driver_template = { static void __aac_shutdown(struct aac_dev * aac) { int i; - int cpu; aac_send_shutdown(aac); @@ -1087,24 +1084,13 @@ static void __aac_shutdown(struct aac_dev * aac) kthread_stop(aac->thread); } aac_adapter_disable_int(aac); - cpu = cpumask_first(cpu_online_mask); if (aac->pdev->device == PMC_DEVICE_S6 || aac->pdev->device == PMC_DEVICE_S7 || aac->pdev->device == PMC_DEVICE_S8 || aac->pdev->device == PMC_DEVICE_S9) { if (aac->max_msix > 1) { for (i = 0; i < aac->max_msix; i++) { - if (irq_set_affinity_hint( - aac->msixentry[i].vector, - NULL)) { - printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n", - aac->name, - aac->id, - cpu); - } - cpu = cpumask_next(cpu, - cpu_online_mask); - free_irq(aac->msixentry[i].vector, + free_irq(pci_irq_vector(aac->pdev, i), &(aac->aac_msix[i])); } } else { @@ -1350,7 +1336,7 @@ static void aac_release_resources(struct aac_dev *aac) aac->pdev->device == PMC_DEVICE_S9) { if (aac->max_msix > 1) { for (i = 0; i < aac->max_msix; i++) - free_irq(aac->msixentry[i].vector, + free_irq(pci_irq_vector(aac->pdev, i), &(aac->aac_msix[i])); } else { free_irq(aac->pdev->irq, &(aac->aac_msix[0])); @@ -1396,13 +1382,13 @@ static int aac_acquire_resources(struct aac_dev *dev) dev->aac_msix[i].vector_no = i; dev->aac_msix[i].dev = dev; - if (request_irq(dev->msixentry[i].vector, + if (request_irq(pci_irq_vector(dev->pdev, i), dev->a_ops.adapter_intr, 0, "aacraid", &(dev->aac_msix[i]))) { printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n", name, instance, i); for (j = 0 ; j < i ; j++) - free_irq(dev->msixentry[j].vector, + free_irq(pci_irq_vector(dev->pdev, j), &(dev->aac_msix[j])); pci_disable_msix(dev->pdev); goto error_iounmap; diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index febbd83e2ecd9d6a6984500eb8cc02d6690c3984..81dd0927246b70eecc04ff6860af1f1fc815786d 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -11030,6 +11030,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, ASC_DBG(2, "AdvInitGetConfig()\n"); ret = AdvInitGetConfig(pdev, shost) ? 
-ENODEV : 0; +#else + share_irq = 0; + ret = -ENODEV; #endif /* CONFIG_PCI */ } diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c index 2e3117aa382f2c701dad91de81fcfef4c38f1b8a..21ac265280bf2f6d001b06daa0365f80f8f6976e 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm.c +++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c @@ -254,7 +254,7 @@ main(int argc, char *argv[]) argv += optind; if (argc != 1) { - fprintf(stderr, "%s: No input file specifiled\n", appname); + fprintf(stderr, "%s: No input file specified\n", appname); usage(); /* NOTREACHED */ } diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 7c713f797535315b9c10e760c1226b84c9a6f34e..f2671a8fa7e3358f304c539e7431d73733b95258 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -228,8 +228,11 @@ static int asd_init_scbs(struct asd_ha_struct *asd_ha) bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8; bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); - if (!asd_ha->seq.tc_index_bitmap) + if (!asd_ha->seq.tc_index_bitmap) { + kfree(asd_ha->seq.tc_index_array); + asd_ha->seq.tc_index_array = NULL; return -ENOMEM; + } spin_lock_init(&seq->tc_index_lock); diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index cf99f8cf4cdd80cf0f27127e7e7ed50e426ee034..a254b32eba39caf73b44c9c1116379f4b2023f38 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -629,7 +629,6 @@ struct AdapterControlBlock struct pci_dev * pdev; struct Scsi_Host * host; unsigned long vir2phy_offset; - struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS]; /* Offset is used in making arc cdb physical to virtual calculations */ uint32_t outbound_int_enable; uint32_t cdb_phyaddr_hi32; @@ -671,8 +670,6 @@ struct AdapterControlBlock /* iop init */ #define ACB_F_ABORT 0x0200 #define ACB_F_FIRMWARE_TRAP 0x0400 - #define ACB_F_MSI_ENABLED 0x1000 - #define ACB_F_MSIX_ENABLED 0x2000 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; /* used for memory free */ struct list_head ccb_free_list; @@ -725,7 +722,7 @@ struct AdapterControlBlock atomic_t rq_map_token; atomic_t ante_token_value; uint32_t maxOutstanding; - int msix_vector_count; + int vector_count; };/* HW_DEVICE_EXTENSION */ /* ******************************************************************************* diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index f0cfb04517570839b968b0271b6029e07a3e4897..af032c46ec0e1950deecbc69a33c24035bb4ecae 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -61,7 +61,7 @@ #include #include #include -#include +#include #include #include #include @@ -720,51 +720,39 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work) static int arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb) { - int i, j, r; - struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS]; - - for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++) - entries[i].entry = i; - r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS); - if (r < 0) - goto msi_int; - acb->msix_vector_count = r; - for (i = 0; i < r; i++) { - if (request_irq(entries[i].vector, - arcmsr_do_interrupt, 0, "arcmsr", acb)) { + unsigned long flags; + int nvec, i; + + nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS, + PCI_IRQ_MSIX); + if (nvec > 0) { + pr_info("arcmsr%d: msi-x enabled\n", 
acb->host->host_no); + flags = 0; + } else { + nvec = pci_alloc_irq_vectors(pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (nvec < 1) + return FAILED; + + flags = IRQF_SHARED; + } + + acb->vector_count = nvec; + for (i = 0; i < nvec; i++) { + if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt, + flags, "arcmsr", acb)) { pr_warn("arcmsr%d: request_irq =%d failed!\n", - acb->host->host_no, entries[i].vector); - for (j = 0 ; j < i ; j++) - free_irq(entries[j].vector, acb); - pci_disable_msix(pdev); - goto msi_int; + acb->host->host_no, pci_irq_vector(pdev, i)); + goto out_free_irq; } - acb->entries[i] = entries[i]; - } - acb->acb_flags |= ACB_F_MSIX_ENABLED; - pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no); - return SUCCESS; -msi_int: - if (pci_enable_msi_exact(pdev, 1) < 0) - goto legacy_int; - if (request_irq(pdev->irq, arcmsr_do_interrupt, - IRQF_SHARED, "arcmsr", acb)) { - pr_warn("arcmsr%d: request_irq =%d failed!\n", - acb->host->host_no, pdev->irq); - pci_disable_msi(pdev); - goto legacy_int; - } - acb->acb_flags |= ACB_F_MSI_ENABLED; - pr_info("arcmsr%d: msi enabled\n", acb->host->host_no); - return SUCCESS; -legacy_int: - if (request_irq(pdev->irq, arcmsr_do_interrupt, - IRQF_SHARED, "arcmsr", acb)) { - pr_warn("arcmsr%d: request_irq = %d failed!\n", - acb->host->host_no, pdev->irq); - return FAILED; } + return SUCCESS; +out_free_irq: + while (--i >= 0) + free_irq(pci_irq_vector(pdev, i), acb); + pci_free_irq_vectors(pdev); + return FAILED; } static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -886,15 +874,9 @@ static void arcmsr_free_irq(struct pci_dev *pdev, { int i; - if (acb->acb_flags & ACB_F_MSI_ENABLED) { - free_irq(pdev->irq, acb); - pci_disable_msi(pdev); - } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) { - for (i = 0; i < acb->msix_vector_count; i++) - free_irq(acb->entries[i].vector, acb); - pci_disable_msix(pdev); - } else - free_irq(pdev->irq, acb); + for (i = 0; i < acb->vector_count; i++) + free_irq(pci_irq_vector(pdev, i), acb); + pci_free_irq_vectors(pdev); } static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state) diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index 8e9cfe8f22f505d9865aa8a870dcde9d94234826..a87b99c7fb9a7596810b3a7c4ecdddb4b3ff294f 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c @@ -14,49 +14,48 @@ #include #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) -#define NCR5380_read(reg) cumanascsi_read(instance, reg) -#define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value) +#define NCR5380_read(reg) cumanascsi_read(hostdata, reg) +#define NCR5380_write(reg, value) cumanascsi_write(hostdata, reg, value) -#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) +#define NCR5380_dma_xfer_len cumanascsi_dma_xfer_len #define NCR5380_dma_recv_setup cumanascsi_pread #define NCR5380_dma_send_setup cumanascsi_pwrite -#define NCR5380_dma_residual(instance) (0) +#define NCR5380_dma_residual NCR5380_dma_residual_none #define NCR5380_intr cumanascsi_intr #define NCR5380_queue_command cumanascsi_queue_command #define NCR5380_info cumanascsi_info #define NCR5380_implementation_fields \ - unsigned ctrl; \ - void __iomem *base; \ - void __iomem *dma + unsigned ctrl -#include "../NCR5380.h" +struct NCR5380_hostdata; +static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int); +static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8); -void cumanascsi_setup(char *str, int *ints) -{ -} 
+#include "../NCR5380.h" #define CTRL 0x16fc #define STAT 0x2004 #define L(v) (((v)<<16)|((v) & 0x0000ffff)) #define H(v) (((v)>>16)|((v) & 0xffff0000)) -static inline int cumanascsi_pwrite(struct Scsi_Host *host, +static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata, unsigned char *addr, int len) { unsigned long *laddr; - void __iomem *dma = priv(host)->dma + 0x2000; + u8 __iomem *base = hostdata->io; + u8 __iomem *dma = hostdata->pdma_io + 0x2000; if(!len) return 0; - writeb(0x02, priv(host)->base + CTRL); + writeb(0x02, base + CTRL); laddr = (unsigned long *)addr; while(len >= 32) { unsigned int status; unsigned long v; - status = readb(priv(host)->base + STAT); + status = readb(base + STAT); if(status & 0x80) goto end; if(!(status & 0x40)) @@ -75,12 +74,12 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host, } addr = (unsigned char *)laddr; - writeb(0x12, priv(host)->base + CTRL); + writeb(0x12, base + CTRL); while(len > 0) { unsigned int status; - status = readb(priv(host)->base + STAT); + status = readb(base + STAT); if(status & 0x80) goto end; if(status & 0x40) @@ -90,7 +89,7 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host, break; } - status = readb(priv(host)->base + STAT); + status = readb(base + STAT); if(status & 0x80) goto end; if(status & 0x40) @@ -101,27 +100,28 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host, } } end: - writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); + writeb(hostdata->ctrl | 0x40, base + CTRL); if (len) return -1; return 0; } -static inline int cumanascsi_pread(struct Scsi_Host *host, +static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *addr, int len) { unsigned long *laddr; - void __iomem *dma = priv(host)->dma + 0x2000; + u8 __iomem *base = hostdata->io; + u8 __iomem *dma = hostdata->pdma_io + 0x2000; if(!len) return 0; - writeb(0x00, priv(host)->base + CTRL); + writeb(0x00, base + CTRL); laddr = (unsigned long *)addr; while(len >= 32) { unsigned int status; - status = readb(priv(host)->base + STAT); + status = readb(base + STAT); if(status & 0x80) goto end; if(!(status & 0x40)) @@ -140,12 +140,12 @@ static inline int cumanascsi_pread(struct Scsi_Host *host, } addr = (unsigned char *)laddr; - writeb(0x10, priv(host)->base + CTRL); + writeb(0x10, base + CTRL); while(len > 0) { unsigned int status; - status = readb(priv(host)->base + STAT); + status = readb(base + STAT); if(status & 0x80) goto end; if(status & 0x40) @@ -155,7 +155,7 @@ static inline int cumanascsi_pread(struct Scsi_Host *host, break; } - status = readb(priv(host)->base + STAT); + status = readb(base + STAT); if(status & 0x80) goto end; if(status & 0x40) @@ -166,37 +166,45 @@ static inline int cumanascsi_pread(struct Scsi_Host *host, } } end: - writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); + writeb(hostdata->ctrl | 0x40, base + CTRL); if (len) return -1; return 0; } -static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg) +static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) +{ + return cmd->transfersize; +} + +static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata, + unsigned int reg) { - void __iomem *base = priv(host)->base; - unsigned char val; + u8 __iomem *base = hostdata->io; + u8 val; writeb(0, base + CTRL); val = readb(base + 0x2100 + (reg << 2)); - priv(host)->ctrl = 0x40; + hostdata->ctrl = 0x40; writeb(0x40, base + CTRL); return val; } -static void cumanascsi_write(struct Scsi_Host *host, unsigned int 
reg, unsigned int value) +static void cumanascsi_write(struct NCR5380_hostdata *hostdata, + unsigned int reg, u8 value) { - void __iomem *base = priv(host)->base; + u8 __iomem *base = hostdata->io; writeb(0, base + CTRL); writeb(value, base + 0x2100 + (reg << 2)); - priv(host)->ctrl = 0x40; + hostdata->ctrl = 0x40; writeb(0x40, base + CTRL); } @@ -235,11 +243,11 @@ static int cumanascsi1_probe(struct expansion_card *ec, goto out_release; } - priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW), - ecard_resource_len(ec, ECARD_RES_IOCSLOW)); - priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), - ecard_resource_len(ec, ECARD_RES_MEMC)); - if (!priv(host)->base || !priv(host)->dma) { + priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW), + ecard_resource_len(ec, ECARD_RES_IOCSLOW)); + priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), + ecard_resource_len(ec, ECARD_RES_MEMC)); + if (!priv(host)->io || !priv(host)->pdma_io) { ret = -ENOMEM; goto out_unmap; } @@ -253,7 +261,7 @@ static int cumanascsi1_probe(struct expansion_card *ec, NCR5380_maybe_reset_bus(host); priv(host)->ctrl = 0; - writeb(0, priv(host)->base + CTRL); + writeb(0, priv(host)->io + CTRL); ret = request_irq(host->irq, cumanascsi_intr, 0, "CumanaSCSI-1", host); @@ -275,8 +283,8 @@ static int cumanascsi1_probe(struct expansion_card *ec, out_exit: NCR5380_exit(host); out_unmap: - iounmap(priv(host)->base); - iounmap(priv(host)->dma); + iounmap(priv(host)->io); + iounmap(priv(host)->pdma_io); scsi_host_put(host); out_release: ecard_release_resources(ec); @@ -287,15 +295,17 @@ static int cumanascsi1_probe(struct expansion_card *ec, static void cumanascsi1_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); + void __iomem *base = priv(host)->io; + void __iomem *dma = priv(host)->pdma_io; ecard_set_drvdata(ec, NULL); scsi_remove_host(host); free_irq(host->irq, host); NCR5380_exit(host); - iounmap(priv(host)->base); - iounmap(priv(host)->dma); scsi_host_put(host); + iounmap(base); + iounmap(dma); ecard_release_resources(ec); } diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index a396024a3caeca5d8850c68cc188048167549138..6be6666534d4f662ff9e32cd8df1538d0c107515 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c @@ -16,21 +16,18 @@ #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) -#define NCR5380_read(reg) \ - readb(priv(instance)->base + ((reg) << 2)) -#define NCR5380_write(reg, value) \ - writeb(value, priv(instance)->base + ((reg) << 2)) +#define NCR5380_read(reg) readb(hostdata->io + ((reg) << 2)) +#define NCR5380_write(reg, value) writeb(value, hostdata->io + ((reg) << 2)) -#define NCR5380_dma_xfer_len(instance, cmd, phase) (0) +#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none #define NCR5380_dma_recv_setup oakscsi_pread #define NCR5380_dma_send_setup oakscsi_pwrite -#define NCR5380_dma_residual(instance) (0) +#define NCR5380_dma_residual NCR5380_dma_residual_none #define NCR5380_queue_command oakscsi_queue_command #define NCR5380_info oakscsi_info -#define NCR5380_implementation_fields \ - void __iomem *base +#define NCR5380_implementation_fields /* none */ #include "../NCR5380.h" @@ -40,10 +37,10 @@ #define STAT ((128 + 16) << 2) #define DATA ((128 + 8) << 2) -static inline int oakscsi_pwrite(struct Scsi_Host *instance, +static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata, unsigned char *addr, int len) { - void __iomem *base = priv(instance)->base; + u8 __iomem *base 
= hostdata->io; printk("writing %p len %d\n",addr, len); @@ -55,10 +52,11 @@ printk("writing %p len %d\n",addr, len); return 0; } -static inline int oakscsi_pread(struct Scsi_Host *instance, +static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *addr, int len) { - void __iomem *base = priv(instance)->base; + u8 __iomem *base = hostdata->io; + printk("reading %p len %d\n", addr, len); while(len > 0) { @@ -133,15 +131,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), - ecard_resource_len(ec, ECARD_RES_MEMC)); - if (!priv(host)->base) { + priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), + ecard_resource_len(ec, ECARD_RES_MEMC)); + if (!priv(host)->io) { ret = -ENOMEM; goto unreg; } host->irq = NO_IRQ; - host->n_io_port = 255; ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); if (ret) @@ -159,7 +156,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) out_exit: NCR5380_exit(host); out_unmap: - iounmap(priv(host)->base); + iounmap(priv(host)->io); unreg: scsi_host_put(host); release: @@ -171,13 +168,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) static void oakscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); + void __iomem *base = priv(host)->io; ecard_set_drvdata(ec, NULL); scsi_remove_host(host); NCR5380_exit(host); - iounmap(priv(host)->base); scsi_host_put(host); + iounmap(base); ecard_release_resources(ec); } diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index a59ad94ea52b371272d4878b857f5efa532c4c16..105b35393ce91751d859948595ed70508d90e7d7 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -57,6 +57,9 @@ #define NCR5380_implementation_fields /* none */ +static u8 (*atari_scsi_reg_read)(unsigned int); +static void (*atari_scsi_reg_write)(unsigned int, u8); + #define NCR5380_read(reg) atari_scsi_reg_read(reg) #define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value) @@ -64,14 +67,10 @@ #define NCR5380_abort atari_scsi_abort #define NCR5380_info atari_scsi_info -#define NCR5380_dma_recv_setup(instance, data, count) \ - atari_scsi_dma_setup(instance, data, count, 0) -#define NCR5380_dma_send_setup(instance, data, count) \ - atari_scsi_dma_setup(instance, data, count, 1) -#define NCR5380_dma_residual(instance) \ - atari_scsi_dma_residual(instance) -#define NCR5380_dma_xfer_len(instance, cmd, phase) \ - atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO)) +#define NCR5380_dma_xfer_len atari_scsi_dma_xfer_len +#define NCR5380_dma_recv_setup atari_scsi_dma_recv_setup +#define NCR5380_dma_send_setup atari_scsi_dma_send_setup +#define NCR5380_dma_residual atari_scsi_dma_residual #define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance) #define NCR5380_release_dma_irq(instance) falcon_release_lock() @@ -126,9 +125,6 @@ static inline unsigned long SCSI_DMA_GETADR(void) static void atari_scsi_fetch_restbytes(void); -static unsigned char (*atari_scsi_reg_read)(unsigned char reg); -static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); - static unsigned long atari_dma_residual, atari_dma_startaddr; static short atari_dma_active; /* pointer to the dribble buffer */ @@ -457,15 +453,14 @@ static int __init atari_scsi_setup(char *str) __setup("atascsi=", atari_scsi_setup); #endif /* !MODULE */ - -static unsigned long 
atari_scsi_dma_setup(struct Scsi_Host *instance, +static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata, void *data, unsigned long count, int dir) { unsigned long addr = virt_to_phys(data); - dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " - "dir = %d\n", instance->host_no, data, addr, count, dir); + dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n", + hostdata->host->host_no, data, addr, count, dir); if (!IS_A_TT() && !STRAM_ADDR(addr)) { /* If we have a non-DMAable address on a Falcon, use the dribble @@ -522,8 +517,19 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, return count; } +static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return atari_scsi_dma_setup(hostdata, data, count, 0); +} + +static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return atari_scsi_dma_setup(hostdata, data, count, 1); +} -static long atari_scsi_dma_residual(struct Scsi_Host *instance) +static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata) { return atari_dma_residual; } @@ -564,10 +570,11 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd) * the overrun problem, so this question is academic :-) */ -static unsigned long atari_dma_xfer_len(unsigned long wanted_len, - struct scsi_cmnd *cmd, int write_flag) +static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) { - unsigned long possible_len, limit; + int wanted_len = cmd->SCp.this_residual; + int possible_len, limit; if (wanted_len < DMA_MIN_SIZE) return 0; @@ -604,7 +611,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes. */ - if (write_flag) { + if (cmd->sc_data_direction == DMA_TO_DEVICE) { /* Write operation can always use the DMA, but the transfer size must * be rounded up to the next multiple of 512 (atari_dma_setup() does * this). @@ -644,8 +651,8 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, possible_len = limit; if (possible_len != wanted_len) - dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes " - "instead of %ld\n", possible_len, wanted_len); + dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n", + possible_len, wanted_len); return possible_len; } @@ -658,26 +665,38 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, * NCR5380_write call these functions via function pointers. 
*/ -static unsigned char atari_scsi_tt_reg_read(unsigned char reg) +static u8 atari_scsi_tt_reg_read(unsigned int reg) { return tt_scsi_regp[reg * 2]; } -static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value) +static void atari_scsi_tt_reg_write(unsigned int reg, u8 value) { tt_scsi_regp[reg * 2] = value; } -static unsigned char atari_scsi_falcon_reg_read(unsigned char reg) +static u8 atari_scsi_falcon_reg_read(unsigned int reg) { - dma_wd.dma_mode_status= (u_short)(0x88 + reg); - return (u_char)dma_wd.fdc_acces_seccount; + unsigned long flags; + u8 result; + + reg += 0x88; + local_irq_save(flags); + dma_wd.dma_mode_status = (u_short)reg; + result = (u8)dma_wd.fdc_acces_seccount; + local_irq_restore(flags); + return result; } -static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value) +static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value) { - dma_wd.dma_mode_status = (u_short)(0x88 + reg); + unsigned long flags; + + reg += 0x88; + local_irq_save(flags); + dma_wd.dma_mode_status = (u_short)reg; dma_wd.fdc_acces_seccount = (u_short)value; + local_irq_restore(flags); } diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index d9239c2d49b117175d34379716b464c7b06dcfaa..b5112d6d7e7343a8f7bce4c0fb4089095ca53460 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -3049,8 +3049,10 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba, eq_vaddress = pci_alloc_consistent(phba->pcidev, num_eq_pages * PAGE_SIZE, &paddr); - if (!eq_vaddress) + if (!eq_vaddress) { + ret = -ENOMEM; goto create_eq_error; + } mem->va = eq_vaddress; ret = be_fill_queue(eq, phba->params.num_eq_entries, @@ -3113,8 +3115,10 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba, cq_vaddress = pci_alloc_consistent(phba->pcidev, num_cq_pages * PAGE_SIZE, &paddr); - if (!cq_vaddress) + if (!cq_vaddress) { + ret = -ENOMEM; goto create_cq_error; + } ret = be_fill_queue(cq, phba->params.num_cq_entries, sizeof(struct sol_cqe), cq_vaddress); diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index aebc4ddb3060ee177c06ec9f11b62a021c346f10..ac05317bba7f3342d016e581b91ae6feffa18eff 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1083,7 +1083,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) nonemb_cmd = &phba->boot_struct.nonemb_cmd; nonemb_cmd->size = sizeof(*resp); nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev, - sizeof(nonemb_cmd->size), + nonemb_cmd->size, &nonemb_cmd->dma); if (!nonemb_cmd->va) { mutex_unlock(&ctrl->mbox_lock); diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 713745da44c6ac457ad61b3174ffe4056bdcba83..0f9fab770339abb174590d684c166dc568a3f721 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -111,20 +111,24 @@ struct bfa_meminfo_s { struct bfa_mem_kva_s kva_info; }; -/* BFA memory segment setup macros */ -#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \ - ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \ - if (_seg_sz) \ - list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \ - &(_meminfo)->dma_info.qe); \ -} while (0) +/* BFA memory segment setup helpers */ +static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo, + struct bfa_mem_dma_s *dm_ptr, + size_t seg_sz) +{ + dm_ptr->mem_len = seg_sz; + if (seg_sz) + list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe); +} -#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \ - 
((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \ - if (_seg_sz) \ - list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \ - &(_meminfo)->kva_info.qe); \ -} while (0) +static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo, + struct bfa_mem_kva_s *kva_ptr, + size_t seg_sz) +{ + kva_ptr->mem_len = seg_sz; + if (seg_sz) + list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe); +} /* BFA dma memory segments iterator */ #define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)]) diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 9d253cb83ee762692c1ec0f56303f3c7fc8b416e..d9e15210b110efcd3147d5a02f0e06c0673d4ad3 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include "bfad_drv.h" diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index d1ad0208dfe754e08d648ff8f3e8abd750ac845d..a9a00169ad91960798c35114c69b4d17f13d1039 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -3130,11 +3130,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, } static int -bfad_im_bsg_vendor_request(struct fc_bsg_job *job) +bfad_im_bsg_vendor_request(struct bsg_job *job) { - uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0]; - struct bfad_im_port_s *im_port = - (struct bfad_im_port_s *) job->shost->hostdata[0]; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; + struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); struct bfad_s *bfad = im_port->bfad; struct request_queue *request_q = job->req->q; void *payload_kbuf; @@ -3175,18 +3176,19 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job) /* Fill the BSG job reply data */ job->reply_len = job->reply_payload.payload_len; - job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; - job->reply->result = rc; + bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len; + bsg_reply->result = rc; - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; error: /* free the command buffer */ kfree(payload_kbuf); out: - job->reply->result = rc; + bsg_reply->result = rc; job->reply_len = sizeof(uint32_t); - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; return rc; } @@ -3312,7 +3314,7 @@ bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base, } int -bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp, +bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp, bfa_bsg_fcpt_t *bsg_fcpt) { struct bfa_fcxp_s *hal_fcxp; @@ -3352,28 +3354,29 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp, } int -bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) +bfad_im_bsg_els_ct_request(struct bsg_job *job) { struct bfa_bsg_data *bsg_data; - struct bfad_im_port_s *im_port = - (struct bfad_im_port_s *) job->shost->hostdata[0]; + struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); struct bfad_s *bfad = im_port->bfad; bfa_bsg_fcpt_t *bsg_fcpt; struct bfad_fcxp *drv_fcxp; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; - uint32_t command_type = job->request->msgcode; + struct fc_bsg_request *bsg_request = bsg_request; + struct fc_bsg_reply *bsg_reply = job->reply; + uint32_t command_type = bsg_request->msgcode; unsigned long flags; struct bfad_buf_info 
*rsp_buf_info; void *req_kbuf = NULL, *rsp_kbuf = NULL; int rc = -EINVAL; job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; /* Get the payload passed in from userspace */ - bsg_data = (struct bfa_bsg_data *) (((char *)job->request) + - sizeof(struct fc_bsg_request)); + bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) + + sizeof(struct fc_bsg_request)); if (bsg_data == NULL) goto out; @@ -3517,13 +3520,13 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) /* fill the job->reply data */ if (drv_fcxp->req_status == BFA_STATUS_OK) { job->reply_len = drv_fcxp->rsp_len; - job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len; - job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len; + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; } else { - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sizeof(struct fc_bsg_ctels_reply); job->reply_len = sizeof(uint32_t); - job->reply->reply_data.ctels_reply.status = + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT; } @@ -3549,20 +3552,23 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) kfree(bsg_fcpt); kfree(drv_fcxp); out: - job->reply->result = rc; + bsg_reply->result = rc; if (rc == BFA_STATUS_OK) - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } int -bfad_im_bsg_request(struct fc_bsg_job *job) +bfad_im_bsg_request(struct bsg_job *job) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; uint32_t rc = BFA_STATUS_OK; - switch (job->request->msgcode) { + switch (bsg_request->msgcode) { case FC_BSG_HST_VENDOR: /* Process BSG HST Vendor requests */ rc = bfad_im_bsg_vendor_request(job); @@ -3575,8 +3581,8 @@ bfad_im_bsg_request(struct fc_bsg_job *job) rc = bfad_im_bsg_els_ct_request(job); break; default: - job->reply->result = rc = -EINVAL; - job->reply->reply_payload_rcv_len = 0; + bsg_reply->result = rc = -EINVAL; + bsg_reply->reply_payload_rcv_len = 0; break; } @@ -3584,7 +3590,7 @@ bfad_im_bsg_request(struct fc_bsg_job *job) } int -bfad_im_bsg_timeout(struct fc_bsg_job *job) +bfad_im_bsg_timeout(struct bsg_job *job) { /* Don't complete the BSG job request - return -EAGAIN * to reset bsg job timeout : for ELS/CT pass thru we diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index 836fdc221edd31ccabd9e4ec3227c247f5f12fdf..c81ec2a77ef5034d3fdd3c70f1466002e012be10 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -166,8 +166,8 @@ extern struct device_attribute *bfad_im_vport_attrs[]; irqreturn_t bfad_intx(int irq, void *dev_id); -int bfad_im_bsg_request(struct fc_bsg_job *job); -int bfad_im_bsg_timeout(struct fc_bsg_job *job); +int bfad_im_bsg_request(struct bsg_job *job); +int bfad_im_bsg_timeout(struct bsg_job *job); /* * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index f9ddb6156f1499cf2f5b11a00d4bd688b3dce32e..c639d5a02656abf9678f1ef358c4166e5db04a66 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -127,13 +127,6 @@ module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is " "initiating a FIP keep alive when debug logging is enabled."); -static 
int bnx2fc_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu); -/* notification function for CPU hotplug events */ -static struct notifier_block bnx2fc_cpu_notifier = { - .notifier_call = bnx2fc_cpu_callback, -}; - static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport) { return ((struct bnx2fc_interface *) @@ -970,7 +963,6 @@ static int bnx2fc_libfc_config(struct fc_lport *lport) sizeof(struct libfc_function_template)); fc_elsct_init(lport); fc_exch_init(lport); - fc_rport_init(lport); fc_disc_init(lport); fc_disc_config(lport, lport); return 0; @@ -2623,37 +2615,19 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) kthread_stop(thread); } -/** - * bnx2fc_cpu_callback - Handler for CPU hotplug events - * - * @nfb: The callback data block - * @action: The event triggering the callback - * @hcpu: The index of the CPU that the event is for - * - * This creates or destroys per-CPU data for fcoe - * - * Returns NOTIFY_OK always. - */ -static int bnx2fc_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) + +static int bnx2fc_cpu_online(unsigned int cpu) { - unsigned cpu = (unsigned long)hcpu; + printk(PFX "CPU %x online: Create Rx thread\n", cpu); + bnx2fc_percpu_thread_create(cpu); + return 0; +} - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - printk(PFX "CPU %x online: Create Rx thread\n", cpu); - bnx2fc_percpu_thread_create(cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); - bnx2fc_percpu_thread_destroy(cpu); - break; - default: - break; - } - return NOTIFY_OK; +static int bnx2fc_cpu_dead(unsigned int cpu) +{ + printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); + bnx2fc_percpu_thread_destroy(cpu); + return 0; } static int bnx2fc_slave_configure(struct scsi_device *sdev) @@ -2665,6 +2639,8 @@ static int bnx2fc_slave_configure(struct scsi_device *sdev) return 0; } +static enum cpuhp_state bnx2fc_online_state; + /** * bnx2fc_mod_init - module init entry point * @@ -2725,21 +2701,31 @@ static int __init bnx2fc_mod_init(void) spin_lock_init(&p->fp_work_lock); } - cpu_notifier_register_begin(); + get_online_cpus(); - for_each_online_cpu(cpu) { + for_each_online_cpu(cpu) bnx2fc_percpu_thread_create(cpu); - } - /* Initialize per CPU interrupt thread */ - __register_hotcpu_notifier(&bnx2fc_cpu_notifier); + rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "scsi/bnx2fc:online", + bnx2fc_cpu_online, NULL); + if (rc < 0) + goto stop_threads; + bnx2fc_online_state = rc; - cpu_notifier_register_done(); + cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead", + NULL, bnx2fc_cpu_dead); + put_online_cpus(); cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); return 0; +stop_threads: + for_each_online_cpu(cpu) + bnx2fc_percpu_thread_destroy(cpu); + put_online_cpus(); + kthread_stop(l2_thread); free_wq: destroy_workqueue(bnx2fc_wq); release_bt: @@ -2798,16 +2784,16 @@ static void __exit bnx2fc_mod_exit(void) if (l2_thread) kthread_stop(l2_thread); - cpu_notifier_register_begin(); - + get_online_cpus(); /* Destroy per cpu threads */ for_each_online_cpu(cpu) { bnx2fc_percpu_thread_destroy(cpu); } - __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); + cpuhp_remove_state_nocalls(bnx2fc_online_state); + cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD); - cpu_notifier_register_done(); + put_online_cpus(); destroy_workqueue(bnx2fc_wq); /* diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 
08ec318afb99ccdaf032959b701313234039c484..739bfb62aff62a0a63ee75642e69283412e50b15 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -80,7 +80,6 @@ static void bnx2fc_offload_session(struct fcoe_port *port, struct bnx2fc_rport *tgt, struct fc_rport_priv *rdata) { - struct fc_lport *lport = rdata->local_port; struct fc_rport *rport = rdata->rport; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; @@ -160,7 +159,7 @@ static void bnx2fc_offload_session(struct fcoe_port *port, tgt_init_err: if (tgt->fcoe_conn_id != -1) bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); - lport->tt.rport_logoff(rdata); + fc_rport_logoff(rdata); } void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index c8b410c24cf03215a588e41557b3eaf7faa46c13..86afc002814cd07bbc3b55b64ca31201208a514b 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -70,14 +70,6 @@ u64 iscsi_error_mask = 0x00; DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); -static int bnx2i_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu); -/* notification function for CPU hotplug events */ -static struct notifier_block bnx2i_cpu_notifier = { - .notifier_call = bnx2i_cpu_callback, -}; - - /** * bnx2i_identify_device - identifies NetXtreme II device type * @hba: Adapter structure pointer @@ -461,41 +453,21 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu) kthread_stop(thread); } - -/** - * bnx2i_cpu_callback - Handler for CPU hotplug events - * - * @nfb: The callback data block - * @action: The event triggering the callback - * @hcpu: The index of the CPU that the event is for - * - * This creates or destroys per-CPU data for iSCSI - * - * Returns NOTIFY_OK always. 
- */ -static int bnx2i_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int bnx2i_cpu_online(unsigned int cpu) { - unsigned cpu = (unsigned long)hcpu; + pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu); + bnx2i_percpu_thread_create(cpu); + return 0; +} - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n", - cpu); - bnx2i_percpu_thread_create(cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu); - bnx2i_percpu_thread_destroy(cpu); - break; - default: - break; - } - return NOTIFY_OK; +static int bnx2i_cpu_dead(unsigned int cpu) +{ + pr_info("CPU %x offline: Remove Rx thread\n", cpu); + bnx2i_percpu_thread_destroy(cpu); + return 0; } +static enum cpuhp_state bnx2i_online_state; /** * bnx2i_mod_init - module init entry point @@ -539,18 +511,28 @@ static int __init bnx2i_mod_init(void) p->iothread = NULL; } - cpu_notifier_register_begin(); + get_online_cpus(); for_each_online_cpu(cpu) bnx2i_percpu_thread_create(cpu); - /* Initialize per CPU interrupt thread */ - __register_hotcpu_notifier(&bnx2i_cpu_notifier); - - cpu_notifier_register_done(); + err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "scsi/bnx2i:online", + bnx2i_cpu_online, NULL); + if (err < 0) + goto remove_threads; + bnx2i_online_state = err; + cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead", + NULL, bnx2i_cpu_dead); + put_online_cpus(); return 0; +remove_threads: + for_each_online_cpu(cpu) + bnx2i_percpu_thread_destroy(cpu); + put_online_cpus(); + cnic_unregister_driver(CNIC_ULP_ISCSI); unreg_xport: iscsi_unregister_transport(&bnx2i_iscsi_transport); out: @@ -587,14 +569,14 @@ static void __exit bnx2i_mod_exit(void) } mutex_unlock(&bnx2i_dev_lock); - cpu_notifier_register_begin(); + get_online_cpus(); for_each_online_cpu(cpu) bnx2i_percpu_thread_destroy(cpu); - __unregister_hotcpu_notifier(&bnx2i_cpu_notifier); - - cpu_notifier_register_done(); + cpuhp_remove_state_nocalls(bnx2i_online_state); + cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD); + put_online_cpus(); iscsi_unregister_transport(&bnx2i_iscsi_transport); cnic_unregister_driver(CNIC_ULP_ISCSI); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 0039bebaa9e24407bc00650fe0906a60f05247af..9a2fdc305cf2a9a0c7b4f4fabae4191bb9f12ad1 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -85,6 +85,7 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *); static const struct cxgb4_uld_info cxgb4i_uld_info = { .name = DRV_MODULE_NAME, .nrxq = MAX_ULD_QSETS, + .ntxq = MAX_ULD_QSETS, .rxq_size = 1024, .lro = false, .add = t4_uld_add, @@ -188,7 +189,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, struct l2t_entry *e) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); - int t4 = is_t4(lldi->adapter_type); int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); unsigned long long opt0; unsigned int opt2; @@ -231,7 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, csk, &req->local_ip, ntohs(req->local_port), &req->peer_ip, ntohs(req->peer_port), csk->atid, csk->rss_qid); - } else { + } else if (is_t5(lldi->adapter_type)) { struct cpl_t5_act_open_req *req = (struct cpl_t5_act_open_req *)skb->head; u32 isn = (prandom_u32() & ~7UL) - 1; @@ -259,12 +259,45 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, 
csk, &req->local_ip, ntohs(req->local_port), &req->peer_ip, ntohs(req->peer_port), csk->atid, csk->rss_qid); + } else { + struct cpl_t6_act_open_req *req = + (struct cpl_t6_act_open_req *)skb->head; + u32 isn = (prandom_u32() & ~7UL) - 1; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + qid_atid)); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + req->opt0 = cpu_to_be64(opt0); + req->params = cpu_to_be64(FILTER_TUPLE_V( + cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t))); + req->rsvd = cpu_to_be32(isn); + + opt2 |= T5_ISS_VALID; + opt2 |= RX_FC_DISABLE_F; + opt2 |= T5_OPT_2_VALID_F; + + req->opt2 = cpu_to_be32(opt2); + req->rsvd2 = cpu_to_be32(0); + req->opt3 = cpu_to_be32(0); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", + csk, &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->atid, csk->rss_qid); } set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n", - (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk, + (&csk->saddr), (&csk->daddr), + CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state, csk->flags, csk->atid, csk->rss_qid); cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); @@ -275,7 +308,6 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, struct l2t_entry *e) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); - int t4 = is_t4(lldi->adapter_type); int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); unsigned long long opt0; unsigned int opt2; @@ -293,10 +325,9 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | - RX_FC_DISABLE_F | RSS_QUEUE_V(csk->rss_qid); - if (t4) { + if (is_t4(lldi->adapter_type)) { struct cpl_act_open_req6 *req = (struct cpl_act_open_req6 *)skb->head; @@ -321,7 +352,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, req->params = cpu_to_be32(cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t)); - } else { + } else if (is_t5(lldi->adapter_type)) { struct cpl_t5_act_open_req6 *req = (struct cpl_t5_act_open_req6 *)skb->head; @@ -344,12 +375,41 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t))); + } else { + struct cpl_t6_act_open_req6 *req = + (struct cpl_t6_act_open_req6 *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, + qid_atid)); + req->local_port = csk->saddr6.sin6_port; + req->peer_port = csk->daddr6.sin6_port; + req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); + req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + + 8); + req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); + req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + + 8); + req->opt0 = cpu_to_be64(opt0); + + opt2 |= RX_FC_DISABLE_F; + opt2 |= T5_OPT_2_VALID_F; + + req->opt2 = cpu_to_be32(opt2); + + req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t))); + + req->rsvd2 = cpu_to_be32(0); + req->opt3 = cpu_to_be32(0); } set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, 
rss_qid %u.\n", - t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid, + CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state, + csk->flags, csk->atid, &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), csk->rss_qid); @@ -741,7 +801,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) (&csk->saddr), (&csk->daddr), atid, tid, csk, csk->state, csk->flags, rcv_isn); - module_put(THIS_MODULE); + module_put(cdev->owner); cxgbi_sock_get(csk); csk->tid = tid; @@ -890,7 +950,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) if (is_neg_adv(status)) goto rel_skb; - module_put(THIS_MODULE); + module_put(cdev->owner); if (status && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && @@ -1172,6 +1232,101 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) __kfree_skb(skb); } +static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + struct sk_buff *lskb; + u32 tid = GET_TID(cpl); + u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find conn. for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, skb, + skb->len, pdu_len_ddp); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq); + cxgbi_skcb_flags(skb) = 0; + + skb_reset_transport_header(skb); + __skb_pull(skb, sizeof(*cpl)); + __pskb_trim(skb, ntohs(cpl->len)); + + if (!csk->skb_ulp_lhdr) + csk->skb_ulp_lhdr = skb; + + lskb = csk->skb_ulp_lhdr; + cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", + csk, csk->state, csk->flags, skb, lskb); + + __skb_queue_tail(&csk->receive_queue, skb); + spin_unlock_bh(&csk->lock); + return; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); +rel_skb: + __kfree_skb(skb); +} + +static void +cxgb4i_process_ddpvld(struct cxgbi_sock *csk, + struct sk_buff *skb, u32 ddpvld) +{ + if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { + pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n", + csk, skb, ddpvld, cxgbi_skcb_flags(skb)); + cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); + } + + if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { + pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n", + csk, skb, ddpvld, cxgbi_skcb_flags(skb)); + cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); + } + + if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n", + csk, skb, ddpvld); + cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); + } + + if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) && + !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n", + csk, skb, ddpvld); + cxgbi_skcb_set_flag(skb, 
SKCBF_RX_DATA_DDPD); + } +} + static void do_rx_data_ddp(struct cxgbi_device *cdev, struct sk_buff *skb) { @@ -1181,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev, unsigned int tid = GET_TID(rpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; - unsigned int status = ntohl(rpl->ddpvld); + u32 ddpvld = be32_to_cpu(rpl->ddpvld); csk = lookup_tid(t, tid); if (unlikely(!csk)) { @@ -1191,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev, log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n", - csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr); + csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr); spin_lock_bh(&csk->lock); @@ -1219,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev, pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n", csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); - if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { - pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n", - csk, lskb, status, cxgbi_skcb_flags(lskb)); - cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR); - } - if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { - pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n", - csk, lskb, status, cxgbi_skcb_flags(lskb)); - cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR); - } - if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { - log_debug(1 << CXGBI_DBG_PDU_RX, - "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n", - csk, lskb, status); - cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR); - } - if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) && - !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) { - log_debug(1 << CXGBI_DBG_PDU_RX, - "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n", - csk, lskb, status); - cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD); - } + cxgb4i_process_ddpvld(csk, lskb, ddpvld); + log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, lskb 0x%p, f 0x%lx.\n", csk, lskb, cxgbi_skcb_flags(lskb)); @@ -1259,6 +1393,98 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev, __kfree_skb(skb); } +static void +do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + struct sk_buff *data_skb = NULL; + u32 tid = GET_TID(rpl); + u32 ddpvld = be32_to_cpu(rpl->ddpvld); + u32 seq = be32_to_cpu(rpl->seq); + u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp); + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, " + "pdu_len_ddp %u, status %u.\n", + csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr, + ntohs(rpl->len), pdu_len_ddp, rpl->status); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + cxgbi_skcb_tcp_seq(skb) = seq; + cxgbi_skcb_flags(skb) = 0; + cxgbi_skcb_rx_pdulen(skb) = 0; + + skb_reset_transport_header(skb); + __skb_pull(skb, sizeof(*rpl)); + __pskb_trim(skb, be16_to_cpu(rpl->len)); + + csk->rcv_nxt = seq + pdu_len_ddp; + + if (csk->skb_ulp_lhdr) { + data_skb = 
skb_peek(&csk->receive_queue); + if (!data_skb || + !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) { + pr_err("Error! freelist data not found 0x%p, tid %u\n", + data_skb, tid); + + goto abort_conn; + } + __skb_unlink(data_skb, &csk->receive_queue); + + cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA); + + __skb_queue_tail(&csk->receive_queue, skb); + __skb_queue_tail(&csk->receive_queue, data_skb); + } else { + __skb_queue_tail(&csk->receive_queue, skb); + } + + csk->skb_ulp_lhdr = NULL; + + cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); + cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS); + cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL); + cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc); + + cxgb4i_process_ddpvld(csk, skb, ddpvld); + + log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n", + csk, skb, cxgbi_skcb_flags(skb)); + + cxgbi_conn_pdu_ready(csk); + spin_unlock_bh(&csk->lock); + + return; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); +rel_skb: + __kfree_skb(skb); +} + static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; @@ -1381,7 +1607,6 @@ static int init_act_open(struct cxgbi_sock *csk) void *daddr; unsigned int step; unsigned int size, size6; - int t4 = is_t4(lldi->adapter_type); unsigned int linkspeed; unsigned int rcv_winf, snd_winf; @@ -1410,7 +1635,7 @@ static int init_act_open(struct cxgbi_sock *csk) csk->atid = cxgb4_alloc_atid(lldi->tids, csk); if (csk->atid < 0) { pr_err("%s, NO atid available.\n", ndev->name); - return -EINVAL; + goto rel_resource_without_clip; } cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); @@ -1427,12 +1652,15 @@ static int init_act_open(struct cxgbi_sock *csk) cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); #endif - if (t4) { + if (is_t4(lldi->adapter_type)) { size = sizeof(struct cpl_act_open_req); size6 = sizeof(struct cpl_act_open_req6); - } else { + } else if (is_t5(lldi->adapter_type)) { size = sizeof(struct cpl_t5_act_open_req); size6 = sizeof(struct cpl_t5_act_open_req6); + } else { + size = sizeof(struct cpl_t6_act_open_req); + size6 = sizeof(struct cpl_t6_act_open_req6); } if (csk->csk_family == AF_INET) @@ -1451,8 +1679,8 @@ static int init_act_open(struct cxgbi_sock *csk) csk->mtu = dst_mtu(csk->dst); cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); csk->tx_chan = cxgb4_port_chan(ndev); - /* SMT two entries per row */ - csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1; + csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type, + cxgb4_port_viid(ndev)); step = lldi->ntxq / lldi->nchan; csk->txq_idx = cxgb4_port_idx(ndev) * step; step = lldi->nrxq / lldi->nchan; @@ -1485,7 +1713,11 @@ static int init_act_open(struct cxgbi_sock *csk) csk->mtu, csk->mss_idx, csk->smac_idx); /* must wait for either a act_open_rpl or act_open_establish */ - try_module_get(THIS_MODULE); + if (!try_module_get(cdev->owner)) { + pr_err("%s, try_module_get failed.\n", ndev->name); + goto rel_resource; + } + cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); if (csk->csk_family == AF_INET) send_act_open_req(csk, skb, csk->l2t); @@ -1520,10 +1752,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_CLOSE_CON_RPL] = do_close_con_rpl, [CPL_FW4_ACK] = do_fw4_ack, [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, - [CPL_ISCSI_DATA] = do_rx_iscsi_hdr, + [CPL_ISCSI_DATA] = do_rx_iscsi_data, [CPL_SET_TCB_RPL] = do_set_tcb_rpl, [CPL_RX_DATA_DDP] = do_rx_data_ddp, [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, + [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp, [CPL_RX_DATA] = do_rx_data, 
}; @@ -1793,10 +2026,12 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi) cdev->nports = lldi->nports; cdev->mtus = lldi->mtus; cdev->nmtus = NMTUS; - cdev->rx_credit_thres = cxgb4i_rx_credit_thres; + cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <= + CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0; cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN; cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); cdev->itp = &cxgb4i_iscsi_transport; + cdev->owner = THIS_MODULE; cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0])) << FW_VIID_PFN_S; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d1421139e6eac615d1e3ae2a9af1e02ee78d0822..9167bcd9fffe9b3a5fea9005671239a96551aace 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -642,6 +642,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) n->dev->name, ndev->name, mtu); } + if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { + pr_info("%s interface not up.\n", ndev->name); + err = -ENETDOWN; + goto rel_neigh; + } + cdev = cxgbi_device_find_by_netdev(ndev, &port); if (!cdev) { pr_info("dst %pI4, %s, NOT cxgbi device.\n", @@ -736,6 +742,12 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) } ndev = n->dev; + if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { + pr_info("%s interface not up.\n", ndev->name); + err = -ENETDOWN; + goto rel_rt; + } + if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) { pr_info("multi-cast route %pI6 port %u, dev %s.\n", daddr6->sin6_addr.s6_addr, @@ -896,6 +908,7 @@ EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) { struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; + struct module *owner = csk->cdev->owner; log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, (csk)->state, (csk)->flags, (csk)->tid); @@ -906,6 +919,8 @@ void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); __kfree_skb(skb); + + module_put(owner); } EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); @@ -1574,6 +1589,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) return -EIO; } + if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) && + cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) { + /* If completion flag is set and data is directly + * placed in to the host memory then update + * task->exp_datasn to the datasn in completion + * iSCSI hdr as T6 adapter generates completion only + * for the last pdu of a sequence. 
+ */ + itt_t itt = ((struct iscsi_data *)skb->data)->itt; + struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt); + u32 data_sn = be32_to_cpu(((struct iscsi_data *) + skb->data)->datasn); + if (task && task->sc) { + struct iscsi_tcp_task *tcp_task = task->dd_data; + + tcp_task->exp_datasn = data_sn; + } + } + return read_pdu_skb(conn, skb, 0, 0); } @@ -1627,15 +1661,15 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) csk->rcv_wup, cdev->rx_credit_thres, csk->rcv_win); + if (!cdev->rx_credit_thres) + return; + if (csk->state != CTP_ESTABLISHED) return; credits = csk->copied_seq - csk->rcv_wup; if (unlikely(!credits)) return; - if (unlikely(cdev->rx_credit_thres == 0)) - return; - must_send = credits + 16384 >= csk->rcv_win; if (must_send || credits >= cdev->rx_credit_thres) csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); @@ -2081,9 +2115,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task) /* never reached the xmit task callout */ if (tdata->skb) __kfree_skb(tdata->skb); - memset(tdata, 0, sizeof(*tdata)); task_release_itt(task, task->hdr_itt); + memset(tdata, 0, sizeof(*tdata)); + iscsi_tcp_cleanup_task(task); } EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index e7802738f5d28e0b7e7fe4e3f6bf8075197b838f..95ba99044c3e9c2af6f0a8deb656b878c15cf656 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -207,6 +207,7 @@ enum cxgbi_skcb_flags { SKCBF_RX_HDR, /* received pdu header */ SKCBF_RX_DATA, /* received pdu payload */ SKCBF_RX_STATUS, /* received ddp status */ + SKCBF_RX_ISCSI_COMPL, /* received iscsi completion */ SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */ SKCBF_RX_HCRC_ERR, /* header digest error */ SKCBF_RX_DCRC_ERR, /* data digest error */ @@ -467,6 +468,7 @@ struct cxgbi_device { struct pci_dev *pdev; struct dentry *debugfs_root; struct iscsi_transport *itp; + struct module *owner; unsigned int pfvf; unsigned int rx_credit_thres; diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 6e6815545a711cac237f0ae54223b64c7a912391..0e9de5d62da25f56bcb0645a35a98582896fc37b 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -19,6 +19,7 @@ #include #include #include +#include #include extern const struct file_operations cxlflash_cxl_fops; @@ -62,11 +63,6 @@ static inline void check_sizes(void) /* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */ #define CMD_BUFSIZE SIZE_4K -/* flags in IOA status area for host use */ -#define B_DONE 0x01 -#define B_ERROR 0x02 /* set with B_DONE */ -#define B_TIMEOUT 0x04 /* set with B_DONE & B_ERROR */ - enum cxlflash_lr_state { LINK_RESET_INVALID, LINK_RESET_REQUIRED, @@ -132,12 +128,9 @@ struct cxlflash_cfg { struct afu_cmd { struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */ struct sisl_ioasa sa; /* IOASA must follow IOARCB */ - spinlock_t slock; - struct completion cevent; - char *buf; /* per command buffer */ struct afu *parent; - int slot; - atomic_t free; + struct scsi_cmnd *scp; + struct completion cevent; u8 cmd_tmf:1; @@ -147,19 +140,31 @@ struct afu_cmd { */ } __aligned(cache_line_size()); +static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc) +{ + return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd)); +} + +static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) +{ + struct afu_cmd *afuc = sc_to_afuc(sc); + + memset(afuc, 0, sizeof(*afuc)); + return afuc; +} + struct afu { /* Stuff requiring 
alignment go first. */ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ - /* - * Command & data for AFU commands. - */ - struct afu_cmd cmd[CXLFLASH_NUM_CMDS]; /* Beware of alignment till here. Preferably introduce new * fields after this point */ + int (*send_cmd)(struct afu *, struct afu_cmd *); + void (*context_reset)(struct afu_cmd *); + /* AFU HW */ struct cxl_ioctl_start_work work; struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */ @@ -173,10 +178,10 @@ struct afu { u64 *hrrq_end; u64 *hrrq_curr; bool toggle; - bool read_room; - atomic64_t room; + atomic_t cmds_active; /* Number of currently active AFU commands */ + s64 room; + spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */ u64 hb; - u32 cmd_couts; /* Number of command checkouts */ u32 internal_lun; /* User-desired LUN mode for this AFU */ char version[16]; diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c index a0923cade6f3ead93b53dbb868e492df08635c51..6c318db90c85cee7e02019ee06a757f26102bf06 100644 --- a/drivers/scsi/cxlflash/lunmgt.c +++ b/drivers/scsi/cxlflash/lunmgt.c @@ -254,8 +254,14 @@ int cxlflash_manage_lun(struct scsi_device *sdev, if (lli->parent->mode != MODE_NONE) rc = -EBUSY; else { + /* + * Clean up local LUN for this port and reset table + * tracking when no more references exist. + */ sdev->hostdata = NULL; lli->port_sel &= ~CHAN2PORT(chan); + if (lli->port_sel == 0U) + lli->in_table = false; } } diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index b301655f91cd0c90a2dc3f120c93e98f907aa390..b17ebf6d0a7e2b49b237ee244ce7c115487906af 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -34,67 +34,6 @@ MODULE_AUTHOR("Manoj N. Kumar "); MODULE_AUTHOR("Matthew R. Ochs "); MODULE_LICENSE("GPL"); -/** - * cmd_checkout() - checks out an AFU command - * @afu: AFU to checkout from. - * - * Commands are checked out in a round-robin fashion. Note that since - * the command pool is larger than the hardware queue, the majority of - * times we will only loop once or twice before getting a command. The - * buffer and CDB within the command are initialized (zeroed) prior to - * returning. - * - * Return: The checked out command or NULL when command pool is empty. - */ -static struct afu_cmd *cmd_checkout(struct afu *afu) -{ - int k, dec = CXLFLASH_NUM_CMDS; - struct afu_cmd *cmd; - - while (dec--) { - k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1)); - - cmd = &afu->cmd[k]; - - if (!atomic_dec_if_positive(&cmd->free)) { - pr_devel("%s: returning found index=%d cmd=%p\n", - __func__, cmd->slot, cmd); - memset(cmd->buf, 0, CMD_BUFSIZE); - memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb)); - return cmd; - } - } - - return NULL; -} - -/** - * cmd_checkin() - checks in an AFU command - * @cmd: AFU command to checkin. - * - * Safe to pass commands that have already been checked in. Several - * internal tracking fields are reset as part of the checkin. Note - * that these are intentionally reset prior to toggling the free bit - * to avoid clobbering values in the event that the command is checked - * out right away. 
- */ -static void cmd_checkin(struct afu_cmd *cmd) -{ - cmd->rcb.scp = NULL; - cmd->rcb.timeout = 0; - cmd->sa.ioasc = 0; - cmd->cmd_tmf = false; - cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */ - - if (unlikely(atomic_inc_return(&cmd->free) != 1)) { - pr_err("%s: Freeing cmd (%d) that is not in use!\n", - __func__, cmd->slot); - return; - } - - pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot); -} - /** * process_cmd_err() - command error handler * @cmd: AFU command that experienced the error. @@ -212,7 +151,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp) * * Prepares and submits command that has either completed or timed out to * the SCSI stack. Checks AFU command back into command pool for non-internal - * (rcb.scp populated) commands. + * (cmd->scp populated) commands. */ static void cmd_complete(struct afu_cmd *cmd) { @@ -222,19 +161,14 @@ static void cmd_complete(struct afu_cmd *cmd) struct cxlflash_cfg *cfg = afu->parent; bool cmd_is_tmf; - spin_lock_irqsave(&cmd->slock, lock_flags); - cmd->sa.host_use_b[0] |= B_DONE; - spin_unlock_irqrestore(&cmd->slock, lock_flags); - - if (cmd->rcb.scp) { - scp = cmd->rcb.scp; + if (cmd->scp) { + scp = cmd->scp; if (unlikely(cmd->sa.ioasc)) process_cmd_err(cmd, scp); else scp->result = (DID_OK << 16); cmd_is_tmf = cmd->cmd_tmf; - cmd_checkin(cmd); /* Don't use cmd after here */ pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X " "ioasc=%d\n", __func__, scp, scp->result, @@ -254,49 +188,19 @@ static void cmd_complete(struct afu_cmd *cmd) } /** - * context_reset() - timeout handler for AFU commands + * context_reset_ioarrin() - reset command owner context via IOARRIN register * @cmd: AFU command that timed out. - * - * Sends a reset to the AFU. */ -static void context_reset(struct afu_cmd *cmd) +static void context_reset_ioarrin(struct afu_cmd *cmd) { int nretry = 0; u64 rrin = 0x1; - u64 room = 0; struct afu *afu = cmd->parent; - ulong lock_flags; + struct cxlflash_cfg *cfg = afu->parent; + struct device *dev = &cfg->dev->dev; pr_debug("%s: cmd=%p\n", __func__, cmd); - spin_lock_irqsave(&cmd->slock, lock_flags); - - /* Already completed? */ - if (cmd->sa.host_use_b[0] & B_DONE) { - spin_unlock_irqrestore(&cmd->slock, lock_flags); - return; - } - - cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT); - spin_unlock_irqrestore(&cmd->slock, lock_flags); - - /* - * We really want to send this reset at all costs, so spread - * out wait time on successive retries for available room. - */ - do { - room = readq_be(&afu->host_map->cmd_room); - atomic64_set(&afu->room, room); - if (room) - goto write_rrin; - udelay(1 << nretry); - } while (nretry++ < MC_ROOM_RETRY_CNT); - - pr_err("%s: no cmd_room to send reset\n", __func__); - return; - -write_rrin: - nretry = 0; writeq_be(rrin, &afu->host_map->ioarrin); do { rrin = readq_be(&afu->host_map->ioarrin); @@ -305,93 +209,81 @@ static void context_reset(struct afu_cmd *cmd) /* Double delay each time */ udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); + + dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n", + __func__, rrin, nretry); } /** - * send_cmd() - sends an AFU command + * send_cmd_ioarrin() - sends an AFU command via IOARRIN register * @afu: AFU associated with the host. * @cmd: AFU command to send. 
* * Return: * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure */ -static int send_cmd(struct afu *afu, struct afu_cmd *cmd) +static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) { struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; - int nretry = 0; int rc = 0; - u64 room; - long newval; + s64 room; + ulong lock_flags; /* - * This routine is used by critical users such an AFU sync and to - * send a task management function (TMF). Thus we want to retry a - * bit before returning an error. To avoid the performance penalty - * of MMIO, we spread the update of 'room' over multiple commands. + * To avoid the performance penalty of MMIO, spread the update of + * 'room' over multiple commands. */ -retry: - newval = atomic64_dec_if_positive(&afu->room); - if (!newval) { - do { - room = readq_be(&afu->host_map->cmd_room); - atomic64_set(&afu->room, room); - if (room) - goto write_ioarrin; - udelay(1 << nretry); - } while (nretry++ < MC_ROOM_RETRY_CNT); - - dev_err(dev, "%s: no cmd_room to send 0x%X\n", - __func__, cmd->rcb.cdb[0]); - - goto no_room; - } else if (unlikely(newval < 0)) { - /* This should be rare. i.e. Only if two threads race and - * decrement before the MMIO read is done. In this case - * just benefit from the other thread having updated - * afu->room. - */ - if (nretry++ < MC_ROOM_RETRY_CNT) { - udelay(1 << nretry); - goto retry; + spin_lock_irqsave(&afu->rrin_slock, lock_flags); + if (--afu->room < 0) { + room = readq_be(&afu->host_map->cmd_room); + if (room <= 0) { + dev_dbg_ratelimited(dev, "%s: no cmd_room to send " + "0x%02X, room=0x%016llX\n", + __func__, cmd->rcb.cdb[0], room); + afu->room = 0; + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; } - - goto no_room; + afu->room = room - 1; } -write_ioarrin: writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin); out: + spin_unlock_irqrestore(&afu->rrin_slock, lock_flags); pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd, cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc); return rc; - -no_room: - afu->read_room = true; - kref_get(&cfg->afu->mapcount); - schedule_work(&cfg->work_q); - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; } /** * wait_resp() - polls for a response or timeout to a sent AFU command * @afu: AFU associated with the host. * @cmd: AFU command that was sent. 
+ * + * Return: + * 0 on success, -1 on timeout/error */ -static void wait_resp(struct afu *afu, struct afu_cmd *cmd) +static int wait_resp(struct afu *afu, struct afu_cmd *cmd) { + int rc = 0; ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000); timeout = wait_for_completion_timeout(&cmd->cevent, timeout); - if (!timeout) - context_reset(cmd); + if (!timeout) { + afu->context_reset(cmd); + rc = -1; + } - if (unlikely(cmd->sa.ioasc != 0)) + if (unlikely(cmd->sa.ioasc != 0)) { pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, " "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0], cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc, cmd->sa.rc.fc_rc); + rc = -1; + } + + return rc; } /** @@ -405,24 +297,15 @@ static void wait_resp(struct afu *afu, struct afu_cmd *cmd) */ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) { - struct afu_cmd *cmd; - u32 port_sel = scp->device->channel + 1; - short lflag = 0; struct Scsi_Host *host = scp->device->host; struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; + struct afu_cmd *cmd = sc_to_afucz(scp); struct device *dev = &cfg->dev->dev; ulong lock_flags; int rc = 0; ulong to; - cmd = cmd_checkout(afu); - if (unlikely(!cmd)) { - dev_err(dev, "%s: could not get a free command\n", __func__); - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - /* When Task Management Function is active do not send another */ spin_lock_irqsave(&cfg->tmf_slock, lock_flags); if (cfg->tmf_active) @@ -430,28 +313,23 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) !cfg->tmf_active, cfg->tmf_slock); cfg->tmf_active = true; - cmd->cmd_tmf = true; spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + cmd->scp = scp; + cmd->parent = afu; + cmd->cmd_tmf = true; + cmd->rcb.ctx_id = afu->ctx_hndl; + cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; cmd->rcb.port_sel = port_sel; cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); - - lflag = SISL_REQ_FLAGS_TMF_CMD; - cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | - SISL_REQ_FLAGS_SUP_UNDERRUN | lflag); - - /* Stash the scp in the reserved field, for reuse during interrupt */ - cmd->rcb.scp = scp; - - /* Copy the CDB from the cmd passed in */ + SISL_REQ_FLAGS_SUP_UNDERRUN | + SISL_REQ_FLAGS_TMF_CMD); memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd)); - /* Send the command */ - rc = send_cmd(afu, cmd); + rc = afu->send_cmd(afu, cmd); if (unlikely(rc)) { - cmd_checkin(cmd); spin_lock_irqsave(&cfg->tmf_slock, lock_flags); cfg->tmf_active = false; spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); @@ -507,12 +385,12 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; - struct afu_cmd *cmd; + struct afu_cmd *cmd = sc_to_afucz(scp); + struct scatterlist *sg = scsi_sglist(scp); u32 port_sel = scp->device->channel + 1; - int nseg, i, ncount; - struct scatterlist *sg; + u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN; ulong lock_flags; - short lflag = 0; + int nseg = 0; int rc = 0; int kref_got = 0; @@ -552,55 +430,38 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) break; } - cmd = cmd_checkout(afu); - if (unlikely(!cmd)) { - dev_err(dev, "%s: could not get a free command\n", __func__); - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - kref_get(&cfg->afu->mapcount); kref_got = 1; + if (likely(sg)) { + nseg = scsi_dma_map(scp); + if (unlikely(nseg < 0)) { + dev_err(dev, "%s: Fail DMA 
map!\n", __func__); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + cmd->rcb.data_len = sg_dma_len(sg); + cmd->rcb.data_ea = sg_dma_address(sg); + } + + cmd->scp = scp; + cmd->parent = afu; + cmd->rcb.ctx_id = afu->ctx_hndl; + cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; cmd->rcb.port_sel = port_sel; cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); if (scp->sc_data_direction == DMA_TO_DEVICE) - lflag = SISL_REQ_FLAGS_HOST_WRITE; - else - lflag = SISL_REQ_FLAGS_HOST_READ; - - cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | - SISL_REQ_FLAGS_SUP_UNDERRUN | lflag); - - /* Stash the scp in the reserved field, for reuse during interrupt */ - cmd->rcb.scp = scp; - - nseg = scsi_dma_map(scp); - if (unlikely(nseg < 0)) { - dev_err(dev, "%s: Fail DMA map! nseg=%d\n", - __func__, nseg); - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } + req_flags |= SISL_REQ_FLAGS_HOST_WRITE; - ncount = scsi_sg_count(scp); - scsi_for_each_sg(scp, sg, ncount, i) { - cmd->rcb.data_len = sg_dma_len(sg); - cmd->rcb.data_ea = sg_dma_address(sg); - } - - /* Copy the CDB from the scsi_cmnd passed in */ + cmd->rcb.req_flags = req_flags; memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb)); - /* Send the command */ - rc = send_cmd(afu, cmd); - if (unlikely(rc)) { - cmd_checkin(cmd); + rc = afu->send_cmd(afu, cmd); + if (unlikely(rc)) scsi_dma_unmap(scp); - } - out: if (kref_got) kref_put(&afu->mapcount, afu_unmap); @@ -628,17 +489,9 @@ static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg) */ static void free_mem(struct cxlflash_cfg *cfg) { - int i; - char *buf = NULL; struct afu *afu = cfg->afu; if (cfg->afu) { - for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { - buf = afu->cmd[i].buf; - if (!((u64)buf & (PAGE_SIZE - 1))) - free_page((ulong)buf); - } - free_pages((ulong)afu, get_order(sizeof(struct afu))); cfg->afu = NULL; } @@ -650,30 +503,16 @@ static void free_mem(struct cxlflash_cfg *cfg) * * Safe to call with AFU in a partially allocated/initialized state. * - * Cleans up all state associated with the command queue, and unmaps + * Waits for any active internal AFU commands to timeout and then unmaps * the MMIO space. - * - * - complete() will take care of commands we initiated (they'll be checked - * in as part of the cleanup that occurs after the completion) - * - * - cmd_checkin() will take care of entries that we did not initiate and that - * have not (and will not) complete because they are sitting on a [now stale] - * hardware queue */ static void stop_afu(struct cxlflash_cfg *cfg) { - int i; struct afu *afu = cfg->afu; - struct afu_cmd *cmd; if (likely(afu)) { - for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { - cmd = &afu->cmd[i]; - complete(&cmd->cevent); - if (!atomic_read(&cmd->free)) - cmd_checkin(cmd); - } - + while (atomic_read(&afu->cmds_active)) + ssleep(1); if (likely(afu->afu_map)) { cxl_psa_unmap((void __iomem *)afu->afu_map); afu->afu_map = NULL; @@ -886,8 +725,6 @@ static void cxlflash_remove(struct pci_dev *pdev) static int alloc_mem(struct cxlflash_cfg *cfg) { int rc = 0; - int i; - char *buf = NULL; struct device *dev = &cfg->dev->dev; /* AFU is ~12k, i.e. 
only one 64k page or up to four 4k pages */ @@ -901,25 +738,6 @@ static int alloc_mem(struct cxlflash_cfg *cfg) } cfg->afu->parent = cfg; cfg->afu->afu_map = NULL; - - for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) { - if (!((u64)buf & (PAGE_SIZE - 1))) { - buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - if (unlikely(!buf)) { - dev_err(dev, - "%s: Allocate command buffers fail!\n", - __func__); - rc = -ENOMEM; - free_mem(cfg); - goto out; - } - } - - cfg->afu->cmd[i].buf = buf; - atomic_set(&cfg->afu->cmd[i].free, 1); - cfg->afu->cmd[i].slot = i; - } - out: return rc; } @@ -1549,13 +1367,6 @@ static void init_pcr(struct cxlflash_cfg *cfg) /* Program the Endian Control for the master context */ writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl); - - /* Initialize cmd fields that never change */ - for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { - afu->cmd[i].rcb.ctx_id = afu->ctx_hndl; - afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED; - afu->cmd[i].rcb.rrq = 0x0; - } } /** @@ -1644,19 +1455,8 @@ static int init_global(struct cxlflash_cfg *cfg) static int start_afu(struct cxlflash_cfg *cfg) { struct afu *afu = cfg->afu; - struct afu_cmd *cmd; - - int i = 0; int rc = 0; - for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { - cmd = &afu->cmd[i]; - - init_completion(&cmd->cevent); - spin_lock_init(&cmd->slock); - cmd->parent = afu; - } - init_pcr(cfg); /* After an AFU reset, RRQ entries are stale, clear them */ @@ -1829,6 +1629,9 @@ static int init_afu(struct cxlflash_cfg *cfg) goto err2; } + afu->send_cmd = send_cmd_ioarrin; + afu->context_reset = context_reset_ioarrin; + pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__, afu->version, afu->interface_version); @@ -1840,7 +1643,8 @@ static int init_afu(struct cxlflash_cfg *cfg) } afu_err_intr_init(cfg->afu); - atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room)); + spin_lock_init(&afu->rrin_slock); + afu->room = readq_be(&afu->host_map->cmd_room); /* Restore the LUN mappings */ cxlflash_restore_luntable(cfg); @@ -1884,8 +1688,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; struct afu_cmd *cmd = NULL; + char *buf = NULL; int rc = 0; - int retry_cnt = 0; static DEFINE_MUTEX(sync_active); if (cfg->state != STATE_NORMAL) { @@ -1894,27 +1698,23 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, } mutex_lock(&sync_active); -retry: - cmd = cmd_checkout(afu); - if (unlikely(!cmd)) { - retry_cnt++; - udelay(1000 * retry_cnt); - if (retry_cnt < MC_RETRY_CNT) - goto retry; - dev_err(dev, "%s: could not get a free command\n", __func__); + atomic_inc(&afu->cmds_active); + buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); + if (unlikely(!buf)) { + dev_err(dev, "%s: no memory for command\n", __func__); rc = -1; goto out; } - pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); + cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); + init_completion(&cmd->cevent); + cmd->parent = afu; - memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb)); + pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; - cmd->rcb.port_sel = 0x0; /* NA */ - cmd->rcb.lun_id = 0x0; /* NA */ - cmd->rcb.data_len = 0x0; - cmd->rcb.data_ea = 0x0; + cmd->rcb.ctx_id = afu->ctx_hndl; + cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ @@ -1924,20 +1724,17 @@ int cxlflash_afu_sync(struct afu *afu, 
ctx_hndl_t ctx_hndl_u, *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u); *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); - rc = send_cmd(afu, cmd); + rc = afu->send_cmd(afu, cmd); if (unlikely(rc)) goto out; - wait_resp(afu, cmd); - - /* Set on timeout */ - if (unlikely((cmd->sa.ioasc != 0) || - (cmd->sa.host_use_b[0] & B_ERROR))) + rc = wait_resp(afu, cmd); + if (unlikely(rc)) rc = -1; out: + atomic_dec(&afu->cmds_active); mutex_unlock(&sync_active); - if (cmd) - cmd_checkin(cmd); + kfree(buf); pr_debug("%s: returning rc=%d\n", __func__, rc); return rc; } @@ -2376,8 +2173,9 @@ static struct scsi_host_template driver_template = { .change_queue_depth = cxlflash_change_queue_depth, .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, .can_queue = CXLFLASH_MAX_CMDS, + .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, .this_id = -1, - .sg_tablesize = SG_NONE, /* No scatter gather support */ + .sg_tablesize = 1, /* No scatter gather support */ .max_sectors = CXLFLASH_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = cxlflash_host_attrs, @@ -2412,7 +2210,6 @@ MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); * Handles the following events: * - Link reset which cannot be performed on interrupt context due to * blocking up to a few seconds - * - Read AFU command room * - Rescan the host */ static void cxlflash_worker_thread(struct work_struct *work) @@ -2449,11 +2246,6 @@ static void cxlflash_worker_thread(struct work_struct *work) cfg->lr_state = LINK_RESET_COMPLETE; } - if (afu->read_room) { - atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room)); - afu->read_room = false; - } - spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index 347fc16719754e7a7db5db9a5f0f559ddb61f0bb..1a2d09c148b34a7bec8f9295eda4ec912fbdffce 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -72,7 +72,7 @@ struct sisl_ioarcb { u16 timeout; /* in units specified by req_flags */ u32 rsvd1; u8 cdb[16]; /* must be in big endian */ - struct scsi_cmnd *scp; + u64 reserved; /* Reserved area */ } __packed; struct sisl_rc { diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 241829e596680f8be89941fc43ce0db642e29b18..d704752b63329f8094f52ec57cea5d823104c43c 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -95,7 +95,7 @@ struct alua_port_group { struct alua_dh_data { struct list_head node; - struct alua_port_group *pg; + struct alua_port_group __rcu *pg; int group_id; spinlock_t pg_lock; struct scsi_device *sdev; @@ -154,7 +154,8 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, sshdr, ALUA_FAILOVER_TIMEOUT * HZ, - ALUA_FAILOVER_RETRIES, NULL, req_flags); + ALUA_FAILOVER_RETRIES, NULL, + req_flags, 0); } /* @@ -187,7 +188,8 @@ static int submit_stpg(struct scsi_device *sdev, int group_id, return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, sshdr, ALUA_FAILOVER_TIMEOUT * HZ, - ALUA_FAILOVER_RETRIES, NULL, req_flags); + ALUA_FAILOVER_RETRIES, NULL, + req_flags, 0); } static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, @@ -369,7 +371,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, /* Check for existing port group references */ 
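The __rcu annotation added to alua_dh_data's pg pointer above is what forces the accessor changes in the hunks that follow: paths holding pg_lock now go through rcu_dereference_protected(..., lockdep_is_held(&h->pg_lock)), while the stray __rcu markers on plain local variables are dropped so sparse stops warning. A minimal sketch of the same idiom, using made-up names (item, holder) rather than anything from this driver:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct item {
        struct rcu_head rcu;
        int value;
    };

    struct holder {
        spinlock_t lock;        /* assumed spin_lock_init()'d at setup; serializes updates of ->cur */
        struct item __rcu *cur; /* read under RCU, written under the lock */
    };

    /* Writer: swap the pointer under the lock; free the old object only
     * after a grace period so lockless readers never touch freed memory. */
    static void holder_set(struct holder *h, struct item *new)
    {
        struct item *old;

        spin_lock(&h->lock);
        old = rcu_dereference_protected(h->cur, lockdep_is_held(&h->lock));
        rcu_assign_pointer(h->cur, new);
        spin_unlock(&h->lock);

        if (old)
            kfree_rcu(old, rcu);
    }

    /* Reader: no lock, just an RCU read-side critical section. */
    static int holder_peek(struct holder *h)
    {
        struct item *p;
        int val = -1;

        rcu_read_lock();
        p = rcu_dereference(h->cur);
        if (p)
            val = p->value;
        rcu_read_unlock();
        return val;
    }

With the annotation in place, a bare old_pg = h->pg under the lock would draw a sparse address-space warning, which is exactly what the conversion just below avoids.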
spin_lock(&h->pg_lock); - old_pg = h->pg; + old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); if (old_pg != pg) { /* port group has changed. Update to new port group */ if (h->pg) { @@ -388,7 +390,9 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, list_add_rcu(&h->node, &pg->dh_list); spin_unlock_irqrestore(&pg->lock, flags); - alua_rtpg_queue(h->pg, sdev, NULL, true); + alua_rtpg_queue(rcu_dereference_protected(h->pg, + lockdep_is_held(&h->pg_lock)), + sdev, NULL, true); spin_unlock(&h->pg_lock); if (old_pg) @@ -793,6 +797,7 @@ static void alua_rtpg_work(struct work_struct *work) WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); WARN_ON(pg->flags & ALUA_PG_RUN_STPG); spin_unlock_irqrestore(&pg->lock, flags); + kref_put(&pg->kref, release_port_group); return; } if (pg->flags & ALUA_SYNC_STPG) @@ -890,6 +895,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg, /* Do not queue if the worker is already running */ if (!(pg->flags & ALUA_PG_RUNNING)) { kref_get(&pg->kref); + sdev = NULL; start_queue = 1; } } @@ -901,7 +907,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg, if (start_queue && !queue_delayed_work(alua_wq, &pg->rtpg_work, msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { - scsi_device_put(sdev); + if (sdev) + scsi_device_put(sdev); kref_put(&pg->kref, release_port_group); } } @@ -937,7 +944,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) static int alua_set_params(struct scsi_device *sdev, const char *params) { struct alua_dh_data *h = sdev->handler_data; - struct alua_port_group __rcu *pg = NULL; + struct alua_port_group *pg = NULL; unsigned int optimize = 0, argc; const char *p = params; int result = SCSI_DH_OK; @@ -984,7 +991,7 @@ static int alua_activate(struct scsi_device *sdev, struct alua_dh_data *h = sdev->handler_data; int err = SCSI_DH_OK; struct alua_queue_data *qdata; - struct alua_port_group __rcu *pg; + struct alua_port_group *pg; qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); if (!qdata) { @@ -1048,7 +1055,7 @@ static void alua_check(struct scsi_device *sdev, bool force) static int alua_prep_fn(struct scsi_device *sdev, struct request *req) { struct alua_dh_data *h = sdev->handler_data; - struct alua_port_group __rcu *pg; + struct alua_port_group *pg; unsigned char state = SCSI_ACCESS_STATE_OPTIMAL; int ret = BLKPREP_OK; @@ -1063,7 +1070,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req) state != SCSI_ACCESS_STATE_ACTIVE && state != SCSI_ACCESS_STATE_LBA) { ret = BLKPREP_KILL; - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; } return ret; @@ -1118,7 +1125,7 @@ static void alua_bus_detach(struct scsi_device *sdev) struct alua_port_group *pg; spin_lock(&h->pg_lock); - pg = h->pg; + pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); rcu_assign_pointer(h->pg, NULL); h->sdev = NULL; spin_unlock(&h->pg_lock); diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 375d81850f15ad2b0e2125aac6672a3cded4cb60..5b80746980b8fffef0345e4abd21b1bd5e1030dc 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -452,7 +452,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req) if (h->lun_state != CLARIION_LUN_OWNED) { ret = BLKPREP_KILL; - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; } return ret; diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 
9406d5f4a3d3893b6ac8f20308a8ff812c90177f..308e87195dc1ec60b076f03671000945fde56919 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -266,7 +266,7 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) if (h->path_state != HP_SW_PATH_ACTIVE) { ret = BLKPREP_KILL; - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; } return ret; diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 06fbd0b0c68a308876ef98d6431ba7d56233ca95..00d9c326158eba8f4e94777e674873a9d3cd0c8d 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -724,7 +724,7 @@ static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) if (h->state != RDAC_STATE_ACTIVE) { ret = BLKPREP_KILL; - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; } return ret; diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c index 9b5a457d4bca4371a147af0cf786dc08a7c97f9d..6af3394d051dcebee8394e01a5cebe657f044968 100644 --- a/drivers/scsi/dmx3191d.c +++ b/drivers/scsi/dmx3191d.c @@ -34,13 +34,13 @@ * Definitions for the generic 5380 driver. */ -#define NCR5380_read(reg) inb(instance->io_port + reg) -#define NCR5380_write(reg, value) outb(value, instance->io_port + reg) +#define NCR5380_read(reg) inb(hostdata->base + (reg)) +#define NCR5380_write(reg, value) outb(value, hostdata->base + (reg)) -#define NCR5380_dma_xfer_len(instance, cmd, phase) (0) -#define NCR5380_dma_recv_setup(instance, dst, len) (0) -#define NCR5380_dma_send_setup(instance, src, len) (0) -#define NCR5380_dma_residual(instance) (0) +#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none +#define NCR5380_dma_recv_setup NCR5380_dma_setup_none +#define NCR5380_dma_send_setup NCR5380_dma_setup_none +#define NCR5380_dma_residual NCR5380_dma_residual_none #define NCR5380_implementation_fields /* none */ @@ -71,6 +71,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *shost; + struct NCR5380_hostdata *hostdata; unsigned long io; int error = -ENODEV; @@ -88,7 +89,9 @@ static int dmx3191d_probe_one(struct pci_dev *pdev, sizeof(struct NCR5380_hostdata)); if (!shost) goto out_release_region; - shost->io_port = io; + + hostdata = shost_priv(shost); + hostdata->base = io; /* This card does not seem to raise an interrupt on pdev->irq. * Steam-powered SCSI controllers run without an IRQ anyway. 
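Both dmx3191d hunks around this point move the card's base address out of the generic shost->io_port field and into the NCR5380 private area that scsi_host_alloc() tacks onto the host, reached through shost_priv(). The idiom, reduced to a sketch with invented names (demo_hostdata and demo_alloc are not part of this patch):

    #include <scsi/scsi_host.h>

    struct demo_hostdata {
        unsigned long base;     /* I/O base kept in driver-private data */
    };

    static struct Scsi_Host *demo_alloc(struct scsi_host_template *tpnt,
                                        unsigned long io)
    {
        struct Scsi_Host *shost;
        struct demo_hostdata *hostdata;

        /* The second argument sizes the private area the midlayer allocates. */
        shost = scsi_host_alloc(tpnt, sizeof(*hostdata));
        if (!shost)
            return NULL;

        hostdata = shost_priv(shost);   /* pointer to that private area */
        hostdata->base = io;
        return shost;
    }

Once the base lives there, the NCR5380_read()/NCR5380_write() macros redefined earlier in this file can resolve hostdata->base directly instead of reaching back into the Scsi_Host.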
@@ -125,7 +128,8 @@ static int dmx3191d_probe_one(struct pci_dev *pdev, static void dmx3191d_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); - unsigned long io = shost->io_port; + struct NCR5380_hostdata *hostdata = shost_priv(shost); + unsigned long io = hostdata->base; scsi_remove_host(shost); @@ -149,18 +153,7 @@ static struct pci_driver dmx3191d_pci_driver = { .remove = dmx3191d_remove_one, }; -static int __init dmx3191d_init(void) -{ - return pci_register_driver(&dmx3191d_pci_driver); -} - -static void __exit dmx3191d_exit(void) -{ - pci_unregister_driver(&dmx3191d_pci_driver); -} - -module_init(dmx3191d_init); -module_exit(dmx3191d_exit); +module_pci_driver(dmx3191d_pci_driver); MODULE_AUTHOR("Massimo Piccioni "); MODULE_DESCRIPTION("Domex DMX3191D SCSI driver"); diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 21c8d210c456d839fd1e63299cc6789938b08d40..5f75e638ec95af5476f15780bf4fa6f7ea12a0b7 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -37,7 +37,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver"); //////////////////////////////////////////////////////////////// #include /* For SCSI-Passthrough */ -#include +#include #include #include /* for kmalloc() */ @@ -651,7 +651,6 @@ static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply) } spin_unlock_irqrestore(pHba->host->host_lock, flags); if (i >= nr) { - kfree (reply); printk(KERN_WARNING"%s: Too many outstanding " "ioctl commands\n", pHba->name); return (u32)-1; @@ -1754,8 +1753,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) sg_offset = (msg[0]>>4)&0xf; msg[2] = 0x40000000; // IOCTL context msg[3] = adpt_ioctl_to_context(pHba, reply); - if (msg[3] == (u32)-1) + if (msg[3] == (u32)-1) { + kfree(reply); return -EBUSY; + } memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize); if(sg_offset) { @@ -3350,7 +3351,7 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, if (opblk_va == NULL) { dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen), resblk_va, resblk_pa); - printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n", + printk(KERN_CRIT "%s: query operation failed; Out of memory.\n", pHba->name); return -ENOMEM; } diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 9bd41a35a78ae970e8e5e420d4e1157bed2b7d57..59150cad03532e1e1ee15ad0d8fcdf35229d07c3 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -63,6 +63,14 @@ unsigned int fcoe_debug_logging; module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); +unsigned int fcoe_e_d_tov = 2 * 1000; +module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000"); + +unsigned int fcoe_r_a_tov = 2 * 2 * 1000; +module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000"); + static DEFINE_MUTEX(fcoe_config_mutex); static struct workqueue_struct *fcoe_wq; @@ -582,7 +590,8 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) * Use default VLAN for FIP VLAN discovery protocol */ frame = (struct fip_frame *)skb->data; - if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) && + if (ntohs(frame->eth.h_proto) == ETH_P_FIP && + ntohs(frame->fip.fip_op) == FIP_OP_VLAN && fcoe->realdev != fcoe->netdev) skb->dev = fcoe->realdev; else @@ -633,8 +642,8 @@ static int fcoe_lport_config(struct fc_lport *lport) lport->qfull = 0; 
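fcoe's E_D_TOV and R_A_TOV defaults are no longer hard-coded: the module parameters introduced in the first fcoe.c hunk above feed fcoe_lport_config(), whose hunk continues just below and copies them into every newly configured lport. A stripped-down sketch of the mechanism, with a placeholder demo_ prefix so it is not mistaken for the module's real symbols:

    #include <linux/module.h>
    #include <linux/stat.h>

    /* Millisecond defaults matching FC-FS; S_IWUSR also exposes them as
     * writable files under /sys/module/<module>/parameters/. */
    static unsigned int demo_e_d_tov = 2 * 1000;
    module_param_named(e_d_tov, demo_e_d_tov, uint, S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");

    static unsigned int demo_r_a_tov = 2 * 2 * 1000;
    module_param_named(r_a_tov, demo_r_a_tov, uint, S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000");

Writing the sysfs file later only changes the variable; lports configured before the write keep whatever value they copied, which lines up with the per-controller r_a_tov/e_d_tov attributes added to fcoe_sysfs.c further down only accepting updates while the controller is disabled.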
lport->max_retry_count = 3; lport->max_rport_retry_count = 3; - lport->e_d_tov = 2 * 1000; /* FC-FS default */ - lport->r_a_tov = 2 * 2 * 1000; + lport->e_d_tov = fcoe_e_d_tov; + lport->r_a_tov = fcoe_r_a_tov; lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); lport->does_npiv = 1; @@ -2160,11 +2169,13 @@ static bool fcoe_match(struct net_device *netdev) */ static void fcoe_dcb_create(struct fcoe_interface *fcoe) { + int ctlr_prio = TC_PRIO_BESTEFFORT; + int fcoe_prio = TC_PRIO_INTERACTIVE; + struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); #ifdef CONFIG_DCB int dcbx; u8 fup, up; struct net_device *netdev = fcoe->realdev; - struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); struct dcb_app app = { .priority = 0, .protocol = ETH_P_FCOE @@ -2186,10 +2197,12 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe) fup = dcb_getapp(netdev, &app); } - fcoe->priority = ffs(up) ? ffs(up) - 1 : 0; - ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority; + fcoe_prio = ffs(up) ? ffs(up) - 1 : 0; + ctlr_prio = ffs(fup) ? ffs(fup) - 1 : fcoe_prio; } #endif + fcoe->priority = fcoe_prio; + ctlr->priority = ctlr_prio; } enum fcoe_create_link_state { diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index dcf36537a767c72d9a2e956115e09f7d5f82dde9..cea57e27e713a672d987a3fe2a471eb6aa74cbee 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -801,6 +801,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, return -EINPROGRESS; drop: kfree_skb(skb); + LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n", + op, ntoh24(fh->fh_d_id)); return -EINVAL; } EXPORT_SYMBOL(fcoe_ctlr_els_send); @@ -1316,7 +1318,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) * The overall length has already been checked. */ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, - struct fip_header *fh) + struct sk_buff *skb) { struct fip_desc *desc; struct fip_mac_desc *mp; @@ -1331,20 +1333,49 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, int num_vlink_desc; int reset_phys_port = 0; struct fip_vn_desc **vlink_desc_arr = NULL; + struct fip_header *fh = (struct fip_header *)skb->data; + struct ethhdr *eh = eth_hdr(skb); LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); - if (!fcf || !lport->port_id) { + if (!fcf) { /* * We are yet to select best FCF, but we got CVL in the * meantime. reset the ctlr and let it rediscover the FCF */ + LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been " + "selected yet\n"); mutex_lock(&fip->ctlr_mutex); fcoe_ctlr_reset(fip); mutex_unlock(&fip->ctlr_mutex); return; } + /* + * If we've selected an FCF check that the CVL is from there to avoid + * processing CVLs from an unexpected source. If it is from an + * unexpected source drop it on the floor. + */ + if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) { + LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address " + "mismatch with FCF src=%pM\n", eh->h_source); + return; + } + + /* + * If we haven't logged into the fabric but receive a CVL we should + * reset everything and go back to solicitation. + */ + if (!lport->port_id) { + LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n"); + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_reset(fip); + mutex_unlock(&fip->ctlr_mutex); + fc_lport_reset(fip->lp); + fcoe_ctlr_solicit(fip, NULL); + return; + } + /* * mask of required descriptors. Validating each one clears its bit. 
*/ @@ -1576,7 +1607,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) if (op == FIP_OP_DISC && sub == FIP_SC_ADV) fcoe_ctlr_recv_adv(fip, skb); else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) - fcoe_ctlr_recv_clr_vlink(fip, fiph); + fcoe_ctlr_recv_clr_vlink(fip, skb); kfree_skb(skb); return 0; drop: @@ -2122,7 +2153,7 @@ static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport, LIBFCOE_FIP_DBG(fip, "rport FLOGI limited port_id %6.6x\n", rdata->ids.port_id); - lport->tt.rport_logoff(rdata); + fc_rport_logoff(rdata); } break; default: @@ -2145,9 +2176,15 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport) { struct fc_rport_priv *rdata; + rcu_read_lock(); + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { + if (kref_get_unless_zero(&rdata->kref)) { + fc_rport_logoff(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + } + } + rcu_read_unlock(); mutex_lock(&lport->disc.disc_mutex); - list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) - lport->tt.rport_logoff(rdata); lport->disc.disc_callback = NULL; mutex_unlock(&lport->disc.disc_mutex); } @@ -2178,7 +2215,7 @@ static void fcoe_ctlr_disc_stop(struct fc_lport *lport) static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport) { fcoe_ctlr_disc_stop(lport); - lport->tt.rport_flush_queue(); + fc_rport_flush_queue(); synchronize_rcu(); } @@ -2393,6 +2430,8 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, switch (fip->state) { case FIP_ST_VNMP_CLAIM: case FIP_ST_VNMP_UP: + LIBFCOE_FIP_DBG(fip, "vn_probe_req: send reply, state %x\n", + fip->state); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, frport->enode_mac, 0); break; @@ -2407,15 +2446,21 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, */ if (fip->lp->wwpn > rdata->ids.port_name && !(frport->flags & FIP_FL_REC_OR_P2P)) { + LIBFCOE_FIP_DBG(fip, "vn_probe_req: " + "port_id collision\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, frport->enode_mac, 0); break; } /* fall through */ case FIP_ST_VNMP_START: + LIBFCOE_FIP_DBG(fip, "vn_probe_req: " + "restart VN2VN negotiation\n"); fcoe_ctlr_vn_restart(fip); break; default: + LIBFCOE_FIP_DBG(fip, "vn_probe_req: ignore state %x\n", + fip->state); break; } } @@ -2437,9 +2482,12 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, case FIP_ST_VNMP_PROBE1: case FIP_ST_VNMP_PROBE2: case FIP_ST_VNMP_CLAIM: + LIBFCOE_FIP_DBG(fip, "vn_probe_reply: restart state %x\n", + fip->state); fcoe_ctlr_vn_restart(fip); break; case FIP_ST_VNMP_UP: + LIBFCOE_FIP_DBG(fip, "vn_probe_reply: send claim notify\n"); fcoe_ctlr_vn_send_claim(fip); break; default: @@ -2467,26 +2515,33 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) return; mutex_lock(&lport->disc.disc_mutex); - rdata = lport->tt.rport_create(lport, port_id); + rdata = fc_rport_create(lport, port_id); if (!rdata) { mutex_unlock(&lport->disc.disc_mutex); return; } + mutex_lock(&rdata->rp_mutex); + mutex_unlock(&lport->disc.disc_mutex); rdata->ops = &fcoe_ctlr_vn_rport_ops; rdata->disc_id = lport->disc.disc_id; ids = &rdata->ids; if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) || - (ids->node_name != -1 && ids->node_name != new->ids.node_name)) - lport->tt.rport_logoff(rdata); + (ids->node_name != -1 && ids->node_name != new->ids.node_name)) { + mutex_unlock(&rdata->rp_mutex); + LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id); + fc_rport_logoff(rdata); + mutex_lock(&rdata->rp_mutex); + } ids->port_name = new->ids.port_name; ids->node_name 
= new->ids.node_name; - mutex_unlock(&lport->disc.disc_mutex); + mutex_unlock(&rdata->rp_mutex); frport = fcoe_ctlr_rport(rdata); - LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n", - port_id, frport->fcoe_len ? "old" : "new"); + LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n", + port_id, frport->fcoe_len ? "old" : "new", + rdata->rp_state); *frport = *fcoe_ctlr_rport(new); frport->time = 0; } @@ -2506,12 +2561,12 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) struct fcoe_rport *frport; int ret = -1; - rdata = lport->tt.rport_lookup(lport, port_id); + rdata = fc_rport_lookup(lport, port_id); if (rdata) { frport = fcoe_ctlr_rport(rdata); memcpy(mac, frport->enode_mac, ETH_ALEN); ret = 0; - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); } return ret; } @@ -2529,6 +2584,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, struct fcoe_rport *frport = fcoe_ctlr_rport(new); if (frport->flags & FIP_FL_REC_OR_P2P) { + LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; } @@ -2536,25 +2592,37 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, case FIP_ST_VNMP_START: case FIP_ST_VNMP_PROBE1: case FIP_ST_VNMP_PROBE2: - if (new->ids.port_id == fip->port_id) + if (new->ids.port_id == fip->port_id) { + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " + "restart, state %d\n", + fip->state); fcoe_ctlr_vn_restart(fip); + } break; case FIP_ST_VNMP_CLAIM: case FIP_ST_VNMP_UP: if (new->ids.port_id == fip->port_id) { if (new->ids.port_name > fip->lp->wwpn) { + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " + "restart, port_id collision\n"); fcoe_ctlr_vn_restart(fip); break; } + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " + "send claim notify\n"); fcoe_ctlr_vn_send_claim(fip); break; } + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n", + new->ids.port_id); fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac, min((u32)frport->fcoe_len, fcoe_ctlr_fcoe_size(fip))); fcoe_ctlr_vn_add(fip, new); break; default: + LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " + "ignoring claim from %x\n", new->ids.port_id); break; } } @@ -2591,19 +2659,26 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, frport = fcoe_ctlr_rport(new); if (frport->flags & FIP_FL_REC_OR_P2P) { + LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; } - rdata = lport->tt.rport_lookup(lport, new->ids.port_id); + rdata = fc_rport_lookup(lport, new->ids.port_id); if (rdata) { if (rdata->ids.node_name == new->ids.node_name && rdata->ids.port_name == new->ids.port_name) { frport = fcoe_ctlr_rport(rdata); - if (!frport->time && fip->state == FIP_ST_VNMP_UP) - lport->tt.rport_login(rdata); + LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n", + rdata->ids.port_id); + if (!frport->time && fip->state == FIP_ST_VNMP_UP) { + LIBFCOE_FIP_DBG(fip, "beacon expired " + "for rport %x\n", + rdata->ids.port_id); + fc_rport_login(rdata); + } frport->time = jiffies; } - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); return; } if (fip->state != FIP_ST_VNMP_UP) @@ -2638,11 +2713,15 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) unsigned long deadline; next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10); - mutex_lock(&lport->disc.disc_mutex); + rcu_read_lock(); list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { + if 
(!kref_get_unless_zero(&rdata->kref)) + continue; frport = fcoe_ctlr_rport(rdata); - if (!frport->time) + if (!frport->time) { + kref_put(&rdata->kref, fc_rport_destroy); continue; + } deadline = frport->time + msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10); if (time_after_eq(jiffies, deadline)) { @@ -2650,11 +2729,12 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) LIBFCOE_FIP_DBG(fip, "port %16.16llx fc_id %6.6x beacon expired\n", rdata->ids.port_name, rdata->ids.port_id); - lport->tt.rport_logoff(rdata); + fc_rport_logoff(rdata); } else if (time_before(deadline, next_time)) next_time = deadline; + kref_put(&rdata->kref, fc_rport_destroy); } - mutex_unlock(&lport->disc.disc_mutex); + rcu_read_unlock(); return next_time; } @@ -2674,11 +2754,21 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) struct fc_rport_priv rdata; struct fcoe_rport frport; } buf; - int rc; + int rc, vlan_id = 0; fiph = (struct fip_header *)skb->data; sub = fiph->fip_subcode; + if (fip->lp->vlan) + vlan_id = skb_vlan_tag_get_id(skb); + + if (vlan_id && vlan_id != fip->lp->vlan) { + LIBFCOE_FIP_DBG(fip, "vn_recv drop frame sub %x vlan %d\n", + sub, vlan_id); + rc = -EAGAIN; + goto drop; + } + rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); if (rc) { LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); @@ -2941,7 +3031,7 @@ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp) rjt_data.reason = ELS_RJT_UNSUP; rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } @@ -2991,12 +3081,17 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip) mutex_lock(&disc->disc_mutex); callback = disc->pending ? disc->disc_callback : NULL; disc->pending = 0; + mutex_unlock(&disc->disc_mutex); + rcu_read_lock(); list_for_each_entry_rcu(rdata, &disc->rports, peers) { + if (!kref_get_unless_zero(&rdata->kref)) + continue; frport = fcoe_ctlr_rport(rdata); if (frport->time) - lport->tt.rport_login(rdata); + fc_rport_login(rdata); + kref_put(&rdata->kref, fc_rport_destroy); } - mutex_unlock(&disc->disc_mutex); + rcu_read_unlock(); if (callback) callback(lport, DISC_EV_SUCCESS); } @@ -3015,11 +3110,13 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) switch (fip->state) { case FIP_ST_VNMP_START: fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1); + LIBFCOE_FIP_DBG(fip, "vn_timeout: send 1st probe request\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT); break; case FIP_ST_VNMP_PROBE1: fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2); + LIBFCOE_FIP_DBG(fip, "vn_timeout: send 2nd probe request\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); break; @@ -3030,6 +3127,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) hton24(mac + 3, new_port_id); fcoe_ctlr_map_dest(fip); fip->update_mac(fip->lp, mac); + LIBFCOE_FIP_DBG(fip, "vn_timeout: send claim notify\n"); fcoe_ctlr_vn_send_claim(fip); next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); break; @@ -3041,6 +3139,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT); if (time_after_eq(jiffies, next_time)) { fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP); + LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, fcoe_all_vn2vn, 0); next_time = jiffies + 
msecs_to_jiffies(FIP_VN_ANN_WAIT); @@ -3051,6 +3150,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) case FIP_ST_VNMP_UP: next_time = fcoe_ctlr_vn_age(fip); if (time_after_eq(jiffies, fip->port_ka_time)) { + LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, fcoe_all_vn2vn, 0); fip->port_ka_time = jiffies + @@ -3135,7 +3235,6 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, fc_exch_init(lport); fc_elsct_init(lport); fc_lport_init(lport); - fc_rport_init(lport); fc_disc_init(lport); fcoe_ctlr_mode_set(lport, fip, fip->mode); return 0; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 0675fd128734e15b23513565d83f1c05be328102..9cf3d56296ab87fca59da317412199afc354e5ba 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -335,16 +335,24 @@ static ssize_t store_ctlr_enabled(struct device *dev, const char *buf, size_t count) { struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + bool enabled; int rc; + if (*buf == '1') + enabled = true; + else if (*buf == '0') + enabled = false; + else + return -EINVAL; + switch (ctlr->enabled) { case FCOE_CTLR_ENABLED: - if (*buf == '1') + if (enabled) return count; ctlr->enabled = FCOE_CTLR_DISABLED; break; case FCOE_CTLR_DISABLED: - if (*buf == '0') + if (!enabled) return count; ctlr->enabled = FCOE_CTLR_ENABLED; break; @@ -423,6 +431,75 @@ static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR, show_ctlr_fip_resp, store_ctlr_fip_resp); +static ssize_t +fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count) +{ + int err; + unsigned long v; + + err = kstrtoul(buf, 10, &v); + if (err || v > UINT_MAX) + return -EINVAL; + + *var = v; + + return count; +} + +static ssize_t store_ctlr_r_a_tov(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + if (ctlr_dev->enabled == FCOE_CTLR_ENABLED) + return -EBUSY; + if (ctlr_dev->enabled == FCOE_CTLR_DISABLED) + return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count); + return -ENOTSUPP; +} + +static ssize_t show_ctlr_r_a_tov(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + return sprintf(buf, "%d\n", ctlr->lp->r_a_tov); +} + +static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR, + show_ctlr_r_a_tov, store_ctlr_r_a_tov); + +static ssize_t store_ctlr_e_d_tov(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + if (ctlr_dev->enabled == FCOE_CTLR_ENABLED) + return -EBUSY; + if (ctlr_dev->enabled == FCOE_CTLR_DISABLED) + return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count); + return -ENOTSUPP; +} + +static ssize_t show_ctlr_e_d_tov(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + return sprintf(buf, "%d\n", ctlr->lp->e_d_tov); +} + +static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR, + show_ctlr_e_d_tov, store_ctlr_e_d_tov); + static ssize_t store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr, @@ -507,6 +584,8 @@ static struct 
attribute_group fcoe_ctlr_lesb_attr_group = { static struct attribute *fcoe_ctlr_attrs[] = { &device_attr_fcoe_ctlr_fip_vlan_responder.attr, &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, + &device_attr_fcoe_ctlr_r_a_tov.attr, + &device_attr_fcoe_ctlr_e_d_tov.attr, &device_attr_fcoe_ctlr_enabled.attr, &device_attr_fcoe_ctlr_mode.attr, NULL, diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index d9fd2f841585cca6850d1c9000739b7eef69a1d1..2544a37ece0afdc19742d01954487f87dd4c3c5c 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -441,30 +441,38 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ unsigned long ptr; spinlock_t *io_lock = NULL; int io_lock_acquired = 0; + struct fc_rport_libfc_priv *rp; if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) return SCSI_MLQUEUE_HOST_BUSY; rport = starget_to_rport(scsi_target(sc->device)); + if (!rport) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "returning DID_NO_CONNECT for IO as rport is NULL\n"); + sc->result = DID_NO_CONNECT << 16; + done(sc); + return 0; + } + ret = fc_remote_port_chkready(rport); if (ret) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "rport is not ready\n"); atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); sc->result = ret; done(sc); return 0; } - if (rport) { - struct fc_rport_libfc_priv *rp = rport->dd_data; - - if (!rp || rp->rp_state != RPORT_ST_READY) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + rp = rport->dd_data; + if (!rp || rp->rp_state != RPORT_ST_READY) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "returning DID_NO_CONNECT for IO as rport is removed\n"); - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); - sc->result = DID_NO_CONNECT<<16; - done(sc); - return 0; - } + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + sc->result = DID_NO_CONNECT<<16; + done(sc); + return 0; } if (lp->state != LPORT_ST_READY || !(lp->link_up)) @@ -2543,7 +2551,7 @@ int fnic_reset(struct Scsi_Host *shost) * Reset local port, this will clean up libFC exchanges, * reset remote port sessions, and if link is up, begin flogi */ - ret = lp->tt.lport_reset(lp); + ret = fc_lport_reset(lp); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from fnic reset %s\n", diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index 4e15c4bf079578afc8d944b0daeaca2264178fb2..5a5fa01576b751ad7a60ca0bd95cd92d4dc2684e 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -613,7 +613,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, fc_trace_entries.rd_idx = 0; } - fc_buf->time_stamp = CURRENT_TIME; + ktime_get_real_ts64(&fc_buf->time_stamp); fc_buf->host_no = host_no; fc_buf->frame_type = frame_type; @@ -740,7 +740,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata, len = *orig_len; - time_to_tm(tdata->time_stamp.tv_sec, 0, &tm); + time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm); fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t"; len += snprintf(fnic_dbgfs_prt->buffer + len, diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h index a8aa0578fcb0a1d9a50cb14384360f79bce4f23d..e375d0c2eaaf8d2ac2ff30849e1d7d157afa486a 100644 --- a/drivers/scsi/fnic/fnic_trace.h +++ b/drivers/scsi/fnic/fnic_trace.h @@ -72,7 +72,7 @@ struct fnic_trace_data { typedef struct fnic_trace_data fnic_trace_data_t; struct fc_trace_hdr { - struct timespec time_stamp; + struct timespec64 time_stamp; u32 host_no; u8 frame_type; u8 
frame_len; diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 9795d6f3e1974e47e98f6d28ef80f1e4f282e528..ba69d6112fa13d34bc6acda465521797a04ee94c 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -499,10 +499,7 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) - printk(KERN_ERR - "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], - err); + pr_err("Can't add addr [%pM], %d\n", addr, err); } void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) @@ -517,10 +514,7 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) - printk(KERN_ERR - "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], - err); + pr_err("Can't del addr [%pM], %d\n", addr, err); } int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index cbf010324c187589e199f2b14ef3793bf892f2a9..6f9665d50d84bb485d3ba9cf99da05bae0f1c018 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -37,7 +37,7 @@ #define MAX_CARDS 8 /* old-style parameters for compatibility */ -static int ncr_irq; +static int ncr_irq = -1; static int ncr_addr; static int ncr_5380; static int ncr_53c400; @@ -52,9 +52,9 @@ module_param(ncr_53c400a, int, 0); module_param(dtc_3181e, int, 0); module_param(hp_c2502, int, 0); -static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; +static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])"); static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; module_param_array(base, int, NULL, 0); @@ -64,9 +64,59 @@ static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; module_param_array(card, int, NULL, 0); MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)"); +MODULE_ALIAS("g_NCR5380_mmio"); MODULE_LICENSE("GPL"); -#ifndef SCSI_G_NCR5380_MEM +static void g_NCR5380_trigger_irq(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + /* + * An interrupt is triggered whenever BSY = false, SEL = true + * and a bit set in the SELECT_ENABLE_REG is asserted on the + * SCSI bus. + * + * Note that the bus is only driven when the phase control signals + * (I/O, C/D, and MSG) match those in the TCR. + */ + NCR5380_write(TARGET_COMMAND_REG, + PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); + NCR5380_write(INITIATOR_COMMAND_REG, + ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL); + + msleep(1); + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_write(SELECT_ENABLE_REG, 0); + NCR5380_write(TARGET_COMMAND_REG, 0); +} + +/** + * g_NCR5380_probe_irq - find the IRQ of a NCR5380 or equivalent + * @instance: SCSI host instance + * + * Autoprobe for the IRQ line used by the card by triggering an IRQ + * and then looking to see what interrupt actually turned up. 
+ */ + +static int g_NCR5380_probe_irq(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int irq_mask, irq; + + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + irq_mask = probe_irq_on(); + g_NCR5380_trigger_irq(instance); + irq = probe_irq_off(irq_mask); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + if (irq <= 0) + return NO_IRQ; + return irq; +} + /* * Configure I/O address of 53C400A or DTC436 by writing magic numbers * to ports 0x779 and 0x379. @@ -81,47 +131,64 @@ static void magic_configure(int idx, u8 irq, u8 magic[]) outb(magic[3], 0x379); outb(magic[4], 0x379); - /* allowed IRQs for HP C2502 */ - if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7) - irq = 0; + if (irq == 9) + irq = 2; + if (idx >= 0 && idx <= 7) cfg = 0x80 | idx | (irq << 4); outb(cfg, 0x379); } -#endif + +static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} + +static int legacy_find_free_irq(int *irq_table) +{ + while (*irq_table != -1) { + if (!request_irq(*irq_table, legacy_empty_irq_handler, + IRQF_PROBE_SHARED, "Test IRQ", + (void *)irq_table)) { + free_irq(*irq_table, (void *) irq_table); + return *irq_table; + } + irq_table++; + } + return -1; +} + +static unsigned int ncr_53c400a_ports[] = { + 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 +}; +static unsigned int dtc_3181e_ports[] = { + 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0 +}; +static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */ + 0x59, 0xb9, 0xc5, 0xae, 0xa6 +}; +static u8 hp_c2502_magic[] = { /* HP C2502 */ + 0x0f, 0x22, 0xf0, 0x20, 0x80 +}; +static int hp_c2502_irqs[] = { + 9, 5, 7, 3, 4, -1 +}; static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, struct device *pdev, int base, int irq, int board) { - unsigned int *ports; + bool is_pmio = base <= 0xffff; + int ret; + int flags = 0; + unsigned int *ports = NULL; u8 *magic = NULL; -#ifndef SCSI_G_NCR5380_MEM int i; int port_idx = -1; unsigned long region_size; -#endif - static unsigned int ncr_53c400a_ports[] = { - 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 - }; - static unsigned int dtc_3181e_ports[] = { - 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0 - }; - static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */ - 0x59, 0xb9, 0xc5, 0xae, 0xa6 - }; - static u8 hp_c2502_magic[] = { /* HP C2502 */ - 0x0f, 0x22, 0xf0, 0x20, 0x80 - }; - int flags, ret; struct Scsi_Host *instance; struct NCR5380_hostdata *hostdata; -#ifdef SCSI_G_NCR5380_MEM - void __iomem *iomem; - resource_size_t iomem_size; -#endif + u8 __iomem *iomem; - ports = NULL; - flags = 0; switch (board) { case BOARD_NCR5380: flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; @@ -140,8 +207,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, break; } -#ifndef SCSI_G_NCR5380_MEM - if (ports && magic) { + if (is_pmio && ports && magic) { /* wakeup sequence for the NCR53C400A and DTC3181E */ /* Disable the adapter and look for a free io port */ @@ -170,84 +236,96 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, if (ports[i]) { /* At this point we have our region reserved */ magic_configure(i, 0, magic); /* no IRQ yet */ - outb(0xc0, ports[i] + 9); - if (inb(ports[i] + 9) != 0x80) { + base = ports[i]; + outb(0xc0, base + 9); + if (inb(base + 9) != 0x80) { ret = -ENODEV; goto out_release; } - base = ports[i]; port_idx = i; } else return -EINVAL; - } - else - { + } else if (is_pmio) { /* NCR5380 - no configuration, just grab */ region_size = 8; 
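At this point generic_NCR5380_init_one() handles port-mapped and memory-mapped cards in one path (the PIO branch continues just below): the resource is reserved with request_region() or request_mem_region() depending on is_pmio, then turned into a single __iomem cookie via ioport_map() or ioremap() so the register accessors no longer care which bus flavor they are on. A condensed sketch of that reserve-and-map step; the demo_map_regs() name and the "demo" resource label are inventions for illustration only:

    #include <linux/io.h>
    #include <linux/ioport.h>

    static void __iomem *demo_map_regs(unsigned long base, unsigned long size,
                                       bool is_pmio)
    {
        void __iomem *regs;

        if (is_pmio) {
            if (!request_region(base, size, "demo"))
                return NULL;
            regs = ioport_map(base, size);  /* cookie usable with ioread8()/iowrite8() */
            if (!regs)
                release_region(base, size);
        } else {
            if (!request_mem_region(base, size, "demo"))
                return NULL;
            regs = ioremap(base, size);
            if (!regs)
                release_mem_region(base, size);
        }
        return regs;
    }

The g_NCR5380 code keeps the resulting cookie in hostdata->io and layers its own register offsets (hostdata->offset and the c400_* registers) on top, so only this mapping step differs between the two bus types.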
if (!base || !request_region(base, region_size, "ncr5380")) return -EBUSY; + } else { /* MMIO */ + region_size = NCR53C400_region_size; + if (!request_mem_region(base, region_size, "ncr5380")) + return -EBUSY; } -#else - iomem_size = NCR53C400_region_size; - if (!request_mem_region(base, iomem_size, "ncr5380")) - return -EBUSY; - iomem = ioremap(base, iomem_size); + + if (is_pmio) + iomem = ioport_map(base, region_size); + else + iomem = ioremap(base, region_size); + if (!iomem) { - release_mem_region(base, iomem_size); - return -ENOMEM; + ret = -ENOMEM; + goto out_release; } -#endif + instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata)); if (instance == NULL) { ret = -ENOMEM; - goto out_release; + goto out_unmap; } hostdata = shost_priv(instance); -#ifndef SCSI_G_NCR5380_MEM - instance->io_port = base; - instance->n_io_port = region_size; - hostdata->io_width = 1; /* 8-bit PDMA by default */ - - /* - * On NCR53C400 boards, NCR5380 registers are mapped 8 past - * the base address. - */ - switch (board) { - case BOARD_NCR53C400: - instance->io_port += 8; - hostdata->c400_ctl_status = 0; - hostdata->c400_blk_cnt = 1; - hostdata->c400_host_buf = 4; - break; - case BOARD_DTC3181E: - hostdata->io_width = 2; /* 16-bit PDMA */ - /* fall through */ - case BOARD_NCR53C400A: - case BOARD_HP_C2502: - hostdata->c400_ctl_status = 9; - hostdata->c400_blk_cnt = 10; - hostdata->c400_host_buf = 8; - break; + hostdata->io = iomem; + hostdata->region_size = region_size; + + if (is_pmio) { + hostdata->io_port = base; + hostdata->io_width = 1; /* 8-bit PDMA by default */ + hostdata->offset = 0; + + /* + * On NCR53C400 boards, NCR5380 registers are mapped 8 past + * the base address. + */ + switch (board) { + case BOARD_NCR53C400: + hostdata->io_port += 8; + hostdata->c400_ctl_status = 0; + hostdata->c400_blk_cnt = 1; + hostdata->c400_host_buf = 4; + break; + case BOARD_DTC3181E: + hostdata->io_width = 2; /* 16-bit PDMA */ + /* fall through */ + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + hostdata->c400_ctl_status = 9; + hostdata->c400_blk_cnt = 10; + hostdata->c400_host_buf = 8; + break; + } + } else { + hostdata->base = base; + hostdata->offset = NCR53C400_mem_base; + switch (board) { + case BOARD_NCR53C400: + hostdata->c400_ctl_status = 0x100; + hostdata->c400_blk_cnt = 0x101; + hostdata->c400_host_buf = 0x104; + break; + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); + ret = -EINVAL; + goto out_unregister; + } } -#else - instance->base = base; - hostdata->iomem = iomem; - hostdata->iomem_size = iomem_size; - switch (board) { - case BOARD_NCR53C400: - hostdata->c400_ctl_status = 0x100; - hostdata->c400_blk_cnt = 0x101; - hostdata->c400_host_buf = 0x104; - break; - case BOARD_DTC3181E: - case BOARD_NCR53C400A: - case BOARD_HP_C2502: - pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); - ret = -EINVAL; + + /* Check for vacant slot */ + NCR5380_write(MODE_REG, 0); + if (NCR5380_read(MODE_REG) != 0) { + ret = -ENODEV; goto out_unregister; } -#endif ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP); if (ret) @@ -263,33 +341,59 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, NCR5380_maybe_reset_bus(instance); - if (irq != IRQ_AUTO) - instance->irq = irq; - else - instance->irq = NCR5380_probe_irq(instance, 0xffff); - /* Compatibility with documented NCR5380 kernel parameters */ - if (instance->irq == 255) - instance->irq = NO_IRQ; + if (irq == 255 || irq == 0) + irq = NO_IRQ; + else 
if (irq == -1) + irq = IRQ_AUTO; + + if (board == BOARD_HP_C2502) { + int *irq_table = hp_c2502_irqs; + int board_irq = -1; + + switch (irq) { + case NO_IRQ: + board_irq = 0; + break; + case IRQ_AUTO: + board_irq = legacy_find_free_irq(irq_table); + break; + default: + while (*irq_table != -1) + if (*irq_table++ == irq) + board_irq = irq; + } + + if (board_irq <= 0) { + board_irq = 0; + irq = NO_IRQ; + } + + magic_configure(port_idx, board_irq, magic); + } + + if (irq == IRQ_AUTO) { + instance->irq = g_NCR5380_probe_irq(instance); + if (instance->irq == NO_IRQ) + shost_printk(KERN_INFO, instance, "no irq detected\n"); + } else { + instance->irq = irq; + if (instance->irq == NO_IRQ) + shost_printk(KERN_INFO, instance, "no irq provided\n"); + } if (instance->irq != NO_IRQ) { -#ifndef SCSI_G_NCR5380_MEM - /* set IRQ for HP C2502 */ - if (board == BOARD_HP_C2502) - magic_configure(port_idx, instance->irq, magic); -#endif if (request_irq(instance->irq, generic_NCR5380_intr, 0, "NCR5380", instance)) { - printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = NO_IRQ; + shost_printk(KERN_INFO, instance, + "irq %d denied\n", instance->irq); + } else { + shost_printk(KERN_INFO, instance, + "irq %d acquired\n", instance->irq); } } - if (instance->irq == NO_IRQ) { - printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); - printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); - } - ret = scsi_add_host(instance, pdev); if (ret) goto out_free_irq; @@ -303,38 +407,39 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, NCR5380_exit(instance); out_unregister: scsi_host_put(instance); -out_release: -#ifndef SCSI_G_NCR5380_MEM - release_region(base, region_size); -#else +out_unmap: iounmap(iomem); - release_mem_region(base, iomem_size); -#endif +out_release: + if (is_pmio) + release_region(base, region_size); + else + release_mem_region(base, region_size); return ret; } static void generic_NCR5380_release_resources(struct Scsi_Host *instance) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); + void __iomem *iomem = hostdata->io; + unsigned long io_port = hostdata->io_port; + unsigned long base = hostdata->base; + unsigned long region_size = hostdata->region_size; + scsi_remove_host(instance); if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); -#ifndef SCSI_G_NCR5380_MEM - release_region(instance->io_port, instance->n_io_port); -#else - { - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - iounmap(hostdata->iomem); - release_mem_region(instance->base, hostdata->iomem_size); - } -#endif scsi_host_put(instance); + iounmap(iomem); + if (io_port) + release_region(io_port, region_size); + else + release_mem_region(base, region_size); } /** * generic_NCR5380_pread - pseudo DMA read - * @instance: adapter to read from + * @hostdata: scsi host private data * @dst: buffer to read into * @len: buffer length * @@ -342,10 +447,9 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance) * controller */ -static inline int generic_NCR5380_pread(struct Scsi_Host *instance, +static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); int blocks = len / 128; int start = 0; @@ -361,18 +465,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance, while 
(NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) ; /* FIXME - no timeout */ -#ifndef SCSI_G_NCR5380_MEM - if (hostdata->io_width == 2) - insw(instance->io_port + hostdata->c400_host_buf, + if (hostdata->io_port && hostdata->io_width == 2) + insw(hostdata->io_port + hostdata->c400_host_buf, dst + start, 64); - else - insb(instance->io_port + hostdata->c400_host_buf, + else if (hostdata->io_port) + insb(hostdata->io_port + hostdata->c400_host_buf, dst + start, 128); -#else - /* implies SCSI_G_NCR5380_MEM */ - memcpy_fromio(dst + start, - hostdata->iomem + NCR53C400_host_buffer, 128); -#endif + else + memcpy_fromio(dst + start, + hostdata->io + NCR53C400_host_buffer, 128); + start += 128; blocks--; } @@ -381,18 +483,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance, while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) ; /* FIXME - no timeout */ -#ifndef SCSI_G_NCR5380_MEM - if (hostdata->io_width == 2) - insw(instance->io_port + hostdata->c400_host_buf, + if (hostdata->io_port && hostdata->io_width == 2) + insw(hostdata->io_port + hostdata->c400_host_buf, dst + start, 64); - else - insb(instance->io_port + hostdata->c400_host_buf, + else if (hostdata->io_port) + insb(hostdata->io_port + hostdata->c400_host_buf, dst + start, 128); -#else - /* implies SCSI_G_NCR5380_MEM */ - memcpy_fromio(dst + start, - hostdata->iomem + NCR53C400_host_buffer, 128); -#endif + else + memcpy_fromio(dst + start, + hostdata->io + NCR53C400_host_buffer, 128); + start += 128; blocks--; } @@ -412,7 +512,7 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance, /** * generic_NCR5380_pwrite - pseudo DMA write - * @instance: adapter to read from + * @hostdata: scsi host private data * @dst: buffer to read into * @len: buffer length * @@ -420,10 +520,9 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance, * controller */ -static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance, +static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata, unsigned char *src, int len) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); int blocks = len / 128; int start = 0; @@ -439,18 +538,17 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance, break; while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) ; // FIXME - timeout -#ifndef SCSI_G_NCR5380_MEM - if (hostdata->io_width == 2) - outsw(instance->io_port + hostdata->c400_host_buf, + + if (hostdata->io_port && hostdata->io_width == 2) + outsw(hostdata->io_port + hostdata->c400_host_buf, src + start, 64); - else - outsb(instance->io_port + hostdata->c400_host_buf, + else if (hostdata->io_port) + outsb(hostdata->io_port + hostdata->c400_host_buf, src + start, 128); -#else - /* implies SCSI_G_NCR5380_MEM */ - memcpy_toio(hostdata->iomem + NCR53C400_host_buffer, - src + start, 128); -#endif + else + memcpy_toio(hostdata->io + NCR53C400_host_buffer, + src + start, 128); + start += 128; blocks--; } @@ -458,18 +556,16 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance, while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) ; // FIXME - no timeout -#ifndef SCSI_G_NCR5380_MEM - if (hostdata->io_width == 2) - outsw(instance->io_port + hostdata->c400_host_buf, + if (hostdata->io_port && hostdata->io_width == 2) + outsw(hostdata->io_port + hostdata->c400_host_buf, src + start, 64); - else - outsb(instance->io_port + hostdata->c400_host_buf, + else if (hostdata->io_port) + outsb(hostdata->io_port + 
hostdata->c400_host_buf, src + start, 128); -#else - /* implies SCSI_G_NCR5380_MEM */ - memcpy_toio(hostdata->iomem + NCR53C400_host_buffer, - src + start, 128); -#endif + else + memcpy_toio(hostdata->io + NCR53C400_host_buffer, + src + start, 128); + start += 128; blocks--; } @@ -489,10 +585,9 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance, return 0; } -static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance, +static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); int transfersize = cmd->transfersize; if (hostdata->flags & FLAG_NO_PSEUDO_DMA) @@ -566,7 +661,7 @@ static struct isa_driver generic_NCR5380_isa_driver = { }, }; -#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) +#ifdef CONFIG_PNP static struct pnp_device_id generic_NCR5380_pnp_ids[] = { { .id = "DTC436e", .driver_data = BOARD_DTC3181E }, { .id = "" } @@ -600,7 +695,7 @@ static struct pnp_driver generic_NCR5380_pnp_driver = { .probe = generic_NCR5380_pnp_probe, .remove = generic_NCR5380_pnp_remove, }; -#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */ +#endif /* defined(CONFIG_PNP) */ static int pnp_registered, isa_registered; @@ -609,7 +704,7 @@ static int __init generic_NCR5380_init(void) int ret = 0; /* compatibility with old-style parameters */ - if (irq[0] == 0 && base[0] == 0 && card[0] == -1) { + if (irq[0] == -1 && base[0] == 0 && card[0] == -1) { irq[0] = ncr_irq; base[0] = ncr_addr; if (ncr_5380) @@ -624,7 +719,7 @@ static int __init generic_NCR5380_init(void) card[0] = BOARD_HP_C2502; } -#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) +#ifdef CONFIG_PNP if (!pnp_register_driver(&generic_NCR5380_pnp_driver)) pnp_registered = 1; #endif @@ -637,7 +732,7 @@ static int __init generic_NCR5380_init(void) static void __exit generic_NCR5380_exit(void) { -#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) +#ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_driver(&generic_NCR5380_pnp_driver); #endif diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index b175b92344586aee3a1247e85288f34731fe179c..81b22d989648b0107f6c0f3128cc25348a88e775 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -14,49 +14,28 @@ #ifndef GENERIC_NCR5380_H #define GENERIC_NCR5380_H -#ifndef SCSI_G_NCR5380_MEM #define DRV_MODULE_NAME "g_NCR5380" #define NCR5380_read(reg) \ - inb(instance->io_port + (reg)) + ioread8(hostdata->io + hostdata->offset + (reg)) #define NCR5380_write(reg, value) \ - outb(value, instance->io_port + (reg)) + iowrite8(value, hostdata->io + hostdata->offset + (reg)) #define NCR5380_implementation_fields \ + int offset; \ int c400_ctl_status; \ int c400_blk_cnt; \ int c400_host_buf; \ int io_width; -#else -/* therefore SCSI_G_NCR5380_MEM */ -#define DRV_MODULE_NAME "g_NCR5380_mmio" - #define NCR53C400_mem_base 0x3880 #define NCR53C400_host_buffer 0x3900 #define NCR53C400_region_size 0x3a00 -#define NCR5380_read(reg) \ - readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \ - NCR53C400_mem_base + (reg)) -#define NCR5380_write(reg, value) \ - writeb(value, ((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \ - NCR53C400_mem_base + (reg)) - -#define NCR5380_implementation_fields \ - void __iomem *iomem; \ - resource_size_t iomem_size; \ - int c400_ctl_status; \ - int c400_blk_cnt; \ - int c400_host_buf; - -#endif - -#define NCR5380_dma_xfer_len(instance, cmd, phase) \ - generic_NCR5380_dma_xfer_len(instance, 
cmd) +#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len #define NCR5380_dma_recv_setup generic_NCR5380_pread #define NCR5380_dma_send_setup generic_NCR5380_pwrite -#define NCR5380_dma_residual(instance) (0) +#define NCR5380_dma_residual NCR5380_dma_residual_none #define NCR5380_intr generic_NCR5380_intr #define NCR5380_queue_command generic_NCR5380_queue_command @@ -72,5 +51,6 @@ #define BOARD_DTC3181E 3 #define BOARD_HP_C2502 4 -#endif /* GENERIC_NCR5380_H */ +#define IRQ_AUTO 254 +#endif /* GENERIC_NCR5380_H */ diff --git a/drivers/scsi/g_NCR5380_mmio.c b/drivers/scsi/g_NCR5380_mmio.c deleted file mode 100644 index 8cdde71ba0c889be09e026b7d0341cc607a29ee7..0000000000000000000000000000000000000000 --- a/drivers/scsi/g_NCR5380_mmio.c +++ /dev/null @@ -1,10 +0,0 @@ -/* - * There is probably a nicer way to do this but this one makes - * pretty obvious what is happening. We rebuild the same file with - * different options for mmio versus pio. - */ - -#define SCSI_G_NCR5380_MEM - -#include "g_NCR5380.c" - diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 0a767740bf02c15c2efdfbf04b3ae9f7f2c5527f..d020a13646ae648914ae65e2438e2d474d023662 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -130,7 +130,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 72c98522bd260eda9439d7a5c7cbb400f643b289..c0cd505a9ef79caed37cf4a4ee71429d13482ce0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -13,6 +13,7 @@ #define _HISI_SAS_H_ #include +#include #include #include #include @@ -110,7 +111,7 @@ struct hisi_sas_device { struct domain_device *sas_device; u64 attached_phy; u64 device_id; - u64 running_req; + atomic64_t running_req; u8 dev_status; }; @@ -149,7 +150,8 @@ struct hisi_sas_hw { struct domain_device *device); struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); - int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s); + int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id, + int *q, int *s); void (*start_delivery)(struct hisi_hba *hisi_hba); int (*prep_ssp)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, int is_tmf, @@ -166,6 +168,9 @@ struct hisi_sas_hw { void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no); void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no); void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no); + void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *linkrates); + enum sas_linkrate (*phy_get_max_linkrate)(void); void (*free_device)(struct hisi_hba *hisi_hba, struct hisi_sas_device *dev); int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); @@ -183,6 +188,7 @@ struct hisi_hba { u32 ctrl_reset_reg; u32 ctrl_reset_sts_reg; u32 ctrl_clock_ena_reg; + u32 refclk_frequency_mhz; u8 sas_addr[SAS_ADDR_SIZE]; int n_phy; @@ -205,7 +211,6 @@ struct hisi_hba { struct hisi_sas_port port[HISI_SAS_MAX_PHYS]; int queue_count; - int queue; struct hisi_sas_slot *slot_prep; struct dma_pool *sge_page_pool; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 2f872f784e109e85b0e32af473f4920274aa953b..d50e9cfefd240fb962b10b1a764af910ab94e791 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -162,8 +162,8 @@ static void hisi_sas_slot_abort(struct work_struct *work) 
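Before the hisi_sas_main.c hunk below, note that the hisi_sas.h change above turns running_req into an atomic64_t so that the submit and completion paths can adjust it without holding a lock. A minimal sketch of that counter pattern, with illustrative names only:

#include <linux/atomic.h>
#include <linux/types.h>

/* Illustrative only -- mirrors the running_req conversion above. */
static atomic64_t example_running_req = ATOMIC64_INIT(0);

static void example_submit(void)
{
	atomic64_inc(&example_running_req);	/* on command delivery */
}

static void example_complete(void)
{
	atomic64_dec(&example_running_req);	/* on completion or abort */
}

static s64 example_outstanding(void)
{
	return atomic64_read(&example_running_req);
}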
hisi_sas_slot_task_free(hisi_hba, task, abort_slot); if (task->task_done) task->task_done(task); - if (sas_dev && sas_dev->running_req) - sas_dev->running_req--; + if (sas_dev) + atomic64_dec(&sas_dev->running_req); } static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, @@ -232,8 +232,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); if (rc) goto err_out; - rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, - &dlvry_queue_slot); + rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id, + &dlvry_queue, &dlvry_queue_slot); if (rc) goto err_out_tag; @@ -303,7 +303,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, hisi_hba->slot_prep = slot; - sas_dev->running_req++; + atomic64_inc(&sas_dev->running_req); ++(*pass); return 0; @@ -369,9 +369,14 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no) struct sas_phy *sphy = sas_phy->phy; sphy->negotiated_linkrate = sas_phy->linkrate; - sphy->minimum_linkrate = phy->minimum_linkrate; sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - sphy->maximum_linkrate = phy->maximum_linkrate; + sphy->maximum_linkrate_hw = + hisi_hba->hw->phy_get_max_linkrate(); + if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) + sphy->minimum_linkrate = phy->minimum_linkrate; + + if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) + sphy->maximum_linkrate = phy->maximum_linkrate; } if (phy->phy_type & PORT_TYPE_SAS) { @@ -537,7 +542,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) struct hisi_hba *hisi_hba = sas_ha->lldd_ha; struct hisi_sas_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; - struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id]; + struct hisi_sas_port *port = &hisi_hba->port[phy->port_id]; unsigned long flags; if (!sas_port) @@ -645,6 +650,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_SET_LINK_RATE: + hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata); + break; + case PHY_FUNC_RELEASE_SPINUP_HOLD: default: return -EOPNOTSUPP; @@ -764,7 +772,8 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, task = NULL; } ex_err: - WARN_ON(retry == TASK_RETRY); + if (retry == TASK_RETRY) + dev_warn(dev, "abort tmf: executing internal task failed!\n"); sas_free_task(task); return res; } @@ -960,6 +969,9 @@ static int hisi_sas_query_task(struct sas_task *task) case TMF_RESP_FUNC_FAILED: case TMF_RESP_FUNC_COMPLETE: break; + default: + rc = TMF_RESP_FUNC_FAILED; + break; } } return rc; @@ -987,8 +999,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id, rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); if (rc) goto err_out; - rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, - &dlvry_queue_slot); + rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id, + &dlvry_queue, &dlvry_queue_slot); if (rc) goto err_out_tag; @@ -1023,7 +1035,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id, hisi_hba->slot_prep = slot; - sas_dev->running_req++; + atomic64_inc(&sas_dev->running_req); + /* send abort command to our chip */ hisi_hba->hw->start_delivery(hisi_hba); @@ -1396,10 +1409,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; struct device_node *np = pdev->dev.of_node; + struct clk 
*refclk; shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba)); - if (!shost) - goto err_out; + if (!shost) { + dev_err(dev, "scsi host alloc failed\n"); + return NULL; + } hisi_hba = shost_priv(shost); hisi_hba->hw = hw; @@ -1432,6 +1448,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, goto err_out; } + refclk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(refclk)) + dev_info(dev, "no ref clk property\n"); + else + hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; + if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) goto err_out; @@ -1457,6 +1479,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, return shost; err_out: + kfree(shost); dev_err(dev, "shost alloc failed\n"); return NULL; } @@ -1483,10 +1506,8 @@ int hisi_sas_probe(struct platform_device *pdev, int rc, phy_nr, port_nr, i; shost = hisi_sas_shost_alloc(pdev, hw); - if (!shost) { - rc = -ENOMEM; - goto err_out_ha; - } + if (!shost) + return -ENOMEM; sha = SHOST_TO_SAS_HA(shost); hisi_hba = shost_priv(shost); @@ -1496,12 +1517,13 @@ int hisi_sas_probe(struct platform_device *pdev, arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); - if (!arr_phy || !arr_port) - return -ENOMEM; + if (!arr_phy || !arr_port) { + rc = -ENOMEM; + goto err_out_ha; + } sha->sas_phy = arr_phy; sha->sas_port = arr_port; - sha->core.shost = shost; sha->lldd_ha = hisi_hba; shost->transportt = hisi_sas_stt; @@ -1546,6 +1568,7 @@ int hisi_sas_probe(struct platform_device *pdev, err_out_register_ha: scsi_remove_host(shost); err_out_ha: + hisi_sas_free(hisi_hba); kfree(shost); return rc; } @@ -1555,12 +1578,14 @@ int hisi_sas_remove(struct platform_device *pdev) { struct sas_ha_struct *sha = platform_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = sha->core.shost; scsi_remove_host(sha->core.shost); sas_unregister_ha(sha); sas_remove_host(sha->core.shost); hisi_sas_free(hisi_hba); + kfree(shost); return 0; } EXPORT_SYMBOL_GPL(hisi_sas_remove); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index c0ac49d8bc8d256e9711edcb3a826a19293b5e58..8a1be0ba8a22d399002de66c766f0d85f0ea3a4b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -843,6 +843,49 @@ static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } +static enum sas_linkrate phy_get_max_linkrate_v1_hw(void) +{ + return SAS_LINK_RATE_6_0_GBPS; +} + +static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *r) +{ + u32 prog_phy_link_rate = + hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + int i; + enum sas_linkrate min, max; + u32 rate_mask = 0; + + if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = sas_phy->phy->maximum_linkrate; + min = r->minimum_linkrate; + } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = r->maximum_linkrate; + min = sas_phy->phy->minimum_linkrate; + } else + return; + + sas_phy->phy->maximum_linkrate = max; + sas_phy->phy->minimum_linkrate = min; + + min -= SAS_LINK_RATE_1_5_GBPS; + max -= SAS_LINK_RATE_1_5_GBPS; + + for (i = 0; i <= max; i++) + rate_mask |= 1 << (i * 2); + + prog_phy_link_rate &= ~0xff; + prog_phy_link_rate |= 
rate_mask; + + hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, + prog_phy_link_rate); + + phy_hard_reset_v1_hw(hisi_hba, phy_no); +} + static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) { int i, bitmap = 0; @@ -862,29 +905,23 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) * The callpath to this function and upto writing the write * queue pointer should be safe from interruption. */ -static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s) +static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id, + int *q, int *s) { struct device *dev = &hisi_hba->pdev->dev; struct hisi_sas_dq *dq; u32 r, w; - int queue = hisi_hba->queue; - - while (1) { - dq = &hisi_hba->dq[queue]; - w = dq->wr_point; - r = hisi_sas_read32_relaxed(hisi_hba, - DLVRY_Q_0_RD_PTR + (queue * 0x14)); - if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - queue = (queue + 1) % hisi_hba->queue_count; - if (queue == hisi_hba->queue) { - dev_warn(dev, "could not find free slot\n"); - return -EAGAIN; - } - continue; - } - break; + int queue = dev_id % hisi_hba->queue_count; + + dq = &hisi_hba->dq[queue]; + w = dq->wr_point; + r = hisi_sas_read32_relaxed(hisi_hba, + DLVRY_Q_0_RD_PTR + (queue * 0x14)); + if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { + dev_warn(dev, "could not find free slot\n"); + return -EAGAIN; } - hisi_hba->queue = (queue + 1) % hisi_hba->queue_count; + *q = queue; *s = w; return 0; @@ -1372,8 +1409,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, } out: - if (sas_dev && sas_dev->running_req) - sas_dev->running_req--; + if (sas_dev) + atomic64_dec(&sas_dev->running_req); hisi_sas_slot_task_free(hisi_hba, task, slot); sts = ts->stat; @@ -1824,6 +1861,8 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = { .phy_enable = enable_phy_v1_hw, .phy_disable = disable_phy_v1_hw, .phy_hard_reset = phy_hard_reset_v1_hw, + .phy_set_linkrate = phy_set_linkrate_v1_hw, + .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw, .get_wideport_bitmap = get_wideport_bitmap_v1_hw, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr), diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 9825a3f49f53758cc36a58e747aa13c9a26b315a..b934aec1eebba87ef31ac7c4dc0a831492e4748e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -55,10 +55,44 @@ #define HGC_DFX_CFG2 0xc0 #define HGC_IOMB_PROC1_STATUS 0x104 #define CFG_1US_TIMER_TRSH 0xcc +#define HGC_LM_DFX_STATUS2 0x128 +#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 +#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ + HGC_LM_DFX_STATUS2_IOSTLIST_OFF) +#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 +#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ + HGC_LM_DFX_STATUS2_ITCTLIST_OFF) +#define HGC_CQE_ECC_ADDR 0x13c +#define HGC_CQE_ECC_1B_ADDR_OFF 0 +#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF) +#define HGC_CQE_ECC_MB_ADDR_OFF 8 +#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) +#define HGC_IOST_ECC_ADDR 0x140 +#define HGC_IOST_ECC_1B_ADDR_OFF 0 +#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) +#define HGC_IOST_ECC_MB_ADDR_OFF 16 +#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) +#define HGC_DQE_ECC_ADDR 0x144 +#define HGC_DQE_ECC_1B_ADDR_OFF 0 +#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) +#define HGC_DQE_ECC_MB_ADDR_OFF 16 +#define HGC_DQE_ECC_MB_ADDR_MSK 
(0xfff << HGC_DQE_ECC_MB_ADDR_OFF) #define HGC_INVLD_DQE_INFO 0x148 #define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9 #define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF) #define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18 +#define HGC_ITCT_ECC_ADDR 0x150 +#define HGC_ITCT_ECC_1B_ADDR_OFF 0 +#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ + HGC_ITCT_ECC_1B_ADDR_OFF) +#define HGC_ITCT_ECC_MB_ADDR_OFF 16 +#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ + HGC_ITCT_ECC_MB_ADDR_OFF) +#define HGC_AXI_FIFO_ERR_INFO 0x154 +#define AXI_ERR_INFO_OFF 0 +#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) +#define FIFO_ERR_INFO_OFF 8 +#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) #define INT_COAL_EN 0x19c #define OQ_INT_COAL_TIME 0x1a0 #define OQ_INT_COAL_CNT 0x1a4 @@ -73,13 +107,41 @@ #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) #define ENT_INT_SRC2 0x1bc #define ENT_INT_SRC3 0x1c0 +#define ENT_INT_SRC3_WP_DEPTH_OFF 8 +#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 +#define ENT_INT_SRC3_RP_DEPTH_OFF 10 +#define ENT_INT_SRC3_AXI_OFF 11 +#define ENT_INT_SRC3_FIFO_OFF 12 +#define ENT_INT_SRC3_LM_OFF 14 #define ENT_INT_SRC3_ITC_INT_OFF 15 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) +#define ENT_INT_SRC3_ABT_OFF 16 #define ENT_INT_SRC_MSK1 0x1c4 #define ENT_INT_SRC_MSK2 0x1c8 #define ENT_INT_SRC_MSK3 0x1cc #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) +#define SAS_ECC_INTR 0x1e8 +#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 +#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 +#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 +#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 +#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4 +#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5 +#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6 +#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7 +#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8 +#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9 +#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 +#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 +#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12 +#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13 +#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14 +#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15 +#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16 +#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17 +#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18 +#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19 #define SAS_ECC_INTR_MSK 0x1ec #define HGC_ERR_STAT_EN 0x238 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 @@ -94,7 +156,20 @@ #define COMPL_Q_0_DEPTH 0x4e8 #define COMPL_Q_0_WR_PTR 0x4ec #define COMPL_Q_0_RD_PTR 0x4f0 - +#define HGC_RXM_DFX_STATUS14 0xae8 +#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0 +#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM0_OFF) +#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9 +#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM1_OFF) +#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18 +#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM2_OFF) +#define HGC_RXM_DFX_STATUS15 0xaec +#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0 +#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS15_MEM3_OFF) /* phy registers need init */ #define PORT_BASE (0x2000) @@ -119,6 +194,9 @@ #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) #define SL_CONTROL_CTA_OFF 17 #define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF) +#define RX_PRIMS_STATUS (PORT_BASE + 0x98) +#define RX_BCAST_CHG_OFF 1 +#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) #define TX_ID_DWORD0 (PORT_BASE + 0x9c) 
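The *_OFF/*_MSK pairs added above are consumed by the new ECC and AXI error handlers further down using the usual mask-then-shift idiom. A tiny illustrative helper (not part of the patch) makes the idiom explicit:

#include <linux/types.h>

/* Generic field read behind the *_MSK/*_OFF pairs above, e.g.
 *	addr = example_field_get(reg_val, HGC_DQE_ECC_MB_ADDR_MSK,
 *				 HGC_DQE_ECC_MB_ADDR_OFF);
 */
static inline u32 example_field_get(u32 reg_val, u32 msk, u32 off)
{
	return (reg_val & msk) >> off;
}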
#define TX_ID_DWORD1 (PORT_BASE + 0xa0) #define TX_ID_DWORD2 (PORT_BASE + 0xa4) @@ -267,6 +345,8 @@ #define ITCT_HDR_RTOLT_OFF 48 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) +#define HISI_SAS_FATAL_INT_NR 2 + struct hisi_sas_complete_v2_hdr { __le32 dw0; __le32 dw1; @@ -659,8 +739,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba, qw0 &= ~(1 << ITCT_HDR_VALID_OFF); hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); - hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED; - hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL; /* clear the itct */ hisi_sas_write32(hisi_hba, ITCT_CLR, 0); @@ -808,7 +886,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe); - hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30); for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); @@ -824,7 +902,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); - hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff); hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); @@ -836,7 +914,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); - hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694); + if (hisi_hba->refclk_frequency_mhz == 66) + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694); + /* else, do nothing -> leave it how you found it */ } for (i = 0; i < hisi_hba->queue_count; i++) { @@ -980,6 +1060,49 @@ static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } +static enum sas_linkrate phy_get_max_linkrate_v2_hw(void) +{ + return SAS_LINK_RATE_12_0_GBPS; +} + +static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *r) +{ + u32 prog_phy_link_rate = + hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + int i; + enum sas_linkrate min, max; + u32 rate_mask = 0; + + if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = sas_phy->phy->maximum_linkrate; + min = r->minimum_linkrate; + } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = r->maximum_linkrate; + min = sas_phy->phy->minimum_linkrate; + } else + return; + + sas_phy->phy->maximum_linkrate = max; + sas_phy->phy->minimum_linkrate = min; + + min -= SAS_LINK_RATE_1_5_GBPS; + max -= SAS_LINK_RATE_1_5_GBPS; + + for (i = 0; i <= max; i++) + rate_mask |= 1 << (i * 2); + + prog_phy_link_rate &= ~0xff; + prog_phy_link_rate |= rate_mask; + + hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, + prog_phy_link_rate); + + phy_hard_reset_v2_hw(hisi_hba, phy_no); +} + static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) 
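The rate_mask loop in phy_set_linkrate_v2_hw() above (and in its v1 counterpart) assumes each supported link rate occupies two bits in the low byte of PROG_PHY_LINK_RATE, starting at 1.5 Gbps. A worked sketch of just that computation, assuming enum sas_linkrate from <scsi/libsas.h>:

#include <scsi/libsas.h>	/* enum sas_linkrate */
#include <linux/types.h>

/* Illustrative only. E.g. max = SAS_LINK_RATE_12_0_GBPS enables
 * bits 0, 2, 4 and 6, i.e. returns 0x55. */
static u32 example_rate_mask(enum sas_linkrate max)
{
	u32 mask = 0;
	int i;

	for (i = 0; i <= max - SAS_LINK_RATE_1_5_GBPS; i++)
		mask |= 1 << (i * 2);

	return mask;
}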
{ int i, bitmap = 0; @@ -1010,29 +1133,24 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) * The callpath to this function and upto writing the write * queue pointer should be safe from interruption. */ -static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s) +static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id, + int *q, int *s) { struct device *dev = &hisi_hba->pdev->dev; struct hisi_sas_dq *dq; u32 r, w; - int queue = hisi_hba->queue; - - while (1) { - dq = &hisi_hba->dq[queue]; - w = dq->wr_point; - r = hisi_sas_read32_relaxed(hisi_hba, - DLVRY_Q_0_RD_PTR + (queue * 0x14)); - if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - queue = (queue + 1) % hisi_hba->queue_count; - if (queue == hisi_hba->queue) { - dev_warn(dev, "could not find free slot\n"); - return -EAGAIN; - } - continue; - } - break; + int queue = dev_id % hisi_hba->queue_count; + + dq = &hisi_hba->dq[queue]; + w = dq->wr_point; + r = hisi_sas_read32_relaxed(hisi_hba, + DLVRY_Q_0_RD_PTR + (queue * 0x14)); + if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { + dev_warn(dev, "full queue=%d r=%d w=%d\n\n", + queue, r, w); + return -EAGAIN; } - hisi_hba->queue = (queue + 1) % hisi_hba->queue_count; + *q = queue; *s = w; return 0; @@ -1653,8 +1771,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, } out: - if (sas_dev && sas_dev->running_req) - sas_dev->running_req--; + if (sas_dev) + atomic64_dec(&sas_dev->running_req); hisi_sas_slot_task_free(hisi_hba, task, slot); sts = ts->stat; @@ -1675,6 +1793,7 @@ static u8 get_ata_protocol(u8 cmd, int direction) case ATA_CMD_NCQ_NON_DATA: return SATA_PROTOCOL_FPDMA; + case ATA_CMD_DOWNLOAD_MICRO: case ATA_CMD_ID_ATA: case ATA_CMD_PMP_READ: case ATA_CMD_READ_LOG_EXT: @@ -1686,18 +1805,27 @@ static u8 get_ata_protocol(u8 cmd, int direction) case ATA_CMD_PIO_WRITE_EXT: return SATA_PROTOCOL_PIO; + case ATA_CMD_DSM: + case ATA_CMD_DOWNLOAD_MICRO_DMA: + case ATA_CMD_PMP_READ_DMA: + case ATA_CMD_PMP_WRITE_DMA: case ATA_CMD_READ: case ATA_CMD_READ_EXT: case ATA_CMD_READ_LOG_DMA_EXT: + case ATA_CMD_READ_STREAM_DMA_EXT: + case ATA_CMD_TRUSTED_RCV_DMA: + case ATA_CMD_TRUSTED_SND_DMA: case ATA_CMD_WRITE: case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE_FUA_EXT: case ATA_CMD_WRITE_QUEUED: case ATA_CMD_WRITE_LOG_DMA_EXT: + case ATA_CMD_WRITE_STREAM_DMA_EXT: return SATA_PROTOCOL_DMA; - case ATA_CMD_DOWNLOAD_MICRO: - case ATA_CMD_DEV_RESET: case ATA_CMD_CHK_POWER: + case ATA_CMD_DEV_RESET: + case ATA_CMD_EDD: case ATA_CMD_FLUSH: case ATA_CMD_FLUSH_EXT: case ATA_CMD_VERIFY: @@ -1970,9 +2098,12 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_ha_struct *sas_ha = &hisi_hba->sha; + u32 bcast_status; hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); - sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); + if (bcast_status & RX_BCAST_CHG_MSK) + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_RX_BCST_ACK_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); @@ -2005,8 +2136,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) if (irq_value1) { if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK | CHL_INT1_DMAC_TX_ECC_ERR_MSK)) - panic("%s: DMAC RX/TX ecc bad error! 
(0x%x)", - dev_name(dev), irq_value1); + panic("%s: DMAC RX/TX ecc bad error!\ + (0x%x)", + dev_name(dev), irq_value1); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value1); @@ -2037,6 +2169,318 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) return IRQ_HANDLED; } +static void +one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) +{ + struct device *dev = &hisi_hba->pdev->dev; + u32 reg_val; + + if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); + dev_warn(dev, "hgc_dqe_acc1b_intr found: \ + Ram address is 0x%08X\n", + (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >> + HGC_DQE_ECC_1B_ADDR_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); + dev_warn(dev, "hgc_iost_acc1b_intr found: \ + Ram address is 0x%08X\n", + (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >> + HGC_IOST_ECC_1B_ADDR_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); + dev_warn(dev, "hgc_itct_acc1b_intr found: \ + Ram address is 0x%08X\n", + (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >> + HGC_ITCT_ECC_1B_ADDR_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); + dev_warn(dev, "hgc_iostl_acc1b_intr found: \ + memory address is 0x%08X\n", + (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> + HGC_LM_DFX_STATUS2_IOSTLIST_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); + dev_warn(dev, "hgc_itctl_acc1b_intr found: \ + memory address is 0x%08X\n", + (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> + HGC_LM_DFX_STATUS2_ITCTLIST_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); + dev_warn(dev, "hgc_cqe_acc1b_intr found: \ + Ram address is 0x%08X\n", + (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >> + HGC_CQE_ECC_1B_ADDR_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); + dev_warn(dev, "rxm_mem0_acc1b_intr found: \ + memory address is 0x%08X\n", + (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> + HGC_RXM_DFX_STATUS14_MEM0_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); + dev_warn(dev, "rxm_mem1_acc1b_intr found: \ + memory address is 0x%08X\n", + (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> + HGC_RXM_DFX_STATUS14_MEM1_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); + dev_warn(dev, "rxm_mem2_acc1b_intr found: \ + memory address is 0x%08X\n", + (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> + HGC_RXM_DFX_STATUS14_MEM2_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); + dev_warn(dev, "rxm_mem3_acc1b_intr found: \ + memory address is 0x%08X\n", + (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> + HGC_RXM_DFX_STATUS15_MEM3_OFF); + } + +} + +static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, + u32 irq_value) +{ + u32 reg_val; + struct device *dev = &hisi_hba->pdev->dev; + + if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); + panic("%s: hgc_dqe_accbad_intr (0x%x) found: \ + Ram address is 0x%08X\n", + 
dev_name(dev), irq_value, + (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >> + HGC_DQE_ECC_MB_ADDR_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); + panic("%s: hgc_iost_accbad_intr (0x%x) found: \ + Ram address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >> + HGC_IOST_ECC_MB_ADDR_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); + panic("%s: hgc_itct_accbad_intr (0x%x) found: \ + Ram address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >> + HGC_ITCT_ECC_MB_ADDR_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); + panic("%s: hgc_iostl_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> + HGC_LM_DFX_STATUS2_IOSTLIST_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); + panic("%s: hgc_itctl_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> + HGC_LM_DFX_STATUS2_ITCTLIST_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); + panic("%s: hgc_cqe_accbad_intr (0x%x) found: \ + Ram address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >> + HGC_CQE_ECC_MB_ADDR_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); + panic("%s: rxm_mem0_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> + HGC_RXM_DFX_STATUS14_MEM0_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); + panic("%s: rxm_mem1_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> + HGC_RXM_DFX_STATUS14_MEM1_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); + panic("%s: rxm_mem2_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> + HGC_RXM_DFX_STATUS14_MEM2_OFF); + } + + if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) { + reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); + panic("%s: rxm_mem3_accbad_intr (0x%x) found: \ + memory address is 0x%08X\n", + dev_name(dev), irq_value, + (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> + HGC_RXM_DFX_STATUS15_MEM3_OFF); + } + +} + +static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_value, irq_msk; + + irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff); + + irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); + if (irq_value) { + one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); + multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); + } + + hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); + + return IRQ_HANDLED; +} + +#define AXI_ERR_NR 8 +static const char axi_err_info[AXI_ERR_NR][32] = { 
+ "IOST_AXI_W_ERR", + "IOST_AXI_R_ERR", + "ITCT_AXI_W_ERR", + "ITCT_AXI_R_ERR", + "SATA_AXI_W_ERR", + "SATA_AXI_R_ERR", + "DQE_AXI_R_ERR", + "CQE_AXI_W_ERR" +}; + +#define FIFO_ERR_NR 5 +static const char fifo_err_info[FIFO_ERR_NR][32] = { + "CQE_WINFO_FIFO", + "CQE_MSG_FIFIO", + "GETDQE_FIFO", + "CMDP_FIFO", + "AWTCTRL_FIFO" +}; + +static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_value, irq_msk, err_value; + struct device *dev = &hisi_hba->pdev->dev; + + irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe); + + irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + if (irq_value) { + if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) { + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + 1 << ENT_INT_SRC3_WP_DEPTH_OFF); + panic("%s: write pointer and depth error (0x%x) \ + found!\n", + dev_name(dev), irq_value); + } + + if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) { + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + 1 << + ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF); + panic("%s: iptt no match slot error (0x%x) found!\n", + dev_name(dev), irq_value); + } + + if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) + panic("%s: read pointer and depth error (0x%x) \ + found!\n", + dev_name(dev), irq_value); + + if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) { + int i; + + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + 1 << ENT_INT_SRC3_AXI_OFF); + err_value = hisi_sas_read32(hisi_hba, + HGC_AXI_FIFO_ERR_INFO); + + for (i = 0; i < AXI_ERR_NR; i++) { + if (err_value & BIT(i)) + panic("%s: %s (0x%x) found!\n", + dev_name(dev), + axi_err_info[i], irq_value); + } + } + + if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) { + int i; + + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + 1 << ENT_INT_SRC3_FIFO_OFF); + err_value = hisi_sas_read32(hisi_hba, + HGC_AXI_FIFO_ERR_INFO); + + for (i = 0; i < FIFO_ERR_NR; i++) { + if (err_value & BIT(AXI_ERR_NR + i)) + panic("%s: %s (0x%x) found!\n", + dev_name(dev), + fifo_err_info[i], irq_value); + } + + } + + if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) { + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + 1 << ENT_INT_SRC3_LM_OFF); + panic("%s: LM add/fetch list error (0x%x) found!\n", + dev_name(dev), irq_value); + } + + if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) { + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + 1 << ENT_INT_SRC3_ABT_OFF); + panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n", + dev_name(dev), irq_value); + } + } + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); + + return IRQ_HANDLED; +} + static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) { struct hisi_sas_cq *cq = p; @@ -2136,6 +2580,16 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) goto end; } + /* check ERR bit of Status Register */ + if (fis->status & ATA_ERR) { + dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no, + fis->status); + disable_phy_v2_hw(hisi_hba, phy_no); + enable_phy_v2_hw(hisi_hba, phy_no); + res = IRQ_NONE; + goto end; + } + if (unlikely(phy_no == 8)) { u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); @@ -2190,6 +2644,11 @@ static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { int_chnl_int_v2_hw, }; +static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = { + fatal_ecc_int_v2_hw, + fatal_axi_int_v2_hw +}; + /** * There is a limitation in the hip06 chipset that we need * to map in all mbigen interrupts, even if they are not used. 
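fatal_axi_int_v2_hw() above masks ENT_INT_SRC_MSK3, reads ENT_INT_SRC3, decodes the AXI/FIFO error bits against the axi_err_info[]/fifo_err_info[] name tables, then acks and restores the mask. A compact, illustrative version of the bit-to-name decode (the real handler panics rather than logging):

#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative only -- walk a status word against a table of bit names. */
static void example_decode_errors(u32 err, const char names[][32], int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (err & BIT(i))
			pr_err("fatal: %s (status 0x%x)\n", names[i], err);
}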
@@ -2245,6 +2704,26 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) } } + for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) { + int idx = i; + + irq = irq_map[idx + 81]; + if (!irq) { + dev_err(dev, "irq init: fail map fatal interrupt %d\n", + idx); + return -ENOENT; + } + + rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0, + DRV_NAME " fatal", hisi_hba); + if (rc) { + dev_err(dev, + "irq init: could not request fatal interrupt %d, rc=%d\n", + irq, rc); + return -ENOENT; + } + } + for (i = 0; i < hisi_hba->queue_count; i++) { int idx = i + 96; /* First cq interrupt is irq96 */ @@ -2303,12 +2782,26 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .phy_enable = enable_phy_v2_hw, .phy_disable = disable_phy_v2_hw, .phy_hard_reset = phy_hard_reset_v2_hw, + .phy_set_linkrate = phy_set_linkrate_v2_hw, + .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), }; static int hisi_sas_v2_probe(struct platform_device *pdev) { + /* + * Check if we should defer the probe before we probe the + * upper layer, as it's hard to defer later on. + */ + int ret = platform_get_irq(pdev, 0); + + if (ret < 0) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "cannot obtain irq\n"); + return ret; + } + return hisi_sas_probe(pdev, &hisi_sas_v2_hw); } @@ -2319,6 +2812,7 @@ static int hisi_sas_v2_remove(struct platform_device *pdev) static const struct of_device_id sas_v2_of_match[] = { { .compatible = "hisilicon,hip06-sas-v2",}, + { .compatible = "hisilicon,hip07-sas-v2",}, {}, }; MODULE_DEVICE_TABLE(of, sas_v2_of_match); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index d007ec18179af9789a12e6ca7c9f5878374dc593..cbc0c5fe5a60188515dab3dd99be83ce62732e1c 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -276,6 +276,9 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); +static int wait_for_device_to_become_ready(struct ctlr_info *h, + unsigned char lunaddr[], + int reply_queue); static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, int wait_for_ready); static inline void finish_cmd(struct CommandList *c); @@ -700,9 +703,7 @@ static ssize_t lunid_show(struct device *dev, } memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); spin_unlock_irqrestore(&h->lock, flags); - return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - lunid[0], lunid[1], lunid[2], lunid[3], - lunid[4], lunid[5], lunid[6], lunid[7]); + return snprintf(buf, 20, "0x%8phN\n", lunid); } static ssize_t unique_id_show(struct device *dev, @@ -864,6 +865,16 @@ static ssize_t path_info_show(struct device *dev, return output_len; } +static ssize_t host_show_ctlr_num(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%d\n", h->ctlr); +} + static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); @@ -887,6 +898,8 @@ static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); static DEVICE_ATTR(lockup_detected, S_IRUGO, host_show_lockup_detected, NULL); +static DEVICE_ATTR(ctlr_num, S_IRUGO, + host_show_ctlr_num, NULL); static 
struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level, @@ -907,6 +920,7 @@ static struct device_attribute *hpsa_shost_attrs[] = { &dev_attr_hp_ssd_smart_path_status, &dev_attr_raid_offload_debug, &dev_attr_lockup_detected, + &dev_attr_ctlr_num, NULL, }; @@ -1001,7 +1015,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, { if (likely(h->transMethod & CFGTBL_Trans_Performant)) { c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); - if (unlikely(!h->msix_vector)) + if (unlikely(!h->msix_vectors)) return; if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) c->Header.ReplyQueue = @@ -1543,10 +1557,9 @@ static void hpsa_monitor_offline_device(struct ctlr_info *h, /* Device is not on the list, add it. */ device = kmalloc(sizeof(*device), GFP_KERNEL); - if (!device) { - dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); + if (!device) return; - } + memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); spin_lock_irqsave(&h->offline_device_lock, flags); list_add_tail(&device->offline_list, &h->offline_device_list); @@ -2009,7 +2022,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, static int hpsa_slave_alloc(struct scsi_device *sdev) { - struct hpsa_scsi_dev_t *sd; + struct hpsa_scsi_dev_t *sd = NULL; unsigned long flags; struct ctlr_info *h; @@ -2026,7 +2039,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) sd->target = sdev_id(sdev); sd->lun = sdev->lun; } - } else + } + if (!sd) sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), sdev_id(sdev), sdev->lun); @@ -2127,17 +2141,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, GFP_KERNEL); - if (!h->cmd_sg_list) { - dev_err(&h->pdev->dev, "Failed to allocate SG list\n"); + if (!h->cmd_sg_list) return -ENOMEM; - } + for (i = 0; i < h->nr_cmds; i++) { h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * h->chainsize, GFP_KERNEL); - if (!h->cmd_sg_list[i]) { - dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n"); + if (!h->cmd_sg_list[i]) goto clean; - } + } return 0; @@ -2540,7 +2552,7 @@ static void complete_scsi_command(struct CommandList *cp) if ((unlikely(hpsa_is_pending_event(cp)))) { if (cp->reset_pending) - return hpsa_cmd_resolve_and_free(h, cp); + return hpsa_cmd_free_and_done(h, cp, cmd); if (cp->abort_pending) return hpsa_cmd_abort_and_free(h, cp, cmd); } @@ -2823,14 +2835,8 @@ static void hpsa_print_cmd(struct ctlr_info *h, char *txt, const u8 *cdb = c->Request.CDB; const u8 *lun = c->Header.LUN.LunAddrBytes; - dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" - " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", - txt, lun[0], lun[1], lun[2], lun[3], - lun[4], lun[5], lun[6], lun[7], - cdb[0], cdb[1], cdb[2], cdb[3], - cdb[4], cdb[5], cdb[6], cdb[7], - cdb[8], cdb[9], cdb[10], cdb[11], - cdb[12], cdb[13], cdb[14], cdb[15]); + dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n", + txt, lun, cdb); } static void hpsa_scsi_interpret_error(struct ctlr_info *h, @@ -3079,6 +3085,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, if (unlikely(rc)) atomic_set(&dev->reset_cmds_out, 0); + else + wait_for_device_to_become_ready(h, scsi3addr, 0); mutex_unlock(&h->reset_mutex); return rc; @@ -3443,11 +3451,8 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr, struct bmic_sense_subsystem_info *ssi; ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); - if (ssi == NULL) { - 
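The hpsa logging changes above (lunid_show(), hpsa_print_cmd(), and the reset-as-abort messages below) switch to the kernel's %*ph printk extension, which hex-dumps a small buffer straight from the format string; the N suffix means no separator between bytes. A minimal illustration with hypothetical buffers:

#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative only: %8phN prints 8 bytes, %16phN prints 16 bytes. */
static void example_log_lun_cdb(const u8 lun[8], const u8 cdb[16])
{
	pr_info("LUN:%8phN CDB:%16phN\n", lun, cdb);
}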
dev_warn(&h->pdev->dev, - "%s: out of memory\n", __func__); + if (!ssi) return; - } rc = hpsa_bmic_sense_subsystem_information(h, scsi3addr, 0, ssi, sizeof(*ssi)); @@ -3622,8 +3627,32 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, struct ReportExtendedLUNdata *buf, int bufsize) { - return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, - HPSA_REPORT_PHYS_EXTENDED); + int rc; + struct ReportLUNdata *lbuf; + + rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize, + HPSA_REPORT_PHYS_EXTENDED); + if (!rc || !hpsa_allow_any) + return rc; + + /* REPORT PHYS EXTENDED is not supported */ + lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL); + if (!lbuf) + return -ENOMEM; + + rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0); + if (!rc) { + int i; + u32 nphys; + + /* Copy ReportLUNdata header */ + memcpy(buf, lbuf, 8); + nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8; + for (i = 0; i < nphys; i++) + memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8); + } + kfree(lbuf); + return rc; } static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, @@ -3840,6 +3869,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, sizeof(this_device->vendor)); memcpy(this_device->model, &inq_buff[16], sizeof(this_device->model)); + this_device->rev = inq_buff[2]; memset(this_device->device_id, 0, sizeof(this_device->device_id)); if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, @@ -3929,10 +3959,14 @@ static void figure_bus_target_lun(struct ctlr_info *h, if (!is_logical_dev_addr_mode(lunaddrbytes)) { /* physical device, target and lun filled in later */ - if (is_hba_lunid(lunaddrbytes)) + if (is_hba_lunid(lunaddrbytes)) { + int bus = HPSA_HBA_BUS; + + if (!device->rev) + bus = HPSA_LEGACY_HBA_BUS; hpsa_set_bus_target_lun(device, - HPSA_HBA_BUS, 0, lunid & 0x3fff); - else + bus, 0, lunid & 0x3fff); + } else /* defer target, lun assignment for physical devices */ hpsa_set_bus_target_lun(device, HPSA_PHYSICAL_DEVICE_BUS, -1, -1); @@ -4295,8 +4329,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); if (!currentsd[i]) { - dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", - __FILE__, __LINE__); h->drv_req_rescan = 1; goto out; } @@ -5482,7 +5514,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) dev = cmd->device->hostdata; if (!dev) { - cmd->result = NOT_READY << 16; /* host byte */ + cmd->result = DID_NO_CONNECT << 16; cmd->scsi_done(cmd); return 0; } @@ -5563,6 +5595,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh) if (unlikely(lockup_detected(h))) return hpsa_scan_complete(h); + /* + * Do the scan after a reset completion + */ + if (h->reset_in_progress) { + h->drv_req_rescan = 1; + return; + } + hpsa_update_scsi_devices(h); hpsa_scan_complete(h); @@ -5618,7 +5658,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h) sh->sg_tablesize = h->maxsgentries; sh->transportt = hpsa_sas_transport_template; sh->hostdata[0] = (unsigned long) h; - sh->irq = h->intr[h->intr_mode]; + sh->irq = pci_irq_vector(h->pdev, 0); sh->unique_id = sh->irq; h->scsi_host = sh; @@ -5993,11 +6033,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, if (h->raid_offload_debug > 0) dev_info(&h->pdev->dev, - "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n", h->scsi_host->host_no, dev->bus, dev->target, dev->lun, - "Reset as abort", - 
scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], - scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); + "Reset as abort", scsi3addr); if (!dev->offload_enabled) { dev_warn(&h->pdev->dev, @@ -6014,32 +6052,28 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, /* send the reset */ if (h->raid_offload_debug > 0) dev_info(&h->pdev->dev, - "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - psa[0], psa[1], psa[2], psa[3], - psa[4], psa[5], psa[6], psa[7]); + "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n", + psa); rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue); if (rc != 0) { dev_warn(&h->pdev->dev, - "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - psa[0], psa[1], psa[2], psa[3], - psa[4], psa[5], psa[6], psa[7]); + "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n", + psa); return rc; /* failed to reset */ } /* wait for device to recover */ if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { dev_warn(&h->pdev->dev, - "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - psa[0], psa[1], psa[2], psa[3], - psa[4], psa[5], psa[6], psa[7]); + "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n", + psa); return -1; /* failed to recover */ } /* device recovered */ dev_info(&h->pdev->dev, - "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - psa[0], psa[1], psa[2], psa[3], - psa[4], psa[5], psa[6], psa[7]); + "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n", + psa); return rc; /* success */ } @@ -6657,8 +6691,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - ioc = (BIG_IOCTL_Command_struct *) - kmalloc(sizeof(*ioc), GFP_KERNEL); + ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) { status = -ENOMEM; goto cleanup1; @@ -7652,67 +7685,41 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) static void hpsa_disable_interrupt_mode(struct ctlr_info *h) { - if (h->msix_vector) { - if (h->pdev->msix_enabled) - pci_disable_msix(h->pdev); - h->msix_vector = 0; - } else if (h->msi_vector) { - if (h->pdev->msi_enabled) - pci_disable_msi(h->pdev); - h->msi_vector = 0; - } + pci_free_irq_vectors(h->pdev); + h->msix_vectors = 0; } /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use legacy INTx mode. 
*/ -static void hpsa_interrupt_mode(struct ctlr_info *h) +static int hpsa_interrupt_mode(struct ctlr_info *h) { -#ifdef CONFIG_PCI_MSI - int err, i; - struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; - - for (i = 0; i < MAX_REPLY_QUEUES; i++) { - hpsa_msix_entries[i].vector = 0; - hpsa_msix_entries[i].entry = i; - } + unsigned int flags = PCI_IRQ_LEGACY; + int ret; /* Some boards advertise MSI but don't really support it */ - if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || - (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) - goto default_int_mode; - if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { - dev_info(&h->pdev->dev, "MSI-X capable controller\n"); - h->msix_vector = MAX_REPLY_QUEUES; - if (h->msix_vector > num_online_cpus()) - h->msix_vector = num_online_cpus(); - err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, - 1, h->msix_vector); - if (err < 0) { - dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); - h->msix_vector = 0; - goto single_msi_mode; - } else if (err < h->msix_vector) { - dev_warn(&h->pdev->dev, "only %d MSI-X vectors " - "available\n", err); + switch (h->board_id) { + case 0x40700E11: + case 0x40800E11: + case 0x40820E11: + case 0x40830E11: + break; + default: + ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + if (ret > 0) { + h->msix_vectors = ret; + return 0; } - h->msix_vector = err; - for (i = 0; i < h->msix_vector; i++) - h->intr[i] = hpsa_msix_entries[i].vector; - return; - } -single_msi_mode: - if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { - dev_info(&h->pdev->dev, "MSI capable controller\n"); - if (!pci_enable_msi(h->pdev)) - h->msi_vector = 1; - else - dev_warn(&h->pdev->dev, "MSI init failed\n"); + + flags |= PCI_IRQ_MSI; + break; } -default_int_mode: -#endif /* CONFIG_PCI_MSI */ - /* if we get here we're going to use the default interrupt mode */ - h->intr[h->intr_mode] = h->pdev->irq; + + ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags); + if (ret < 0) + return ret; + return 0; } static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) @@ -8068,7 +8075,9 @@ static int hpsa_pci_init(struct ctlr_info *h) pci_set_master(h->pdev); - hpsa_interrupt_mode(h); + err = hpsa_interrupt_mode(h); + if (err) + goto clean1; err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) goto clean2; /* intmode+region, pci */ @@ -8104,6 +8113,7 @@ static int hpsa_pci_init(struct ctlr_info *h) h->vaddr = NULL; clean2: /* intmode+region, pci */ hpsa_disable_interrupt_mode(h); +clean1: /* * call pci_disable_device before pci_release_regions per * Documentation/PCI/pci.txt @@ -8237,34 +8247,20 @@ static int hpsa_alloc_cmd_pool(struct ctlr_info *h) return -ENOMEM; } -static void hpsa_irq_affinity_hints(struct ctlr_info *h) -{ - int i, cpu; - - cpu = cpumask_first(cpu_online_mask); - for (i = 0; i < h->msix_vector; i++) { - irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); - cpu = cpumask_next(cpu, cpu_online_mask); - } -} - /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ static void hpsa_free_irqs(struct ctlr_info *h) { int i; - if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { + if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) { /* Single reply queue, only one irq to free */ - i = h->intr_mode; - irq_set_affinity_hint(h->intr[i], NULL); - free_irq(h->intr[i], &h->q[i]); - h->q[i] = 0; + free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]); + h->q[h->intr_mode] = 0; return; } - for (i = 0; i < h->msix_vector; i++) { - 
irq_set_affinity_hint(h->intr[i], NULL); - free_irq(h->intr[i], &h->q[i]); + for (i = 0; i < h->msix_vectors; i++) { + free_irq(pci_irq_vector(h->pdev, i), &h->q[i]); h->q[i] = 0; } for (; i < MAX_REPLY_QUEUES; i++) @@ -8285,11 +8281,11 @@ static int hpsa_request_irqs(struct ctlr_info *h, for (i = 0; i < MAX_REPLY_QUEUES; i++) h->q[i] = (u8) i; - if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { + if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) { /* If performant mode and MSI-X, use multiple reply queues */ - for (i = 0; i < h->msix_vector; i++) { + for (i = 0; i < h->msix_vectors; i++) { sprintf(h->intrname[i], "%s-msix%d", h->devname, i); - rc = request_irq(h->intr[i], msixhandler, + rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler, 0, h->intrname[i], &h->q[i]); if (rc) { @@ -8297,9 +8293,9 @@ static int hpsa_request_irqs(struct ctlr_info *h, dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", - h->intr[i], h->devname); + pci_irq_vector(h->pdev, i), h->devname); for (j = 0; j < i; j++) { - free_irq(h->intr[j], &h->q[j]); + free_irq(pci_irq_vector(h->pdev, j), &h->q[j]); h->q[j] = 0; } for (; j < MAX_REPLY_QUEUES; j++) @@ -8307,33 +8303,27 @@ static int hpsa_request_irqs(struct ctlr_info *h, return rc; } } - hpsa_irq_affinity_hints(h); } else { /* Use single reply pool */ - if (h->msix_vector > 0 || h->msi_vector) { - if (h->msix_vector) - sprintf(h->intrname[h->intr_mode], - "%s-msix", h->devname); - else - sprintf(h->intrname[h->intr_mode], - "%s-msi", h->devname); - rc = request_irq(h->intr[h->intr_mode], + if (h->msix_vectors > 0 || h->pdev->msi_enabled) { + sprintf(h->intrname[0], "%s-msi%s", h->devname, + h->msix_vectors ? "x" : ""); + rc = request_irq(pci_irq_vector(h->pdev, 0), msixhandler, 0, - h->intrname[h->intr_mode], + h->intrname[0], &h->q[h->intr_mode]); } else { sprintf(h->intrname[h->intr_mode], "%s-intx", h->devname); - rc = request_irq(h->intr[h->intr_mode], + rc = request_irq(pci_irq_vector(h->pdev, 0), intxhandler, IRQF_SHARED, - h->intrname[h->intr_mode], + h->intrname[0], &h->q[h->intr_mode]); } - irq_set_affinity_hint(h->intr[h->intr_mode], NULL); } if (rc) { dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", - h->intr[h->intr_mode], h->devname); + pci_irq_vector(h->pdev, 0), h->devname); hpsa_free_irqs(h); return -ENODEV; } @@ -8599,14 +8589,12 @@ static int hpsa_luns_changed(struct ctlr_info *h) */ if (!h->lastlogicals) - goto out; + return rc; logdev = kzalloc(sizeof(*logdev), GFP_KERNEL); - if (!logdev) { - dev_warn(&h->pdev->dev, - "Out of memory, can't track lun changes.\n"); - goto out; - } + if (!logdev) + return rc; + if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { dev_warn(&h->pdev->dev, "report luns failed, can't track lun changes.\n"); @@ -8634,6 +8622,14 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work) if (h->remove_in_progress) return; + /* + * Do the scan after the reset + */ + if (h->reset_in_progress) { + h->drv_req_rescan = 1; + return; + } + if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { scsi_host_get(h->scsi_host); hpsa_ack_ctlr_events(h); @@ -8992,11 +8988,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) return; options = kzalloc(sizeof(*options), GFP_KERNEL); - if (!options) { - dev_err(&h->pdev->dev, - "Error: failed to disable rld caching, during alloc.\n"); + if (!options) return; - } c = cmd_alloc(h); @@ -9519,7 +9512,7 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) return rc; } - h->nreply_queues = h->msix_vector > 
0 ? h->msix_vector : 1; + h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1; hpsa_get_max_perf_mode_cmds(h); /* Performant mode ring buffer and supporting data structures */ h->reply_queue_size = h->max_commands * sizeof(u64); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 82cdfad874f3aa363d4989837d2b9455c8e0acdb..64e98295b70703bdf1d9a07a483f472da75a35c5 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -69,6 +69,7 @@ struct hpsa_scsi_dev_t { u64 sas_address; unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ unsigned char model[16]; /* bytes 16-31 of inquiry data */ + unsigned char rev; /* byte 2 of inquiry data */ unsigned char raid_level; /* from inquiry page 0xC1 */ unsigned char volume_offline; /* discovered via TUR or VPD */ u16 queue_depth; /* max queue_depth for this device */ @@ -175,9 +176,7 @@ struct ctlr_info { # define DOORBELL_INT 1 # define SIMPLE_MODE_INT 2 # define MEMQ_MODE_INT 3 - unsigned int intr[MAX_REPLY_QUEUES]; - unsigned int msix_vector; - unsigned int msi_vector; + unsigned int msix_vectors; int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ struct access_method access; @@ -402,6 +401,7 @@ struct offline_device_entry { #define HPSA_RAID_VOLUME_BUS 1 #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2 #define HPSA_HBA_BUS 0 +#define HPSA_LEGACY_HBA_BUS 3 /* Send the command to the hardware @@ -464,7 +464,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) unsigned long register_value = FIFO_EMPTY; /* msi auto clears the interrupt pending bit. */ - if (unlikely(!(h->msi_vector || h->msix_vector))) { + if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) { /* flush the controller write of the reply queue by reading * outbound doorbell status register. */ diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index a83f705ed8a54172f21e8655e52c6c618edbcb9a..db17ad15b0c1580222031247eac8b25b41c9a0a8 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 7e487c78279cbd76d5fee217394d9b431c0ada7b..78b72c28a55df36eaf3f03babde02dfe13598e05 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -1701,14 +1702,14 @@ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt) /** * ibmvfc_bsg_timeout - Handle a BSG timeout - * @job: struct fc_bsg_job that timed out + * @job: struct bsg_job that timed out * * Returns: * 0 on success / other on failure **/ -static int ibmvfc_bsg_timeout(struct fc_bsg_job *job) +static int ibmvfc_bsg_timeout(struct bsg_job *job) { - struct ibmvfc_host *vhost = shost_priv(job->shost); + struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job)); unsigned long port_id = (unsigned long)job->dd_data; struct ibmvfc_event *evt; struct ibmvfc_tmf *tmf; @@ -1814,41 +1815,43 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id) /** * ibmvfc_bsg_request - Handle a BSG request - * @job: struct fc_bsg_job to be executed + * @job: struct bsg_job to be executed * * Returns: * 0 on success / other on failure **/ -static int ibmvfc_bsg_request(struct fc_bsg_job *job) +static int ibmvfc_bsg_request(struct bsg_job *job) { - struct ibmvfc_host *vhost = shost_priv(job->shost); - struct fc_rport *rport = job->rport; + struct ibmvfc_host *vhost = 
shost_priv(fc_bsg_to_shost(job)); + struct fc_rport *rport = fc_bsg_to_rport(job); struct ibmvfc_passthru_mad *mad; struct ibmvfc_event *evt; union ibmvfc_iu rsp_iu; unsigned long flags, port_id = -1; - unsigned int code = job->request->msgcode; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + unsigned int code = bsg_request->msgcode; int rc = 0, req_seg, rsp_seg, issue_login = 0; u32 fc_flags, rsp_len; ENTER; - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (rport) port_id = rport->port_id; switch (code) { case FC_BSG_HST_ELS_NOLOGIN: - port_id = (job->request->rqst_data.h_els.port_id[0] << 16) | - (job->request->rqst_data.h_els.port_id[1] << 8) | - job->request->rqst_data.h_els.port_id[2]; + port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) | + (bsg_request->rqst_data.h_els.port_id[1] << 8) | + bsg_request->rqst_data.h_els.port_id[2]; case FC_BSG_RPT_ELS: fc_flags = IBMVFC_FC_ELS; break; case FC_BSG_HST_CT: issue_login = 1; - port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) | - (job->request->rqst_data.h_ct.port_id[1] << 8) | - job->request->rqst_data.h_ct.port_id[2]; + port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) | + (bsg_request->rqst_data.h_ct.port_id[1] << 8) | + bsg_request->rqst_data.h_ct.port_id[2]; case FC_BSG_RPT_CT: fc_flags = IBMVFC_FC_CT_IU; break; @@ -1937,13 +1940,14 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job) if (rsp_iu.passthru.common.status) rc = -EIO; else - job->reply->reply_payload_rcv_len = rsp_len; + bsg_reply->reply_payload_rcv_len = rsp_len; spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_free_event(evt); spin_unlock_irqrestore(vhost->host->host_lock, flags); - job->reply->result = rc; - job->job_done(job); + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); rc = 0; out: dma_unmap_sg(vhost->dev, job->request_payload.sg_list, diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index d9534ee6ef524fb1fab03680bdde2ed34a4202b7..50cd01165e355b092fb954d2399f42798d5e3e00 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -95,6 +95,7 @@ static int fast_fail = 1; static int client_reserve = 1; static char partition_name[97] = "UNKNOWN"; static unsigned int partition_number = -1; +static LIST_HEAD(ibmvscsi_head); static struct scsi_transport_template *ibmvscsi_transport_template; @@ -232,6 +233,7 @@ static void ibmvscsi_task(void *data) while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) { ibmvscsi_handle_crq(crq, hostdata); crq->valid = VIOSRP_CRQ_FREE; + wmb(); } vio_enable_interrupts(vdev); @@ -240,6 +242,7 @@ static void ibmvscsi_task(void *data) vio_disable_interrupts(vdev); ibmvscsi_handle_crq(crq, hostdata); crq->valid = VIOSRP_CRQ_FREE; + wmb(); } else { done = 1; } @@ -992,7 +995,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct) if (unlikely(rsp->opcode != SRP_RSP)) { if (printk_ratelimit()) dev_warn(evt_struct->hostdata->dev, - "bad SRP RSP type %d\n", rsp->opcode); + "bad SRP RSP type %#02x\n", rsp->opcode); } if (cmnd) { @@ -2270,6 +2273,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) } dev_set_drvdata(&vdev->dev, hostdata); + list_add_tail(&hostdata->host_list, &ibmvscsi_head); return 0; add_srp_port_failed: @@ -2291,6 +2295,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct 
vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); + list_del(&hostdata->host_list); unmap_persist_bufs(hostdata); release_event_pool(&hostdata->pool, hostdata); ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h index e0f6c3aeb4eef35aa390afaa5d7cd6174d8f7a48..3a7875575616e3eec27443a0db7ef9882fcc2528 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.h +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h @@ -90,6 +90,7 @@ struct event_pool { /* all driver data associated with a host adapter */ struct ibmvscsi_host_data { + struct list_head host_list; atomic_t request_limit; int client_migrated; int reset_crq; diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 642b739ad0da33c63bb7667f65cbe3f21db87e2a..3d3768aaab4f2bb79ccbaf40f1f569114e4f9b30 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -22,7 +22,7 @@ * ****************************************************************************/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -81,7 +82,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, } } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { if (se_cmd->data_direction == DMA_TO_DEVICE) { - /* residual data from an overflow write */ + /* residual data from an overflow write */ rsp->flags = SRP_RSP_FLAG_DOOVER; rsp->data_out_res_cnt = cpu_to_be32(residual_count); } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { @@ -101,7 +102,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, * and the function returns TRUE. * * EXECUTION ENVIRONMENT: - * Interrupt or Process environment + * Interrupt or Process environment */ static bool connection_broken(struct scsi_info *vscsi) { @@ -324,7 +325,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask, } /** - * ibmvscsis_send_init_message() - send initialize message to the client + * ibmvscsis_send_init_message() - send initialize message to the client * @vscsi: Pointer to our adapter structure * @format: Which Init Message format to send * @@ -382,13 +383,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) vscsi->cmd_q.base_addr); if (crq) { *format = (uint)(crq->format); - rc = ERROR; + rc = ERROR; crq->valid = INVALIDATE_CMD_RESP_EL; dma_rmb(); } } else { *format = (uint)(crq->format); - rc = ERROR; + rc = ERROR; crq->valid = INVALIDATE_CMD_RESP_EL; dma_rmb(); } @@ -396,166 +397,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) return rc; } -/** - * ibmvscsis_establish_new_q() - Establish new CRQ queue - * @vscsi: Pointer to our adapter structure - * @new_state: New state being established after resetting the queue - * - * Must be called with interrupt lock held. 
- */ -static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state) -{ - long rc = ADAPT_SUCCESS; - uint format; - - vscsi->flags &= PRESERVE_FLAG_FIELDS; - vscsi->rsp_q_timer.timer_pops = 0; - vscsi->debit = 0; - vscsi->credit = 0; - - rc = vio_enable_interrupts(vscsi->dma_dev); - if (rc) { - pr_warn("reset_queue: failed to enable interrupts, rc %ld\n", - rc); - return rc; - } - - rc = ibmvscsis_check_init_msg(vscsi, &format); - if (rc) { - dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n", - rc); - return rc; - } - - if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) { - rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); - switch (rc) { - case H_SUCCESS: - case H_DROPPED: - case H_CLOSED: - rc = ADAPT_SUCCESS; - break; - - case H_PARAMETER: - case H_HARDWARE: - break; - - default: - vscsi->state = UNDEFINED; - rc = H_HARDWARE; - break; - } - } - - return rc; -} - -/** - * ibmvscsis_reset_queue() - Reset CRQ Queue - * @vscsi: Pointer to our adapter structure - * @new_state: New state to establish after resetting the queue - * - * This function calls h_free_q and then calls h_reg_q and does all - * of the bookkeeping to get us back to where we can communicate. - * - * Actually, we don't always call h_free_crq. A problem was discovered - * where one partition would close and reopen his queue, which would - * cause his partner to get a transport event, which would cause him to - * close and reopen his queue, which would cause the original partition - * to get a transport event, etc., etc. To prevent this, we don't - * actually close our queue if the client initiated the reset, (i.e. - * either we got a transport event or we have detected that the client's - * queue is gone) - * - * EXECUTION ENVIRONMENT: - * Process environment, called with interrupt lock held - */ -static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state) -{ - int bytes; - long rc = ADAPT_SUCCESS; - - pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); - - /* don't reset, the client did it for us */ - if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { - vscsi->flags &= PRESERVE_FLAG_FIELDS; - vscsi->rsp_q_timer.timer_pops = 0; - vscsi->debit = 0; - vscsi->credit = 0; - vscsi->state = new_state; - vio_enable_interrupts(vscsi->dma_dev); - } else { - rc = ibmvscsis_free_command_q(vscsi); - if (rc == ADAPT_SUCCESS) { - vscsi->state = new_state; - - bytes = vscsi->cmd_q.size * PAGE_SIZE; - rc = h_reg_crq(vscsi->dds.unit_id, - vscsi->cmd_q.crq_token, bytes); - if (rc == H_CLOSED || rc == H_SUCCESS) { - rc = ibmvscsis_establish_new_q(vscsi, - new_state); - } - - if (rc != ADAPT_SUCCESS) { - pr_debug("reset_queue: reg_crq rc %ld\n", rc); - - vscsi->state = ERR_DISCONNECTED; - vscsi->flags |= RESPONSE_Q_DOWN; - ibmvscsis_free_command_q(vscsi); - } - } else { - vscsi->state = ERR_DISCONNECTED; - vscsi->flags |= RESPONSE_Q_DOWN; - } - } -} - -/** - * ibmvscsis_free_cmd_resources() - Free command resources - * @vscsi: Pointer to our adapter structure - * @cmd: Command which is not longer in use - * - * Must be called with interrupt lock held. - */ -static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, - struct ibmvscsis_cmd *cmd) -{ - struct iu_entry *iue = cmd->iue; - - switch (cmd->type) { - case TASK_MANAGEMENT: - case SCSI_CDB: - /* - * When the queue goes down this value is cleared, so it - * cannot be cleared in this general purpose function. 
- */ - if (vscsi->debit) - vscsi->debit -= 1; - break; - case ADAPTER_MAD: - vscsi->flags &= ~PROCESSING_MAD; - break; - case UNSET_TYPE: - break; - default: - dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", - cmd->type); - break; - } - - cmd->iue = NULL; - list_add_tail(&cmd->list, &vscsi->free_cmd); - srp_iu_put(iue); - - if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && - list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { - vscsi->flags &= ~WAIT_FOR_IDLE; - complete(&vscsi->wait_idle); - } -} - /** * ibmvscsis_disconnect() - Helper function to disconnect * @work: Pointer to work_struct, gives access to our adapter structure @@ -575,7 +416,6 @@ static void ibmvscsis_disconnect(struct work_struct *work) proc_work); u16 new_state; bool wait_idle = false; - long rc = ADAPT_SUCCESS; spin_lock_bh(&vscsi->intr_lock); new_state = vscsi->new_state; @@ -589,7 +429,7 @@ static void ibmvscsis_disconnect(struct work_struct *work) * should transitition to the new state */ switch (vscsi->state) { - /* Should never be called while in this state. */ + /* Should never be called while in this state. */ case NO_QUEUE: /* * Can never transition from this state; @@ -628,30 +468,24 @@ static void ibmvscsis_disconnect(struct work_struct *work) vscsi->state = new_state; break; - /* - * If this is a transition into an error state. - * a client is attempting to establish a connection - * and has violated the RPA protocol. - * There can be nothing pending on the adapter although - * there can be requests in the command queue. - */ case WAIT_ENABLED: - case PART_UP_WAIT_ENAB: switch (new_state) { - case ERR_DISCONNECT: - vscsi->flags |= RESPONSE_Q_DOWN; + case UNCONFIGURING: vscsi->state = new_state; + vscsi->flags |= RESPONSE_Q_DOWN; vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED); - ibmvscsis_free_command_q(vscsi); - break; - case ERR_DISCONNECT_RECONNECT: - ibmvscsis_reset_queue(vscsi, WAIT_ENABLED); + dma_rmb(); + if (vscsi->flags & CFG_SLEEPING) { + vscsi->flags &= ~CFG_SLEEPING; + complete(&vscsi->unconfig); + } break; /* should never happen */ + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: case WAIT_IDLE: - rc = ERROR; dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n", vscsi->state); break; @@ -660,6 +494,13 @@ static void ibmvscsis_disconnect(struct work_struct *work) case WAIT_IDLE: switch (new_state) { + case UNCONFIGURING: + vscsi->flags |= RESPONSE_Q_DOWN; + vscsi->state = new_state; + vscsi->flags &= ~(SCHEDULE_DISCONNECT | + DISCONNECT_SCHEDULED); + ibmvscsis_free_command_q(vscsi); + break; case ERR_DISCONNECT: case ERR_DISCONNECT_RECONNECT: vscsi->state = new_state; @@ -764,45 +605,348 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state, else state = vscsi->state; - switch (state) { - case NO_QUEUE: - case UNCONFIGURING: - break; + switch (state) { + case NO_QUEUE: + case UNCONFIGURING: + break; + + case ERR_DISCONNECTED: + case ERR_DISCONNECT: + case UNDEFINED: + if (new_state == UNCONFIGURING) + vscsi->new_state = new_state; + break; + + case ERR_DISCONNECT_RECONNECT: + switch (new_state) { + case UNCONFIGURING: + case ERR_DISCONNECT: + vscsi->new_state = new_state; + break; + default: + break; + } + break; + + case WAIT_ENABLED: + case WAIT_IDLE: + case WAIT_CONNECTION: + case CONNECTED: + case SRP_PROCESSING: + vscsi->new_state = new_state; + break; + + default: + break; + } + } + + pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", + vscsi->flags, 
vscsi->new_state); +} + +/** + * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. + */ +static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) +{ + long rc = ADAPT_SUCCESS; + + switch (vscsi->state) { + case NO_QUEUE: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case ERR_DISCONNECTED: + case UNCONFIGURING: + case UNDEFINED: + rc = ERROR; + break; + + case WAIT_CONNECTION: + vscsi->state = CONNECTED; + break; + + case WAIT_IDLE: + case SRP_PROCESSING: + case CONNECTED: + case WAIT_ENABLED: + default: + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", + vscsi->state); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_handle_init_msg() - Respond to an Init Message + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. + */ +static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) +{ + long rc = ADAPT_SUCCESS; + + switch (vscsi->state) { + case WAIT_CONNECTION: + rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); + switch (rc) { + case H_SUCCESS: + vscsi->state = CONNECTED; + break; + + case H_PARAMETER: + dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + break; + + case H_DROPPED: + dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", + rc); + rc = ERROR; + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + break; + + case H_CLOSED: + pr_warn("init_msg: failed to send, rc %ld\n", rc); + rc = 0; + break; + } + break; + + case UNDEFINED: + rc = ERROR; + break; + + case UNCONFIGURING: + break; + + case WAIT_ENABLED: + case CONNECTED: + case SRP_PROCESSING: + case WAIT_IDLE: + case NO_QUEUE: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case ERR_DISCONNECTED: + default: + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", + vscsi->state); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_init_msg() - Respond to an init message + * @vscsi: Pointer to our adapter structure + * @crq: Pointer to CRQ element containing the Init Message + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) +{ + long rc = ADAPT_SUCCESS; + + pr_debug("init_msg: state 0x%hx\n", vscsi->state); + + rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, + (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, + 0); + if (rc == H_SUCCESS) { + vscsi->client_data.partition_number = + be64_to_cpu(*(u64 *)vscsi->map_buf); + pr_debug("init_msg, part num %d\n", + vscsi->client_data.partition_number); + } else { + pr_debug("init_msg h_vioctl rc %ld\n", rc); + rc = ADAPT_SUCCESS; + } + + if (crq->format == INIT_MSG) { + rc = ibmvscsis_handle_init_msg(vscsi); + } else if (crq->format == INIT_COMPLETE_MSG) { + rc = ibmvscsis_handle_init_compl_msg(vscsi); + } else { + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid format %d\n", + (uint)crq->format); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + } + + return rc; +} + +/** + * ibmvscsis_establish_new_q() - Establish new CRQ queue + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. 
+ */ +static long ibmvscsis_establish_new_q(struct scsi_info *vscsi) +{ + long rc = ADAPT_SUCCESS; + uint format; + + vscsi->flags &= PRESERVE_FLAG_FIELDS; + vscsi->rsp_q_timer.timer_pops = 0; + vscsi->debit = 0; + vscsi->credit = 0; + + rc = vio_enable_interrupts(vscsi->dma_dev); + if (rc) { + pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n", + rc); + return rc; + } + + rc = ibmvscsis_check_init_msg(vscsi, &format); + if (rc) { + dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n", + rc); + return rc; + } + + if (format == UNUSED_FORMAT) { + rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); + switch (rc) { + case H_SUCCESS: + case H_DROPPED: + case H_CLOSED: + rc = ADAPT_SUCCESS; + break; + + case H_PARAMETER: + case H_HARDWARE: + break; + + default: + vscsi->state = UNDEFINED; + rc = H_HARDWARE; + break; + } + } else if (format == INIT_MSG) { + rc = ibmvscsis_handle_init_msg(vscsi); + } + + return rc; +} + +/** + * ibmvscsis_reset_queue() - Reset CRQ Queue + * @vscsi: Pointer to our adapter structure + * + * This function calls h_free_q and then calls h_reg_q and does all + * of the bookkeeping to get us back to where we can communicate. + * + * Actually, we don't always call h_free_crq. A problem was discovered + * where one partition would close and reopen his queue, which would + * cause his partner to get a transport event, which would cause him to + * close and reopen his queue, which would cause the original partition + * to get a transport event, etc., etc. To prevent this, we don't + * actually close our queue if the client initiated the reset, (i.e. + * either we got a transport event or we have detected that the client's + * queue is gone) + * + * EXECUTION ENVIRONMENT: + * Process environment, called with interrupt lock held + */ +static void ibmvscsis_reset_queue(struct scsi_info *vscsi) +{ + int bytes; + long rc = ADAPT_SUCCESS; + + pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); + + /* don't reset, the client did it for us */ + if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { + vscsi->flags &= PRESERVE_FLAG_FIELDS; + vscsi->rsp_q_timer.timer_pops = 0; + vscsi->debit = 0; + vscsi->credit = 0; + vscsi->state = WAIT_CONNECTION; + vio_enable_interrupts(vscsi->dma_dev); + } else { + rc = ibmvscsis_free_command_q(vscsi); + if (rc == ADAPT_SUCCESS) { + vscsi->state = WAIT_CONNECTION; + + bytes = vscsi->cmd_q.size * PAGE_SIZE; + rc = h_reg_crq(vscsi->dds.unit_id, + vscsi->cmd_q.crq_token, bytes); + if (rc == H_CLOSED || rc == H_SUCCESS) { + rc = ibmvscsis_establish_new_q(vscsi); + } - case ERR_DISCONNECTED: - case ERR_DISCONNECT: - case UNDEFINED: - if (new_state == UNCONFIGURING) - vscsi->new_state = new_state; - break; + if (rc != ADAPT_SUCCESS) { + pr_debug("reset_queue: reg_crq rc %ld\n", rc); - case ERR_DISCONNECT_RECONNECT: - switch (new_state) { - case UNCONFIGURING: - case ERR_DISCONNECT: - vscsi->new_state = new_state; - break; - default: - break; + vscsi->state = ERR_DISCONNECTED; + vscsi->flags |= RESPONSE_Q_DOWN; + ibmvscsis_free_command_q(vscsi); } - break; + } else { + vscsi->state = ERR_DISCONNECTED; + vscsi->flags |= RESPONSE_Q_DOWN; + } + } +} - case WAIT_ENABLED: - case PART_UP_WAIT_ENAB: - case WAIT_IDLE: - case WAIT_CONNECTION: - case CONNECTED: - case SRP_PROCESSING: - vscsi->new_state = new_state; - break; +/** + * ibmvscsis_free_cmd_resources() - Free command resources + * @vscsi: Pointer to our adapter structure + * @cmd: Command which is not longer in use + * + * Must be called with interrupt lock held. 
+ */ +static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; - default: - break; - } + switch (cmd->type) { + case TASK_MANAGEMENT: + case SCSI_CDB: + /* + * When the queue goes down this value is cleared, so it + * cannot be cleared in this general purpose function. + */ + if (vscsi->debit) + vscsi->debit -= 1; + break; + case ADAPTER_MAD: + vscsi->flags &= ~PROCESSING_MAD; + break; + case UNSET_TYPE: + break; + default: + dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", + cmd->type); + break; } - pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", - vscsi->flags, vscsi->new_state); + cmd->iue = NULL; + list_add_tail(&cmd->list, &vscsi->free_cmd); + srp_iu_put(iue); + + if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && + list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { + vscsi->flags &= ~WAIT_FOR_IDLE; + complete(&vscsi->wait_idle); + } } /** @@ -863,10 +1007,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, TRANS_EVENT)); break; - case PART_UP_WAIT_ENAB: - vscsi->state = WAIT_ENABLED; - break; - case SRP_PROCESSING: if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) || @@ -895,7 +1035,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, } } - rc = vscsi->flags & SCHEDULE_DISCONNECT; + rc = vscsi->flags & SCHEDULE_DISCONNECT; pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", vscsi->flags, vscsi->state, rc); @@ -1066,16 +1206,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) free_qs = true; switch (vscsi->state) { + case UNCONFIGURING: + ibmvscsis_free_command_q(vscsi); + dma_rmb(); + isync(); + if (vscsi->flags & CFG_SLEEPING) { + vscsi->flags &= ~CFG_SLEEPING; + complete(&vscsi->unconfig); + } + break; case ERR_DISCONNECT_RECONNECT: - ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION); + ibmvscsis_reset_queue(vscsi); pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags); break; case ERR_DISCONNECT: ibmvscsis_free_command_q(vscsi); - vscsi->flags &= ~DISCONNECT_SCHEDULED; + vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED); vscsi->flags |= RESPONSE_Q_DOWN; - vscsi->state = ERR_DISCONNECTED; + if (vscsi->tport.enabled) + vscsi->state = ERR_DISCONNECTED; + else + vscsi->state = WAIT_ENABLED; pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n", vscsi->flags, vscsi->state); break; @@ -1220,7 +1372,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi, * @iue: Information Unit containing the Adapter Info MAD request * * EXECUTION ENVIRONMENT: - * Interrupt adpater lock is held + * Interrupt adapter lock is held */ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, struct iu_entry *iue) @@ -1542,7 +1694,7 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc) if (!vscsi->rsp_q_timer.started) { if (vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) { - kt = ktime_set(0, WAIT_NANO_SECONDS); + kt = WAIT_NANO_SECONDS; } else { /* * slide the timeslice if the maximum @@ -1620,8 +1772,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) be64_to_cpu(msg_hi), be64_to_cpu(cmd->rsp.tag)); - pr_debug("send_messages: tag 0x%llx, rc %ld\n", - be64_to_cpu(cmd->rsp.tag), rc); + pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n", + cmd, be64_to_cpu(cmd->rsp.tag), rc); /* if all ok free up the command element resources */ if (rc == H_SUCCESS) { @@ -1691,7 +1843,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi, * @crq: 
Pointer to the CRQ entry containing the MAD request * * EXECUTION ENVIRONMENT: - * Interrupt called with adapter lock held + * Interrupt, called with adapter lock held */ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) { @@ -1745,14 +1897,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) pr_debug("mad: type %d\n", be32_to_cpu(mad->type)); - if (be16_to_cpu(mad->length) < 0) { - dev_err(&vscsi->dev, "mad: length is < 0\n"); - ibmvscsis_post_disconnect(vscsi, - ERR_DISCONNECT_RECONNECT, 0); - rc = SRP_VIOLATION; - } else { - rc = ibmvscsis_process_mad(vscsi, iue); - } + rc = ibmvscsis_process_mad(vscsi, iue); pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status), rc); @@ -1864,7 +2009,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi, break; case H_PERMISSION: if (connection_broken(vscsi)) - flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; + flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", rc); ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, @@ -2089,248 +2234,98 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq) break; case SRP_TSK_MGMT: - tsk = &vio_iu(iue)->srp.tsk_mgmt; - pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, - tsk->tag); - cmd->rsp.tag = tsk->tag; - vscsi->debit += 1; - cmd->type = TASK_MANAGEMENT; - list_add_tail(&cmd->list, &vscsi->schedule_q); - queue_work(vscsi->work_q, &cmd->work); - break; - - case SRP_CMD: - pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, - srp->tag); - cmd->rsp.tag = srp->tag; - vscsi->debit += 1; - cmd->type = SCSI_CDB; - /* - * We want to keep track of work waiting for - * the workqueue. - */ - list_add_tail(&cmd->list, &vscsi->schedule_q); - queue_work(vscsi->work_q, &cmd->work); - break; - - case SRP_I_LOGOUT: - rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); - break; - - case SRP_CRED_RSP: - case SRP_AER_RSP: - default: - ibmvscsis_free_cmd_resources(vscsi, cmd); - dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", - (uint)srp->opcode); - ibmvscsis_post_disconnect(vscsi, - ERR_DISCONNECT_RECONNECT, 0); - break; - } - } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { - rc = ibmvscsis_srp_login(vscsi, cmd, crq); - } else { - ibmvscsis_free_cmd_resources(vscsi, cmd); - dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", - vscsi->state); - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); - } -} - -/** - * ibmvscsis_ping_response() - Respond to a ping request - * @vscsi: Pointer to our adapter structure - * - * Let the client know that the server is alive and waiting on - * its native I/O stack. - * If any type of error occurs from the call to queue a ping - * response then the client is either not accepting or receiving - * interrupts. Disconnect with an error. 
- * - * EXECUTION ENVIRONMENT: - * Interrupt, interrupt lock held - */ -static long ibmvscsis_ping_response(struct scsi_info *vscsi) -{ - struct viosrp_crq *crq; - u64 buffer[2] = { 0, 0 }; - long rc; - - crq = (struct viosrp_crq *)&buffer; - crq->valid = VALID_CMD_RESP_EL; - crq->format = (u8)MESSAGE_IN_CRQ; - crq->status = PING_RESPONSE; - - rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), - cpu_to_be64(buffer[MSG_LOW])); - - switch (rc) { - case H_SUCCESS: - break; - case H_CLOSED: - vscsi->flags |= CLIENT_FAILED; - case H_DROPPED: - vscsi->flags |= RESPONSE_Q_DOWN; - case H_REMOTE_PARM: - dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", - rc); - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); - break; - default: - dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", - rc); - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); - break; - } - - return rc; -} - -/** - * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message - * @vscsi: Pointer to our adapter structure - * - * Must be called with interrupt lock held. - */ -static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) -{ - long rc = ADAPT_SUCCESS; - - switch (vscsi->state) { - case NO_QUEUE: - case ERR_DISCONNECT: - case ERR_DISCONNECT_RECONNECT: - case ERR_DISCONNECTED: - case UNCONFIGURING: - case UNDEFINED: - rc = ERROR; - break; - - case WAIT_CONNECTION: - vscsi->state = CONNECTED; - break; - - case WAIT_IDLE: - case SRP_PROCESSING: - case CONNECTED: - case WAIT_ENABLED: - case PART_UP_WAIT_ENAB: - default: - rc = ERROR; - dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", - vscsi->state); - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); - break; - } - - return rc; -} - -/** - * ibmvscsis_handle_init_msg() - Respond to an Init Message - * @vscsi: Pointer to our adapter structure - * - * Must be called with interrupt lock held. - */ -static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) -{ - long rc = ADAPT_SUCCESS; - - switch (vscsi->state) { - case WAIT_ENABLED: - vscsi->state = PART_UP_WAIT_ENAB; - break; + tsk = &vio_iu(iue)->srp.tsk_mgmt; + pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, + tsk->tag); + cmd->rsp.tag = tsk->tag; + vscsi->debit += 1; + cmd->type = TASK_MANAGEMENT; + list_add_tail(&cmd->list, &vscsi->schedule_q); + queue_work(vscsi->work_q, &cmd->work); + break; - case WAIT_CONNECTION: - rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); - switch (rc) { - case H_SUCCESS: - vscsi->state = CONNECTED; + case SRP_CMD: + pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, + srp->tag); + cmd->rsp.tag = srp->tag; + vscsi->debit += 1; + cmd->type = SCSI_CDB; + /* + * We want to keep track of work waiting for + * the workqueue. 
+ */ + list_add_tail(&cmd->list, &vscsi->schedule_q); + queue_work(vscsi->work_q, &cmd->work); break; - case H_PARAMETER: - dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", - rc); - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + case SRP_I_LOGOUT: + rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); break; - case H_DROPPED: - dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", - rc); - rc = ERROR; + case SRP_CRED_RSP: + case SRP_AER_RSP: + default: + ibmvscsis_free_cmd_resources(vscsi, cmd); + dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", + (uint)srp->opcode); ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); break; - - case H_CLOSED: - pr_warn("init_msg: failed to send, rc %ld\n", rc); - rc = 0; - break; } - break; - - case UNDEFINED: - rc = ERROR; - break; - - case UNCONFIGURING: - break; - - case PART_UP_WAIT_ENAB: - case CONNECTED: - case SRP_PROCESSING: - case WAIT_IDLE: - case NO_QUEUE: - case ERR_DISCONNECT: - case ERR_DISCONNECT_RECONNECT: - case ERR_DISCONNECTED: - default: - rc = ERROR; - dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", + } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { + rc = ibmvscsis_srp_login(vscsi, cmd, crq); + } else { + ibmvscsis_free_cmd_resources(vscsi, cmd); + dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", vscsi->state); ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); - break; } - - return rc; } /** - * ibmvscsis_init_msg() - Respond to an init message + * ibmvscsis_ping_response() - Respond to a ping request * @vscsi: Pointer to our adapter structure - * @crq: Pointer to CRQ element containing the Init Message + * + * Let the client know that the server is alive and waiting on + * its native I/O stack. + * If any type of error occurs from the call to queue a ping + * response then the client is either not accepting or receiving + * interrupts. Disconnect with an error. 
* * EXECUTION ENVIRONMENT: * Interrupt, interrupt lock held */ -static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) +static long ibmvscsis_ping_response(struct scsi_info *vscsi) { - long rc = ADAPT_SUCCESS; + struct viosrp_crq *crq; + u64 buffer[2] = { 0, 0 }; + long rc; - pr_debug("init_msg: state 0x%hx\n", vscsi->state); + crq = (struct viosrp_crq *)&buffer; + crq->valid = VALID_CMD_RESP_EL; + crq->format = (u8)MESSAGE_IN_CRQ; + crq->status = PING_RESPONSE; - rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, - (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, - 0); - if (rc == H_SUCCESS) { - vscsi->client_data.partition_number = - be64_to_cpu(*(u64 *)vscsi->map_buf); - pr_debug("init_msg, part num %d\n", - vscsi->client_data.partition_number); - } else { - pr_debug("init_msg h_vioctl rc %ld\n", rc); - rc = ADAPT_SUCCESS; - } + rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), + cpu_to_be64(buffer[MSG_LOW])); - if (crq->format == INIT_MSG) { - rc = ibmvscsis_handle_init_msg(vscsi); - } else if (crq->format == INIT_COMPLETE_MSG) { - rc = ibmvscsis_handle_init_compl_msg(vscsi); - } else { - rc = ERROR; - dev_err(&vscsi->dev, "init_msg: invalid format %d\n", - (uint)crq->format); + switch (rc) { + case H_SUCCESS: + break; + case H_CLOSED: + vscsi->flags |= CLIENT_FAILED; + case H_DROPPED: + vscsi->flags |= RESPONSE_Q_DOWN; + case H_REMOTE_PARM: + dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", + rc); ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + default: + dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + break; } return rc; @@ -2391,7 +2386,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi, break; case VALID_TRANS_EVENT: - rc = ibmvscsis_trans_event(vscsi, crq); + rc = ibmvscsis_trans_event(vscsi, crq); break; case VALID_INIT_MSG: @@ -2522,7 +2517,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n", srp->tag); goto fail; - return; } cmd->rsp.sol_not = srp->sol_not; @@ -2559,6 +2553,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, data_len, attr, dir, 0); if (rc) { dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc); + spin_lock_bh(&vscsi->intr_lock); + list_del(&cmd->list); + ibmvscsis_free_cmd_resources(vscsi, cmd); + spin_unlock_bh(&vscsi->intr_lock); goto fail; } return; @@ -2638,6 +2636,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi, if (rc) { dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", rc); + spin_lock_bh(&vscsi->intr_lock); + list_del(&cmd->list); + spin_unlock_bh(&vscsi->intr_lock); cmd->se_cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED; } @@ -2785,36 +2786,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data) return IRQ_HANDLED; } -/** - * ibmvscsis_check_q() - Helper function to Check Init Message Valid - * @vscsi: Pointer to our adapter structure - * - * Checks if a initialize message was queued by the initiatior - * while the timing window was open. This function is called from - * probe after the CRQ is created and interrupts are enabled. - * It would only be used by adapters who wait for some event before - * completing the init handshake with the client. For ibmvscsi, this - * event is waiting for the port to be enabled. 
- * - * EXECUTION ENVIRONMENT: - * Process level only, interrupt lock held - */ -static long ibmvscsis_check_q(struct scsi_info *vscsi) -{ - uint format; - long rc; - - rc = ibmvscsis_check_init_msg(vscsi, &format); - if (rc) - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); - else if (format == UNUSED_FORMAT) - vscsi->state = WAIT_ENABLED; - else - vscsi->state = PART_UP_WAIT_ENAB; - - return rc; -} - /** * ibmvscsis_enable_change_state() - Set new state based on enabled status * @vscsi: Pointer to our adapter structure @@ -2826,77 +2797,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi) */ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) { + int bytes; long rc = ADAPT_SUCCESS; -handle_state_change: - switch (vscsi->state) { - case WAIT_ENABLED: - rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); - switch (rc) { - case H_SUCCESS: - case H_DROPPED: - case H_CLOSED: - vscsi->state = WAIT_CONNECTION; - rc = ADAPT_SUCCESS; - break; - - case H_PARAMETER: - break; - - case H_HARDWARE: - break; - - default: - vscsi->state = UNDEFINED; - rc = H_HARDWARE; - break; - } - break; - case PART_UP_WAIT_ENAB: - rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); - switch (rc) { - case H_SUCCESS: - vscsi->state = CONNECTED; - rc = ADAPT_SUCCESS; - break; - - case H_DROPPED: - case H_CLOSED: - vscsi->state = WAIT_ENABLED; - goto handle_state_change; - - case H_PARAMETER: - break; - - case H_HARDWARE: - break; - - default: - rc = H_HARDWARE; - break; - } - break; - - case WAIT_CONNECTION: - case WAIT_IDLE: - case SRP_PROCESSING: - case CONNECTED: - rc = ADAPT_SUCCESS; - break; - /* should not be able to get here */ - case UNCONFIGURING: - rc = ERROR; - vscsi->state = UNDEFINED; - break; + bytes = vscsi->cmd_q.size * PAGE_SIZE; + rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes); + if (rc == H_CLOSED || rc == H_SUCCESS) { + vscsi->state = WAIT_CONNECTION; + rc = ibmvscsis_establish_new_q(vscsi); + } - /* driver should never allow this to happen */ - case ERR_DISCONNECT: - case ERR_DISCONNECT_RECONNECT: - default: - dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n", - vscsi->state); - rc = ADAPT_SUCCESS; - break; + if (rc != ADAPT_SUCCESS) { + vscsi->state = ERR_DISCONNECTED; + vscsi->flags |= RESPONSE_Q_DOWN; } return rc; @@ -2916,7 +2829,6 @@ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) */ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) { - long rc = 0; int pages; struct vio_dev *vdev = vscsi->dma_dev; @@ -2940,22 +2852,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) return -ENOMEM; } - rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE); - if (rc) { - if (rc == H_CLOSED) { - vscsi->state = WAIT_ENABLED; - rc = 0; - } else { - dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token, - PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)vscsi->cmd_q.base_addr); - rc = -ENODEV; - } - } else { - vscsi->state = WAIT_ENABLED; - } - - return rc; + return 0; } /** @@ -3270,7 +3167,7 @@ static void ibmvscsis_handle_crq(unsigned long data) /* * if we are in a path where we are waiting for all pending commands * to complete because we received a transport event and anything in - * the command queue is for a new connection, do nothing + * the command queue is for a new connection, do nothing */ if (TARGET_STOP(vscsi)) { vio_enable_interrupts(vscsi->dma_dev); @@ -3314,7 +3211,7 @@ static void ibmvscsis_handle_crq(unsigned 
long data) * everything but transport events on the queue * * need to decrement the queue index so we can - * look at the elment again + * look at the element again */ if (vscsi->cmd_q.index) vscsi->cmd_q.index -= 1; @@ -3378,7 +3275,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev, INIT_LIST_HEAD(&vscsi->waiting_rsp); INIT_LIST_HEAD(&vscsi->active_q); - snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev)); + snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s", + dev_name(&vdev->dev)); pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name); @@ -3393,6 +3291,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev, strncat(vscsi->eye, vdev->name, MAX_EYE); vscsi->dds.unit_id = vdev->unit_address; + strncpy(vscsi->dds.partition_name, partition_name, + sizeof(vscsi->dds.partition_name)); + vscsi->dds.partition_num = partition_number; spin_lock_bh(&ibmvscsis_dev_lock); list_add_tail(&vscsi->list, &ibmvscsis_dev_list); @@ -3469,6 +3370,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev, (unsigned long)vscsi); init_completion(&vscsi->wait_idle); + init_completion(&vscsi->unconfig); snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); vscsi->work_q = create_workqueue(wq_name); @@ -3485,31 +3387,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev, goto destroy_WQ; } - spin_lock_bh(&vscsi->intr_lock); - vio_enable_interrupts(vdev); - if (rc) { - dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc); - rc = -ENODEV; - spin_unlock_bh(&vscsi->intr_lock); - goto free_irq; - } - - if (ibmvscsis_check_q(vscsi)) { - rc = ERROR; - dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc); - spin_unlock_bh(&vscsi->intr_lock); - goto disable_interrupt; - } - spin_unlock_bh(&vscsi->intr_lock); + vscsi->state = WAIT_ENABLED; dev_set_drvdata(&vdev->dev, vscsi); return 0; -disable_interrupt: - vio_disable_interrupts(vdev); -free_irq: - free_irq(vdev->irq, vscsi); destroy_WQ: destroy_workqueue(vscsi->work_q); unmap_buf: @@ -3543,10 +3426,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev) pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); - /* - * TBD: Need to handle if there are commands on the waiting_rsp q - * Actually, can there still be cmds outstanding to tcm? 
- */ + spin_lock_bh(&vscsi->intr_lock); + ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0); + vscsi->flags |= CFG_SLEEPING; + spin_unlock_bh(&vscsi->intr_lock); + wait_for_completion(&vscsi->unconfig); vio_disable_interrupts(vdev); free_irq(vdev->irq, vscsi); @@ -3555,7 +3439,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev) DMA_BIDIRECTIONAL); kfree(vscsi->map_buf); tasklet_kill(&vscsi->work_task); - ibmvscsis_unregister_command_q(vscsi); ibmvscsis_destroy_command_q(vscsi); ibmvscsis_freetimer(vscsi); ibmvscsis_free_cmds(vscsi); @@ -3609,7 +3492,7 @@ static int ibmvscsis_get_system_info(void) num = of_get_property(rootdn, "ibm,partition-no", NULL); if (num) - partition_number = *num; + partition_number = of_read_number(num, 1); of_node_put(rootdn); @@ -3903,18 +3786,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item, } if (tmp) { - tport->enabled = true; spin_lock_bh(&vscsi->intr_lock); + tport->enabled = true; lrc = ibmvscsis_enable_change_state(vscsi); if (lrc) pr_err("enable_change_state failed, rc %ld state %d\n", lrc, vscsi->state); spin_unlock_bh(&vscsi->intr_lock); } else { + spin_lock_bh(&vscsi->intr_lock); tport->enabled = false; + /* This simulates the server going down */ + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + spin_unlock_bh(&vscsi->intr_lock); } - pr_debug("tpg_enable_store, state %d\n", vscsi->state); + pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state); return count; } @@ -3983,10 +3870,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = { ATTRIBUTE_GROUPS(ibmvscsis_dev); static struct class ibmvscsis_class = { - .name = "ibmvscsis", - .dev_release = ibmvscsis_dev_release, - .class_attrs = ibmvscsis_class_attrs, - .dev_groups = ibmvscsis_dev_groups, + .name = "ibmvscsis", + .dev_release = ibmvscsis_dev_release, + .class_attrs = ibmvscsis_class_attrs, + .dev_groups = ibmvscsis_dev_groups, }; static struct vio_device_id ibmvscsis_device_table[] = { diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h index 981a0c992b6cce7898f27e958dff816e389b54fe..65c6189885ab08f24212bce5f008c1973539a0f7 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h @@ -26,6 +26,7 @@ #ifndef __H_IBMVSCSI_TGT #define __H_IBMVSCSI_TGT +#include #include "libsrp.h" #define SYS_ID_NAME_LEN 64 @@ -204,8 +205,6 @@ struct scsi_info { struct list_head waiting_rsp; #define NO_QUEUE 0x00 #define WAIT_ENABLED 0X01 - /* driver has received an initialize command */ -#define PART_UP_WAIT_ENAB 0x02 #define WAIT_CONNECTION 0x04 /* have established a connection */ #define CONNECTED 0x08 @@ -259,6 +258,8 @@ struct scsi_info { #define SCHEDULE_DISCONNECT 0x00400 /* disconnect handler is scheduled */ #define DISCONNECT_SCHEDULED 0x00800 + /* remove function is sleeping */ +#define CFG_SLEEPING 0x01000 u32 flags; /* adapter lock */ spinlock_t intr_lock; @@ -287,6 +288,7 @@ struct scsi_info { struct workqueue_struct *work_q; struct completion wait_idle; + struct completion unconfig; struct device dev; struct vio_dev *dma_dev; struct srp_target target; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 532474109624d9cd9c6a52c040a1027120819886..835c59c777f20a506731fea77300f3f305e8ff9a 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -186,16 +186,16 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { }; static const struct ipr_chip_t ipr_chip[] = { - { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, 
&ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } }; static int ipr_max_bus_speeds[] = { @@ -9439,23 +9439,11 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) { struct pci_dev *pdev = ioa_cfg->pdev; + int i; - if (ioa_cfg->intr_flag == IPR_USE_MSI || - ioa_cfg->intr_flag == IPR_USE_MSIX) { - int i; - for (i = 0; i < ioa_cfg->nvectors; i++) - free_irq(ioa_cfg->vectors_info[i].vec, - &ioa_cfg->hrrq[i]); - } else - free_irq(pdev->irq, &ioa_cfg->hrrq[0]); - - if (ioa_cfg->intr_flag == IPR_USE_MSI) { - pci_disable_msi(pdev); - ioa_cfg->intr_flag &= ~IPR_USE_MSI; - } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) { - pci_disable_msix(pdev); - ioa_cfg->intr_flag &= ~IPR_USE_MSIX; - } + for (i = 0; i < ioa_cfg->nvectors; i++) + free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); + pci_free_irq_vectors(pdev); } /** @@ -9883,45 +9871,6 @@ static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) } } -static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg) -{ - struct msix_entry entries[IPR_MAX_MSIX_VECTORS]; - int i, vectors; - - for (i = 0; i < ARRAY_SIZE(entries); ++i) - entries[i].entry = i; - - vectors = pci_enable_msix_range(ioa_cfg->pdev, - entries, 1, ipr_number_of_msix); - if (vectors < 0) { - ipr_wait_for_pci_err_recovery(ioa_cfg); - return vectors; - } - - for (i = 0; i < vectors; i++) - ioa_cfg->vectors_info[i].vec = entries[i].vector; - ioa_cfg->nvectors = vectors; - - return 0; -} - -static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg) -{ - int i, vectors; - - 
vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix); - if (vectors < 0) { - ipr_wait_for_pci_err_recovery(ioa_cfg); - return vectors; - } - - for (i = 0; i < vectors; i++) - ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i; - ioa_cfg->nvectors = vectors; - - return 0; -} - static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) { int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; @@ -9934,19 +9883,20 @@ static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) } } -static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg) +static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, + struct pci_dev *pdev) { int i, rc; for (i = 1; i < ioa_cfg->nvectors; i++) { - rc = request_irq(ioa_cfg->vectors_info[i].vec, + rc = request_irq(pci_irq_vector(pdev, i), ipr_isr_mhrrq, 0, ioa_cfg->vectors_info[i].desc, &ioa_cfg->hrrq[i]); if (rc) { while (--i >= 0) - free_irq(ioa_cfg->vectors_info[i].vec, + free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); return rc; } @@ -9984,8 +9934,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp) * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. * @pdev: PCI device struct * - * Description: The return value from pci_enable_msi_range() can not always be - * trusted. This routine sets up and initiates a test interrupt to determine + * Description: This routine sets up and initiates a test interrupt to determine * if the interrupt is received via the ipr_test_intr() service routine. * If the tests fails, the driver will fall back to LSI. * @@ -9997,6 +9946,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) int rc; volatile u32 int_reg; unsigned long lock_flags = 0; + int irq = pci_irq_vector(pdev, 0); ENTER; @@ -10008,15 +9958,12 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - if (ioa_cfg->intr_flag == IPR_USE_MSIX) - rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); - else - rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); + rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); if (rc) { - dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); + dev_err(&pdev->dev, "Can not assign irq %d\n", irq); return rc; } else if (ipr_debug) - dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); + dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); @@ -10033,10 +9980,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - if (ioa_cfg->intr_flag == IPR_USE_MSIX) - free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); - else - free_irq(pdev->irq, ioa_cfg); + free_irq(irq, ioa_cfg); LEAVE; @@ -10060,6 +10004,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev, int rc = PCIBIOS_SUCCESSFUL; volatile u32 mask, uproc, interrupts; unsigned long lock_flags, driver_lock_flags; + unsigned int irq_flag; ENTER; @@ -10175,18 +10120,18 @@ static int ipr_probe_ioa(struct pci_dev *pdev, ipr_number_of_msix = IPR_MAX_MSIX_VECTORS; } - if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && - ipr_enable_msix(ioa_cfg) == 0) - ioa_cfg->intr_flag = IPR_USE_MSIX; - else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && - ipr_enable_msi(ioa_cfg) == 0) - ioa_cfg->intr_flag = IPR_USE_MSI; - else 
{ - ioa_cfg->intr_flag = IPR_USE_LSI; - ioa_cfg->clear_isr = 1; - ioa_cfg->nvectors = 1; - dev_info(&pdev->dev, "Cannot enable MSI.\n"); + irq_flag = PCI_IRQ_LEGACY; + if (ioa_cfg->ipr_chip->has_msi) + irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX; + rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag); + if (rc < 0) { + ipr_wait_for_pci_err_recovery(ioa_cfg); + goto cleanup_nomem; } + ioa_cfg->nvectors = rc; + + if (!pdev->msi_enabled && !pdev->msix_enabled) + ioa_cfg->clear_isr = 1; pci_set_master(pdev); @@ -10199,33 +10144,23 @@ static int ipr_probe_ioa(struct pci_dev *pdev, } } - if (ioa_cfg->intr_flag == IPR_USE_MSI || - ioa_cfg->intr_flag == IPR_USE_MSIX) { + if (pdev->msi_enabled || pdev->msix_enabled) { rc = ipr_test_msi(ioa_cfg, pdev); - if (rc == -EOPNOTSUPP) { + switch (rc) { + case 0: + dev_info(&pdev->dev, + "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, + pdev->msix_enabled ? "-X" : ""); + break; + case -EOPNOTSUPP: ipr_wait_for_pci_err_recovery(ioa_cfg); - if (ioa_cfg->intr_flag == IPR_USE_MSI) { - ioa_cfg->intr_flag &= ~IPR_USE_MSI; - pci_disable_msi(pdev); - } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) { - ioa_cfg->intr_flag &= ~IPR_USE_MSIX; - pci_disable_msix(pdev); - } + pci_free_irq_vectors(pdev); - ioa_cfg->intr_flag = IPR_USE_LSI; ioa_cfg->nvectors = 1; - } - else if (rc) + ioa_cfg->clear_isr = 1; + break; + default: goto out_msi_disable; - else { - if (ioa_cfg->intr_flag == IPR_USE_MSI) - dev_info(&pdev->dev, - "Request for %d MSIs succeeded with starting IRQ: %d\n", - ioa_cfg->nvectors, pdev->irq); - else if (ioa_cfg->intr_flag == IPR_USE_MSIX) - dev_info(&pdev->dev, - "Request for %d MSIXs succeeded.", - ioa_cfg->nvectors); } } @@ -10273,15 +10208,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev, ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - if (ioa_cfg->intr_flag == IPR_USE_MSI - || ioa_cfg->intr_flag == IPR_USE_MSIX) { + if (pdev->msi_enabled || pdev->msix_enabled) { name_msi_vectors(ioa_cfg); - rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr, - 0, + rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0, ioa_cfg->vectors_info[0].desc, &ioa_cfg->hrrq[0]); if (!rc) - rc = ipr_request_other_msi_irqs(ioa_cfg); + rc = ipr_request_other_msi_irqs(ioa_cfg, pdev); } else { rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, @@ -10323,10 +10256,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev, ipr_free_mem(ioa_cfg); out_msi_disable: ipr_wait_for_pci_err_recovery(ioa_cfg); - if (ioa_cfg->intr_flag == IPR_USE_MSI) - pci_disable_msi(pdev); - else if (ioa_cfg->intr_flag == IPR_USE_MSIX) - pci_disable_msix(pdev); + pci_free_irq_vectors(pdev); cleanup_nomem: iounmap(ipr_regs); out_disable: diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 8995053d01b3fe5620b8bba2e84f59209a3b86d2..b7d2e98eb45bdfc81ff39efa7ad00c2a0827fc65 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1413,10 +1413,7 @@ struct ipr_chip_cfg_t { struct ipr_chip_t { u16 vendor; u16 device; - u16 intr_type; -#define IPR_USE_LSI 0x00 -#define IPR_USE_MSI 0x01 -#define IPR_USE_MSIX 0x02 + bool has_msi; u16 sis_type; #define IPR_SIS32 0x00 #define IPR_SIS64 0x01 @@ -1593,11 +1590,9 @@ struct ipr_ioa_cfg { struct ipr_cmnd **ipr_cmnd_list; dma_addr_t *ipr_cmnd_list_dma; - u16 intr_flag; unsigned int nvectors; struct { - unsigned short vec; char desc[22]; } vectors_info[IPR_MAX_MSIX_VECTORS]; diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 
02cb76fd442084cbc7ba108a00002f513c119f86..3419e1bcdff60407e94183959a065a5c482a9c09 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -2241,9 +2241,6 @@ ips_get_bios_version(ips_ha_t * ha, int intr) uint8_t minor; uint8_t subminor; uint8_t *buffer; - char hexDigits[] = - { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', - 'D', 'E', 'F' }; METHOD_TRACE("ips_get_bios_version", 1); @@ -2374,13 +2371,13 @@ ips_get_bios_version(ips_ha_t * ha, int intr) } } - ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4]; + ha->bios_version[0] = hex_asc_upper_hi(major); ha->bios_version[1] = '.'; - ha->bios_version[2] = hexDigits[major & 0x0F]; - ha->bios_version[3] = hexDigits[subminor]; + ha->bios_version[2] = hex_asc_upper_lo(major); + ha->bios_version[3] = hex_asc_upper_lo(subminor); ha->bios_version[4] = '.'; - ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4]; - ha->bios_version[6] = hexDigits[minor & 0x0F]; + ha->bios_version[5] = hex_asc_upper_hi(minor); + ha->bios_version[6] = hex_asc_upper_lo(minor); ha->bios_version[7] = 0; } diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h index 45b9566b928e1895553d85c98056f9aa65793dc5..b782bb60baf0e8d49e64a34f518dd3fec5fcd6ed 100644 --- a/drivers/scsi/ips.h +++ b/drivers/scsi/ips.h @@ -51,7 +51,7 @@ #define _IPS_H_ #include - #include +#include #include /* diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 22a9bb1abae1473062b06031494bb38b2cc11576..b3539928073c628729af52c1301be111ae77b9dc 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -295,7 +295,6 @@ enum sci_controller_states { #define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS) struct isci_pci_info { - struct msix_entry msix_entries[SCI_MAX_MSIX_INT]; struct isci_host *hosts[SCI_MAX_CONTROLLERS]; struct isci_orom *orom; }; diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 77128d680e3bc615732767ea428292c93ffb32fa..0b5b5db0d0f85ece4368e46fa171ff73ea8bda41 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -350,16 +350,12 @@ static int isci_setup_interrupts(struct pci_dev *pdev) */ num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT; - for (i = 0; i < num_msix; i++) - pci_info->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix); - if (err) + err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX); + if (err < 0) goto intx; for (i = 0; i < num_msix; i++) { int id = i / SCI_NUM_MSI_X_INT; - struct msix_entry *msix = &pci_info->msix_entries[i]; irq_handler_t isr; ihost = pci_info->hosts[id]; @@ -369,8 +365,8 @@ static int isci_setup_interrupts(struct pci_dev *pdev) else isr = isci_msix_isr; - err = devm_request_irq(&pdev->dev, msix->vector, isr, 0, - DRV_NAME"-msix", ihost); + err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), + isr, 0, DRV_NAME"-msix", ihost); if (!err) continue; @@ -378,18 +374,19 @@ static int isci_setup_interrupts(struct pci_dev *pdev) while (i--) { id = i / SCI_NUM_MSI_X_INT; ihost = pci_info->hosts[id]; - msix = &pci_info->msix_entries[i]; - devm_free_irq(&pdev->dev, msix->vector, ihost); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), + ihost); } - pci_disable_msix(pdev); + pci_free_irq_vectors(pdev); goto intx; } return 0; intx: for_each_isci_host(i, ihost, pdev) { - err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr, - IRQF_SHARED, DRV_NAME"-intx", ihost); + err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0), + isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx", + ihost); 
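The ipr and isci hunks above replace pci_enable_msi_range()/pci_enable_msix_exact() and the hand-rolled vector tables with the pci_alloc_irq_vectors() family. A self-contained sketch of that pattern, assuming a hypothetical device context (struct mydev and mydev_isr are illustrative only):

#include <linux/pci.h>
#include <linux/interrupt.h>

struct mydev {
	struct pci_dev *pdev;
	int nvectors;
};

static irqreturn_t mydev_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int mydev_setup_irqs(struct mydev *md, int max_vecs, bool want_msi)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int i, rc;

	if (want_msi)
		flags |= PCI_IRQ_MSI | PCI_IRQ_MSIX;

	/* Returns the number of vectors allocated, or a negative errno. */
	rc = pci_alloc_irq_vectors(md->pdev, 1, max_vecs, flags);
	if (rc < 0)
		return rc;
	md->nvectors = rc;

	for (i = 0; i < md->nvectors; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number. */
		rc = request_irq(pci_irq_vector(md->pdev, i), mydev_isr, 0,
				 "mydev", md);
		if (rc)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(md->pdev, i), md);
	pci_free_irq_vectors(md->pdev);
	return rc;
}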
if (err) break; } diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 8ac646e5eddc9476603fb98c8ce5bd2a7b0be224..a2bbe46f8ccbd3e2aaa343d3dff2bd249b871037 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c @@ -54,6 +54,7 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) len = pci_biosrom_size(pdev); rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL); if (!rom) { + pci_unmap_biosrom(oprom); dev_warn(&pdev->dev, "Unable to allocate memory for orom\n"); return NULL; diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 1910100638a280a5c70e1f2118e354d6ecfb1a43..e3f2a5359d71e11118dd204d92b3410cfbe39722 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state) { static const char * const strings[] = RNC_STATES; + if (state >= ARRAY_SIZE(strings)) + return "UNKNOWN"; + return strings[state]; } #undef C @@ -454,7 +457,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con * the device since it's being invalidated anyway */ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " - "suspeneded by hardware while being " + "suspended by hardware while being " "invalidated.\n", __func__, sci_rnc); break; default: @@ -473,7 +476,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con * the device since it's being resumed anyway */ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " - "suspeneded by hardware while being resumed.\n", + "suspended by hardware while being resumed.\n", __func__, sci_rnc); break; default: diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index b709d2b208809cce8442740a61c5429f5ffee982..47f66e9497451e290112852fa5a3f6503b4f33eb 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2473,7 +2473,7 @@ static void isci_request_process_response_iu( "%s: resp_iu = %p " "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " "resp_iu->response_data_len = %x, " - "resp_iu->sense_data_len = %x\nrepsonse data: ", + "resp_iu->sense_data_len = %x\nresponse data: ", __func__, resp_iu, resp_iu->status, diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 880a9068ca1268baf851ea8804f3bdb4216049f6..6103231104dadbc42f84217f274e5e90a3918fe3 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -68,10 +68,14 @@ static void fc_disc_stop_rports(struct fc_disc *disc) lport = fc_disc_lport(disc); - mutex_lock(&disc->disc_mutex); - list_for_each_entry_rcu(rdata, &disc->rports, peers) - lport->tt.rport_logoff(rdata); - mutex_unlock(&disc->disc_mutex); + rcu_read_lock(); + list_for_each_entry_rcu(rdata, &disc->rports, peers) { + if (kref_get_unless_zero(&rdata->kref)) { + fc_rport_logoff(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + } + } + rcu_read_unlock(); } /** @@ -150,7 +154,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) break; } } - lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); + fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); /* * If not doing a complete rediscovery, do GPN_ID on @@ -178,7 +182,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; 
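fc_disc_stop_rports() above drops the disc_mutex and instead walks the rport list under RCU, taking a reference with kref_get_unless_zero() before touching each entry. A generic sketch of that walk with placeholder types (struct item stands in for struct fc_rport_priv):

#include <linux/rculist.h>
#include <linux/kref.h>

struct item {
	struct kref kref;
	struct list_head peers;
};

static void item_release(struct kref *kref)
{
	/* final cleanup/free of the item would go here */
}

static void stop_all(struct list_head *head)
{
	struct item *it;

	rcu_read_lock();
	list_for_each_entry_rcu(it, head, peers) {
		/* Skip entries already on their way out (refcount hit zero). */
		if (!kref_get_unless_zero(&it->kref))
			continue;
		/* ... act on the item, e.g. schedule its logoff ... */
		kref_put(&it->kref, item_release);
	}
	rcu_read_unlock();
}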
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } @@ -289,15 +293,19 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) * Skip ports which were never discovered. These are the dNS port * and ports which were created by PLOGI. */ + rcu_read_lock(); list_for_each_entry_rcu(rdata, &disc->rports, peers) { - if (!rdata->disc_id) + if (!kref_get_unless_zero(&rdata->kref)) continue; - if (rdata->disc_id == disc->disc_id) - lport->tt.rport_login(rdata); - else - lport->tt.rport_logoff(rdata); + if (rdata->disc_id) { + if (rdata->disc_id == disc->disc_id) + fc_rport_login(rdata); + else + fc_rport_logoff(rdata); + } + kref_put(&rdata->kref, fc_rport_destroy); } - + rcu_read_unlock(); mutex_unlock(&disc->disc_mutex); disc->disc_callback(lport, event); mutex_lock(&disc->disc_mutex); @@ -446,7 +454,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) if (ids.port_id != lport->port_id && ids.port_name != lport->wwpn) { - rdata = lport->tt.rport_create(lport, ids.port_id); + rdata = fc_rport_create(lport, ids.port_id); if (rdata) { rdata->ids.port_name = ids.port_name; rdata->disc_id = disc->disc_id; @@ -592,7 +600,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, lport = rdata->local_port; disc = &lport->disc; - mutex_lock(&disc->disc_mutex); if (PTR_ERR(fp) == -FC_EX_CLOSED) goto out; if (IS_ERR(fp)) @@ -607,37 +614,41 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, goto redisc; pn = (struct fc_ns_gid_pn *)(cp + 1); port_name = get_unaligned_be64(&pn->fn_wwpn); + mutex_lock(&rdata->rp_mutex); if (rdata->ids.port_name == -1) rdata->ids.port_name = port_name; else if (rdata->ids.port_name != port_name) { FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. 
" "Port-id %6.6x wwpn %16.16llx\n", rdata->ids.port_id, port_name); - lport->tt.rport_logoff(rdata); - - new_rdata = lport->tt.rport_create(lport, - rdata->ids.port_id); + mutex_unlock(&rdata->rp_mutex); + fc_rport_logoff(rdata); + mutex_lock(&lport->disc.disc_mutex); + new_rdata = fc_rport_create(lport, rdata->ids.port_id); + mutex_unlock(&lport->disc.disc_mutex); if (new_rdata) { new_rdata->disc_id = disc->disc_id; - lport->tt.rport_login(new_rdata); + fc_rport_login(new_rdata); } goto out; } rdata->disc_id = disc->disc_id; - lport->tt.rport_login(rdata); + mutex_unlock(&rdata->rp_mutex); + fc_rport_login(rdata); } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n", cp->ct_reason, cp->ct_explan); - lport->tt.rport_logoff(rdata); + fc_rport_logoff(rdata); } else { FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n", ntohs(cp->ct_cmd)); redisc: + mutex_lock(&disc->disc_mutex); fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); } out: - mutex_unlock(&disc->disc_mutex); - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); } /** @@ -678,7 +689,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) { struct fc_rport_priv *rdata; - rdata = lport->tt.rport_create(lport, dp->port_id); + rdata = fc_rport_create(lport, dp->port_id); if (!rdata) return -ENOMEM; rdata->disc_id = 0; @@ -708,7 +719,7 @@ static void fc_disc_stop(struct fc_lport *lport) static void fc_disc_stop_final(struct fc_lport *lport) { fc_disc_stop(lport); - lport->tt.rport_flush_queue(); + fc_rport_flush_queue(); } /** diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index c2384d501470123a119f4fcd61fadd9ba7aacc24..6384a98048af8f396e4e04bf36d6572d8827bc53 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -67,7 +67,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did, fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type, FC_FCTL_REQ, 0); - return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); + return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); } EXPORT_SYMBOL(fc_elsct_send); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 16ca31ad5ec0abba2d0b08ad1d337ff8afcbbd9a..42bcf7f3a0f90bf5a8778e21cfe0b5c20f6e6a87 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -94,6 +94,7 @@ struct fc_exch_pool { struct fc_exch_mgr { struct fc_exch_pool __percpu *pool; mempool_t *ep_pool; + struct fc_lport *lport; enum fc_class class; struct kref kref; u16 min_xid; @@ -362,8 +363,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, fc_exch_hold(ep); /* hold for timer */ if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, - msecs_to_jiffies(timer_msec))) + msecs_to_jiffies(timer_msec))) { + FC_EXCH_DBG(ep, "Exchange already queued\n"); fc_exch_release(ep); + } } /** @@ -406,6 +409,8 @@ static int fc_exch_done_locked(struct fc_exch *ep) return rc; } +static struct fc_exch fc_quarantine_exch; + /** * fc_exch_ptr_get() - Return an exchange from an exchange pool * @pool: Exchange Pool to get an exchange from @@ -450,14 +455,17 @@ static void fc_exch_delete(struct fc_exch *ep) /* update cache of free slot */ index = (ep->xid - ep->em->min_xid) >> fc_cpu_order; - if (pool->left == FC_XID_UNKNOWN) - pool->left = index; - else if (pool->right == FC_XID_UNKNOWN) - pool->right = index; - else - pool->next_index = index; - - fc_exch_ptr_set(pool, index, 
NULL); + if (!(ep->state & FC_EX_QUARANTINE)) { + if (pool->left == FC_XID_UNKNOWN) + pool->left = index; + else if (pool->right == FC_XID_UNKNOWN) + pool->right = index; + else + pool->next_index = index; + fc_exch_ptr_set(pool, index, NULL); + } else { + fc_exch_ptr_set(pool, index, &fc_quarantine_exch); + } list_del(&ep->ex_list); spin_unlock_bh(&pool->lock); fc_exch_release(ep); /* drop hold for exch in mp */ @@ -525,8 +533,7 @@ static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, * Note: The frame will be freed either by a direct call to fc_frame_free(fp) * or indirectly by calling libfc_function_template.frame_send(). */ -static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, - struct fc_frame *fp) +int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) { struct fc_exch *ep; int error; @@ -536,6 +543,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, spin_unlock_bh(&ep->ex_lock); return error; } +EXPORT_SYMBOL(fc_seq_send); /** * fc_seq_alloc() - Allocate a sequence for a given exchange @@ -577,7 +585,7 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) * for a given sequence/exchange pair * @sp: The sequence/exchange to get a new exchange for */ -static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) +struct fc_seq *fc_seq_start_next(struct fc_seq *sp) { struct fc_exch *ep = fc_seq_exch(sp); @@ -587,16 +595,16 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) return sp; } +EXPORT_SYMBOL(fc_seq_start_next); /* * Set the response handler for the exchange associated with a sequence. * * Note: May sleep if invoked from outside a response handler. */ -static void fc_seq_set_resp(struct fc_seq *sp, - void (*resp)(struct fc_seq *, struct fc_frame *, - void *), - void *arg) +void fc_seq_set_resp(struct fc_seq *sp, + void (*resp)(struct fc_seq *, struct fc_frame *, void *), + void *arg) { struct fc_exch *ep = fc_seq_exch(sp); DEFINE_WAIT(wait); @@ -615,12 +623,20 @@ static void fc_seq_set_resp(struct fc_seq *sp, ep->arg = arg; spin_unlock_bh(&ep->ex_lock); } +EXPORT_SYMBOL(fc_seq_set_resp); /** * fc_exch_abort_locked() - Abort an exchange * @ep: The exchange to be aborted * @timer_msec: The period of time to wait before aborting * + * Abort an exchange and sequence. Generally called because of a + * exchange timeout or an abort from the upper layer. + * + * A timer_msec can be specified for abort timeout, if non-zero + * timer_msec value is specified then exchange resp handler + * will be called with timeout error if no response to abort. + * * Locking notes: Called with exch lock held * * Return value: 0 on success else error code @@ -632,9 +648,13 @@ static int fc_exch_abort_locked(struct fc_exch *ep, struct fc_frame *fp; int error; + FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec); if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || - ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) + ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { + FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n", + ep->esb_stat, ep->state); return -ENXIO; + } /* * Send the abort on a new sequence if possible. 
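fc_exch_delete() above parks quarantined exchange IDs on the fc_quarantine_exch sentinel instead of returning the slot to the free cache, so a late response can never be matched against a recycled exchange. A simplified sketch of the idea, with a fixed-size pool and placeholder names:

#include <linux/stddef.h>

#define SLOT_QUARANTINED	0x1

struct exch {
	unsigned int state;
};

/* Sentinel object: a slot pointing here is reserved, never free. */
static struct exch quarantine_exch;

struct pool {
	struct exch *slots[64];
};

static void slot_release(struct pool *p, int idx, struct exch *ep)
{
	if (ep->state & SLOT_QUARANTINED)
		p->slots[idx] = &quarantine_exch;	/* never reused */
	else
		p->slots[idx] = NULL;			/* back to the free cache */
}

static struct exch *slot_lookup(struct pool *p, int idx)
{
	struct exch *ep = p->slots[idx];

	if (ep == &quarantine_exch)
		return NULL;				/* treat as "not found" */
	return ep;
}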
@@ -680,8 +700,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep, * * Return value: 0 on success else error code */ -static int fc_seq_exch_abort(const struct fc_seq *req_sp, - unsigned int timer_msec) +int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) { struct fc_exch *ep; int error; @@ -758,7 +777,7 @@ static void fc_exch_timeout(struct work_struct *work) u32 e_stat; int rc = 1; - FC_EXCH_DBG(ep, "Exchange timed out\n"); + FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state); spin_lock_bh(&ep->ex_lock); if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) @@ -821,14 +840,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { - index = pool->left; - pool->left = FC_XID_UNKNOWN; - goto hit; + if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) { + index = pool->left; + pool->left = FC_XID_UNKNOWN; + goto hit; + } } if (pool->right != FC_XID_UNKNOWN) { - index = pool->right; - pool->right = FC_XID_UNKNOWN; - goto hit; + if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) { + index = pool->right; + pool->right = FC_XID_UNKNOWN; + goto hit; + } } index = pool->next_index; @@ -888,14 +911,19 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, * EM is selected when a NULL match function pointer is encountered * or when a call to a match function returns true. */ -static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport, - struct fc_frame *fp) +static struct fc_exch *fc_exch_alloc(struct fc_lport *lport, + struct fc_frame *fp) { struct fc_exch_mgr_anchor *ema; + struct fc_exch *ep; - list_for_each_entry(ema, &lport->ema_list, ema_list) - if (!ema->match || ema->match(fp)) - return fc_exch_em_alloc(lport, ema->mp); + list_for_each_entry(ema, &lport->ema_list, ema_list) { + if (!ema->match || ema->match(fp)) { + ep = fc_exch_em_alloc(lport, ema->mp); + if (ep) + return ep; + } + } return NULL; } @@ -906,14 +934,17 @@ static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport, */ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) { + struct fc_lport *lport = mp->lport; struct fc_exch_pool *pool; struct fc_exch *ep = NULL; u16 cpu = xid & fc_cpu_mask; + if (xid == FC_XID_UNKNOWN) + return NULL; + if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { - printk_ratelimited(KERN_ERR - "libfc: lookup request for XID = %d, " - "indicates invalid CPU %d\n", xid, cpu); + pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:", + lport->host->host_no, lport->port_id, xid, cpu); return NULL; } @@ -921,6 +952,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) pool = per_cpu_ptr(mp->pool, cpu); spin_lock_bh(&pool->lock); ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); + if (ep == &fc_quarantine_exch) { + FC_LPORT_DBG(lport, "xid %x quarantined\n", xid); + ep = NULL; + } if (ep) { WARN_ON(ep->xid != xid); fc_exch_hold(ep); @@ -938,7 +973,7 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) * * Note: May sleep if invoked from outside a response handler. */ -static void fc_exch_done(struct fc_seq *sp) +void fc_exch_done(struct fc_seq *sp) { struct fc_exch *ep = fc_seq_exch(sp); int rc; @@ -951,6 +986,7 @@ static void fc_exch_done(struct fc_seq *sp) if (!rc) fc_exch_delete(ep); } +EXPORT_SYMBOL(fc_exch_done); /** * fc_exch_resp() - Allocate a new exchange for a response frame @@ -1197,8 +1233,8 @@ static void fc_exch_set_addr(struct fc_exch *ep, * * The received frame is not freed. 
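fc_exch_alloc() above no longer gives up at the first matching exchange manager; if that manager's pool is exhausted it falls through to the next anchor on the list. A stripped-down sketch of the loop, with placeholder types:

#include <linux/types.h>
#include <linux/list.h>

struct exch;
struct exch_mgr;

struct ema {				/* placeholder for fc_exch_mgr_anchor */
	struct list_head ema_list;
	bool (*match)(void *fp);
	struct exch_mgr *mp;
};

/* Assumed helper: returns NULL when this manager's pool is full. */
struct exch *exch_em_alloc(struct exch_mgr *mp);

static struct exch *exch_alloc(struct list_head *ema_list, void *fp)
{
	struct ema *ema;
	struct exch *ep;

	list_for_each_entry(ema, ema_list, ema_list) {
		if (ema->match && !ema->match(fp))
			continue;
		ep = exch_em_alloc(ema->mp);
		if (ep)
			return ep;	/* first manager that still has room */
	}
	return NULL;
}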
*/ -static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd, - struct fc_seq_els_data *els_data) +void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd, + struct fc_seq_els_data *els_data) { switch (els_cmd) { case ELS_LS_RJT: @@ -1217,6 +1253,7 @@ static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd, FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd); } } +EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send); /** * fc_seq_send_last() - Send a sequence that is the last in the exchange @@ -1258,8 +1295,10 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) */ if (fc_sof_needs_ack(fr_sof(rx_fp))) { fp = fc_frame_alloc(lport, 0); - if (!fp) + if (!fp) { + FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n"); return; + } fh = fc_frame_header_get(fp); fh->fh_r_ctl = FC_RCTL_ACK_1; @@ -1312,13 +1351,18 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, struct fc_frame_header *rx_fh; struct fc_frame_header *fh; struct fc_ba_rjt *rp; + struct fc_seq *sp; struct fc_lport *lport; unsigned int f_ctl; lport = fr_dev(rx_fp); + sp = fr_seq(rx_fp); fp = fc_frame_alloc(lport, sizeof(*rp)); - if (!fp) + if (!fp) { + FC_EXCH_DBG(fc_seq_exch(sp), + "Drop BA_RJT request, out of memory\n"); return; + } fh = fc_frame_header_get(fp); rx_fh = fc_frame_header_get(rx_fp); @@ -1383,14 +1427,17 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) if (!ep) goto reject; + FC_EXCH_DBG(ep, "exch: ABTS received\n"); fp = fc_frame_alloc(ep->lp, sizeof(*ap)); - if (!fp) + if (!fp) { + FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n"); goto free; + } spin_lock_bh(&ep->ex_lock); if (ep->esb_stat & ESB_ST_COMPLETE) { spin_unlock_bh(&ep->ex_lock); - + FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n"); fc_frame_free(fp); goto reject; } @@ -1433,7 +1480,7 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) * A reference will be held on the exchange/sequence for the caller, which * must call fc_seq_release(). */ -static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) +struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) { struct fc_exch_mgr_anchor *ema; @@ -1447,15 +1494,17 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) break; return fr_seq(fp); } +EXPORT_SYMBOL(fc_seq_assign); /** * fc_seq_release() - Release the hold * @sp: The sequence. */ -static void fc_seq_release(struct fc_seq *sp) +void fc_seq_release(struct fc_seq *sp) { fc_exch_release(fc_seq_exch(sp)); } +EXPORT_SYMBOL(fc_seq_release); /** * fc_exch_recv_req() - Handler for an incoming request @@ -1491,7 +1540,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, * The upper-level protocol may request one later, if needed. */ if (fh->fh_rx_id == htons(FC_XID_UNKNOWN)) - return lport->tt.lport_recv(lport, fp); + return fc_lport_recv(lport, fp); reject = fc_seq_lookup_recip(lport, mp, fp); if (reject == FC_RJT_NONE) { @@ -1512,7 +1561,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, * first. 
*/ if (!fc_invoke_resp(ep, sp, fp)) - lport->tt.lport_recv(lport, fp); + fc_lport_recv(lport, fp); fc_exch_release(ep); /* release from lookup */ } else { FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n", @@ -1562,9 +1611,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) if (fc_sof_is_init(sof)) { sp->ssb_stat |= SSB_ST_RESP; sp->id = fh->fh_seq_id; - } else if (sp->id != fh->fh_seq_id) { - atomic_inc(&mp->stats.seq_not_found); - goto rel; } f_ctl = ntoh24(fh->fh_f_ctl); @@ -1761,7 +1807,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) fc_frame_free(fp); break; case FC_RCTL_BA_ABTS: - fc_exch_recv_abts(ep, fp); + if (ep) + fc_exch_recv_abts(ep, fp); + else + fc_frame_free(fp); break; default: /* ignore junk */ fc_frame_free(fp); @@ -1784,11 +1833,16 @@ static void fc_seq_ls_acc(struct fc_frame *rx_fp) struct fc_lport *lport; struct fc_els_ls_acc *acc; struct fc_frame *fp; + struct fc_seq *sp; lport = fr_dev(rx_fp); + sp = fr_seq(rx_fp); fp = fc_frame_alloc(lport, sizeof(*acc)); - if (!fp) + if (!fp) { + FC_EXCH_DBG(fc_seq_exch(sp), + "exch: drop LS_ACC, out of memory\n"); return; + } acc = fc_frame_payload_get(fp, sizeof(*acc)); memset(acc, 0, sizeof(*acc)); acc->la_cmd = ELS_LS_ACC; @@ -1811,11 +1865,16 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason, struct fc_lport *lport; struct fc_els_ls_rjt *rjt; struct fc_frame *fp; + struct fc_seq *sp; lport = fr_dev(rx_fp); + sp = fr_seq(rx_fp); fp = fc_frame_alloc(lport, sizeof(*rjt)); - if (!fp) + if (!fp) { + FC_EXCH_DBG(fc_seq_exch(sp), + "exch: drop LS_ACC, out of memory\n"); return; + } rjt = fc_frame_payload_get(fp, sizeof(*rjt)); memset(rjt, 0, sizeof(*rjt)); rjt->er_cmd = ELS_LS_RJT; @@ -1960,8 +2019,7 @@ static void fc_exch_els_rec(struct fc_frame *rfp) enum fc_els_rjt_reason reason = ELS_RJT_LOGIC; enum fc_els_rjt_explan explan; u32 sid; - u16 rxid; - u16 oxid; + u16 xid, rxid, oxid; lport = fr_dev(rfp); rp = fc_frame_payload_get(rfp, sizeof(*rp)); @@ -1972,18 +2030,35 @@ static void fc_exch_els_rec(struct fc_frame *rfp) rxid = ntohs(rp->rec_rx_id); oxid = ntohs(rp->rec_ox_id); - ep = fc_exch_lookup(lport, - sid == fc_host_port_id(lport->host) ? oxid : rxid); explan = ELS_EXPL_OXID_RXID; - if (!ep) + if (sid == fc_host_port_id(lport->host)) + xid = oxid; + else + xid = rxid; + if (xid == FC_XID_UNKNOWN) { + FC_LPORT_DBG(lport, + "REC request from %x: invalid rxid %x oxid %x\n", + sid, rxid, oxid); + goto reject; + } + ep = fc_exch_lookup(lport, xid); + if (!ep) { + FC_LPORT_DBG(lport, + "REC request from %x: rxid %x oxid %x not found\n", + sid, rxid, oxid); goto reject; + } + FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n", + sid, rxid, oxid); if (ep->oid != sid || oxid != ep->oxid) goto rel; if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid) goto rel; fp = fc_frame_alloc(lport, sizeof(*acc)); - if (!fp) + if (!fp) { + FC_EXCH_DBG(ep, "Drop REC request, out of memory\n"); goto out; + } acc = fc_frame_payload_get(fp, sizeof(*acc)); memset(acc, 0, sizeof(*acc)); @@ -2065,6 +2140,24 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) * @arg: The argument to be passed to the response handler * @timer_msec: The timeout period for the exchange * + * The exchange response handler is set in this routine to resp() + * function pointer. It can be called in two scenarios: if a timeout + * occurs or if a response frame is received for the exchange. 
The + * fc_frame pointer in response handler will also indicate timeout + * as error using IS_ERR related macros. + * + * The exchange destructor handler is also set in this routine. + * The destructor handler is invoked by EM layer when exchange + * is about to free, this can be used by caller to free its + * resources along with exchange free. + * + * The arg is passed back to resp and destructor handler. + * + * The timeout value (in msec) for an exchange is set if non zero + * timer_msec argument is specified. The timer is canceled when + * it fires or when the exchange is done. The exchange timeout handler + * is registered by EM layer. + * * The frame pointer with some of the header's fields must be * filled before calling this routine, those fields are: * @@ -2075,14 +2168,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) * - frame control * - parameter or relative offset */ -static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, - struct fc_frame *fp, - void (*resp)(struct fc_seq *, - struct fc_frame *fp, - void *arg), - void (*destructor)(struct fc_seq *, - void *), - void *arg, u32 timer_msec) +struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, + struct fc_frame *fp, + void (*resp)(struct fc_seq *, + struct fc_frame *fp, + void *arg), + void (*destructor)(struct fc_seq *, void *), + void *arg, u32 timer_msec) { struct fc_exch *ep; struct fc_seq *sp = NULL; @@ -2101,7 +2193,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, ep->resp = resp; ep->destructor = destructor; ep->arg = arg; - ep->r_a_tov = FC_DEF_R_A_TOV; + ep->r_a_tov = lport->r_a_tov; ep->lp = lport; sp = &ep->seq; @@ -2135,6 +2227,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, fc_exch_delete(ep); return NULL; } +EXPORT_SYMBOL(fc_exch_seq_send); /** * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command @@ -2176,6 +2269,7 @@ static void fc_exch_rrq(struct fc_exch *ep) return; retry: + FC_EXCH_DBG(ep, "exch: RRQ send failed\n"); spin_lock_bh(&ep->ex_lock); if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) { spin_unlock_bh(&ep->ex_lock); @@ -2218,6 +2312,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp) if (!ep) goto reject; spin_lock_bh(&ep->ex_lock); + FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n", + sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id)); if (ep->oxid != ntohs(rp->rrq_ox_id)) goto unlock_reject; if (ep->rxid != ntohs(rp->rrq_rx_id) && @@ -2385,6 +2481,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, return NULL; mp->class = class; + mp->lport = lport; /* adjust em exch xid range for offload */ mp->min_xid = min_xid; @@ -2558,36 +2655,9 @@ EXPORT_SYMBOL(fc_exch_recv); */ int fc_exch_init(struct fc_lport *lport) { - if (!lport->tt.seq_start_next) - lport->tt.seq_start_next = fc_seq_start_next; - - if (!lport->tt.seq_set_resp) - lport->tt.seq_set_resp = fc_seq_set_resp; - - if (!lport->tt.exch_seq_send) - lport->tt.exch_seq_send = fc_exch_seq_send; - - if (!lport->tt.seq_send) - lport->tt.seq_send = fc_seq_send; - - if (!lport->tt.seq_els_rsp_send) - lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send; - - if (!lport->tt.exch_done) - lport->tt.exch_done = fc_exch_done; - if (!lport->tt.exch_mgr_reset) lport->tt.exch_mgr_reset = fc_exch_mgr_reset; - if (!lport->tt.seq_exch_abort) - lport->tt.seq_exch_abort = fc_seq_exch_abort; - - if (!lport->tt.seq_assign) - lport->tt.seq_assign = fc_seq_assign; - - if (!lport->tt.seq_release) - lport->tt.seq_release = 
fc_seq_release; - return 0; } EXPORT_SYMBOL(fc_exch_init); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 5121272f28fd8190b6e599ee9f405504cfa48e8f..0e67621477a8a939c0e1e703ba0d0cbcc043c132 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -122,6 +122,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); #define FC_HRD_ERROR 9 #define FC_CRC_ERROR 10 #define FC_TIMED_OUT 11 +#define FC_TRANS_RESET 12 /* * Error recovery timeout values. @@ -195,7 +196,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) * @seq: The sequence that the FCP packet is on (required by destructor API) * @fsp: The FCP packet to be released * - * This routine is called by a destructor callback in the exch_seq_send() + * This routine is called by a destructor callback in the fc_exch_seq_send() * routine of the libfc Transport Template. The 'struct fc_seq' is a required * argument even though it is not used by this routine. * @@ -253,8 +254,21 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) */ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) { - if (!(fsp->state & FC_SRB_COMPL)) + if (!(fsp->state & FC_SRB_COMPL)) { mod_timer(&fsp->timer, jiffies + delay); + fsp->timer_delay = delay; + } +} + +static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp) +{ + fsp->state |= FC_SRB_ABORTED; + fsp->state &= ~FC_SRB_ABORT_PENDING; + + if (fsp->wait_for_comp) + complete(&fsp->tm_done); + else + fc_fcp_complete_locked(fsp); } /** @@ -264,6 +278,8 @@ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) */ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) { + int rc; + if (!fsp->seq_ptr) return -EINVAL; @@ -271,7 +287,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) put_cpu(); fsp->state |= FC_SRB_ABORT_PENDING; - return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); + rc = fc_seq_exch_abort(fsp->seq_ptr, 0); + /* + * fc_seq_exch_abort() might return -ENXIO if + * the sequence is already completed + */ + if (rc == -ENXIO) { + fc_fcp_abort_done(fsp); + rc = 0; + } + return rc; } /** @@ -283,16 +308,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) * fc_io_compl() will notify the SCSI-ml that the I/O is done. * The SCSI-ml will retry the command. 
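fc_fcp_send_abort() above treats -ENXIO from fc_seq_exch_abort() (the exchange already completed) as a successful abort and funnels it through the new fc_fcp_abort_done() helper. A compact sketch of that decision, with the FCP packet reduced to a couple of state bits:

#include <linux/errno.h>

struct pkt {
	unsigned int state;
#define PKT_ABORTED		0x1
#define PKT_ABORT_PENDING	0x2
};

/* Assumed helper: returns -ENXIO if the exchange is already done. */
int seq_exch_abort(struct pkt *pkt);

static void pkt_abort_done(struct pkt *pkt)
{
	pkt->state |= PKT_ABORTED;
	pkt->state &= ~PKT_ABORT_PENDING;
	/* wake a waiter or complete the command here */
}

static int pkt_send_abort(struct pkt *pkt)
{
	int rc;

	pkt->state |= PKT_ABORT_PENDING;
	rc = seq_exch_abort(pkt);
	if (rc == -ENXIO) {
		/*
		 * The exchange finished before the abort went out; that is
		 * not a failure, the command is simply done already.
		 */
		pkt_abort_done(pkt);
		rc = 0;
	}
	return rc;
}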
*/ -static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) +static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code) { if (fsp->seq_ptr) { - fsp->lp->tt.exch_done(fsp->seq_ptr); + fc_exch_done(fsp->seq_ptr); fsp->seq_ptr = NULL; } fsp->state &= ~FC_SRB_ABORT_PENDING; fsp->io_status = 0; - fsp->status_code = FC_ERROR; + fsp->status_code = status_code; fc_fcp_complete_locked(fsp); } @@ -402,8 +427,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) if (!can_queue) can_queue = 1; lport->host->can_queue = can_queue; - shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" - "Reducing can_queue to %d.\n", can_queue); unlock: spin_unlock_irqrestore(lport->host->host_lock, flags); @@ -430,9 +453,28 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, put_cpu(); /* error case */ fc_fcp_can_queue_ramp_down(lport); + shost_printk(KERN_ERR, lport->host, + "libfc: Could not allocate frame, " + "reducing can_queue to %d.\n", lport->host->can_queue); return NULL; } +/** + * get_fsp_rec_tov() - Helper function to get REC_TOV + * @fsp: the FCP packet + * + * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second + */ +static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp) +{ + struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data; + unsigned int e_d_tov = FC_DEF_E_D_TOV; + + if (rpriv && rpriv->e_d_tov > e_d_tov) + e_d_tov = rpriv->e_d_tov; + return msecs_to_jiffies(e_d_tov) + HZ; +} + /** * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target * @fsp: The FCP packet the data is on @@ -536,8 +578,10 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) * and completes the transfer, call the completion handler. */ if (unlikely(fsp->state & FC_SRB_RCV_STATUS) && - fsp->xfer_len == fsp->data_len - fsp->scsi_resid) + fsp->xfer_len == fsp->data_len - fsp->scsi_resid) { + FC_FCP_DBG( fsp, "complete out-of-order sequence\n" ); fc_fcp_complete_locked(fsp); + } return; err: fc_fcp_recovery(fsp, host_bcode); @@ -609,7 +653,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, remaining = seq_blen; fh_parm_offset = frame_offset = offset; tlen = 0; - seq = lport->tt.seq_start_next(seq); + seq = fc_seq_start_next(seq); f_ctl = FC_FC_REL_OFF; WARN_ON(!seq); @@ -687,7 +731,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, /* * send fragment using for a sequence. 
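The relocated get_fsp_rec_tov() above now applies a floor to the per-rport E_D_TOV before converting it to jiffies and adding a one-second margin. A small worked sketch, assuming the libfc default of 2 seconds:

#include <linux/jiffies.h>

#define DEF_E_D_TOV_MS	2000	/* libfc's FC_DEF_E_D_TOV, 2 seconds */

/*
 * REC timeout = max(rport E_D_TOV, default) + 1 second, in jiffies.
 * With HZ == 250 and e_d_tov_ms == 1000 (below the floor):
 *   msecs_to_jiffies(2000) + HZ = 500 + 250 = 750 jiffies, i.e. 3 seconds.
 */
static unsigned long rec_tov_jiffies(unsigned int e_d_tov_ms)
{
	if (e_d_tov_ms < DEF_E_D_TOV_MS)
		e_d_tov_ms = DEF_E_D_TOV_MS;
	return msecs_to_jiffies(e_d_tov_ms) + HZ;
}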
*/ - error = lport->tt.seq_send(lport, seq, fp); + error = fc_seq_send(lport, seq, fp); if (error) { WARN_ON(1); /* send error should be rare */ return error; @@ -727,15 +771,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) ba_done = 0; } - if (ba_done) { - fsp->state |= FC_SRB_ABORTED; - fsp->state &= ~FC_SRB_ABORT_PENDING; - - if (fsp->wait_for_comp) - complete(&fsp->tm_done); - else - fc_fcp_complete_locked(fsp); - } + if (ba_done) + fc_fcp_abort_done(fsp); } /** @@ -764,8 +801,11 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) fh = fc_frame_header_get(fp); r_ctl = fh->fh_r_ctl; - if (lport->state != LPORT_ST_READY) + if (lport->state != LPORT_ST_READY) { + FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n", + lport->state, r_ctl); goto out; + } if (fc_fcp_lock_pkt(fsp)) goto out; @@ -774,8 +814,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) goto unlock; } - if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) + if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) { + FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl); goto unlock; + } if (r_ctl == FC_RCTL_DD_DATA_DESC) { /* @@ -910,7 +952,16 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) * Wait a at least one jiffy to see if it is delivered. * If this expires without data, we may do SRR. */ - fc_fcp_timer_set(fsp, 2); + if (fsp->lp->qfull) { + FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n", + fsp->rport->port_id); + return; + } + FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun " + "len %x, data len %x\n", + fsp->rport->port_id, + fsp->xfer_len, expected_len, fsp->data_len); + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); return; } fsp->status_code = FC_DATA_OVRRUN; @@ -959,8 +1010,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) if (fsp->cdb_status == SAM_STAT_GOOD && fsp->xfer_len < fsp->data_len && !fsp->io_status && (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || - fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) + fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { + FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n", + fsp->xfer_len, fsp->data_len); fsp->status_code = FC_DATA_UNDRUN; + } } seq = fsp->seq_ptr; @@ -970,7 +1024,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) struct fc_frame *conf_frame; struct fc_seq *csp; - csp = lport->tt.seq_start_next(seq); + csp = fc_seq_start_next(seq); conf_frame = fc_fcp_frame_alloc(fsp->lp, 0); if (conf_frame) { f_ctl = FC_FC_SEQ_INIT; @@ -979,10 +1033,10 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, ep->did, ep->sid, FC_TYPE_FCP, f_ctl, 0); - lport->tt.seq_send(lport, csp, conf_frame); + fc_seq_send(lport, csp, conf_frame); } } - lport->tt.exch_done(seq); + fc_exch_done(seq); } /* * Some resets driven by SCSI are not I/Os and do not have @@ -1000,10 +1054,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) */ static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) { - struct fc_lport *lport = fsp->lp; - if (fsp->seq_ptr) { - lport->tt.exch_done(fsp->seq_ptr); + fc_exch_done(fsp->seq_ptr); fsp->seq_ptr = NULL; } fsp->status_code = error; @@ -1115,19 +1167,6 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) return rc; } -/** - * get_fsp_rec_tov() - Helper function to get REC_TOV - * @fsp: the FCP packet - * - * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second - */ -static inline unsigned int 
get_fsp_rec_tov(struct fc_fcp_pkt *fsp) -{ - struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data; - - return msecs_to_jiffies(rpriv->e_d_tov) + HZ; -} - /** * fc_fcp_cmd_send() - Send a FCP command * @lport: The local port to send the command on @@ -1165,8 +1204,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, rpriv->local_port->port_id, FC_TYPE_FCP, FC_FCTL_REQ, 0); - seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, - fsp, 0); + seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0); if (!seq) { rc = -1; goto unlock; @@ -1196,7 +1234,7 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) return; if (error == -FC_EX_CLOSED) { - fc_fcp_retry_cmd(fsp); + fc_fcp_retry_cmd(fsp, FC_ERROR); goto unlock; } @@ -1222,8 +1260,16 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) int rc = FAILED; unsigned long ticks_left; - if (fc_fcp_send_abort(fsp)) + FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state); + if (fc_fcp_send_abort(fsp)) { + FC_FCP_DBG(fsp, "failed to send abort\n"); return FAILED; + } + + if (fsp->state & FC_SRB_ABORTED) { + FC_FCP_DBG(fsp, "target abort cmd completed\n"); + return SUCCESS; + } init_completion(&fsp->tm_done); fsp->wait_for_comp = 1; @@ -1301,7 +1347,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, spin_lock_bh(&fsp->scsi_pkt_lock); if (fsp->seq_ptr) { - lport->tt.exch_done(fsp->seq_ptr); + fc_exch_done(fsp->seq_ptr); fsp->seq_ptr = NULL; } fsp->wait_for_comp = 0; @@ -1355,7 +1401,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) if (fh->fh_type != FC_TYPE_BLS) fc_fcp_resp(fsp, fp); fsp->seq_ptr = NULL; - fsp->lp->tt.exch_done(seq); + fc_exch_done(seq); out_unlock: fc_fcp_unlock_pkt(fsp); out: @@ -1394,6 +1440,15 @@ static void fc_fcp_timeout(unsigned long data) if (fsp->cdb_cmd.fc_tm_flags) goto unlock; + if (fsp->lp->qfull) { + FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n", + fsp->timer_delay); + setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); + fc_fcp_timer_set(fsp, fsp->timer_delay); + goto unlock; + } + FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n", + fsp->timer_delay, rpriv->flags, fsp->state); fsp->state |= FC_SRB_FCP_PROCESSING_TMO; if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) @@ -1486,8 +1541,8 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) rjt = fc_frame_payload_get(fp, sizeof(*rjt)); switch (rjt->er_reason) { default: - FC_FCP_DBG(fsp, "device %x unexpected REC reject " - "reason %d expl %d\n", + FC_FCP_DBG(fsp, + "device %x invalid REC reject %d/%d\n", fsp->rport->port_id, rjt->er_reason, rjt->er_explan); /* fall through */ @@ -1503,18 +1558,23 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) break; case ELS_RJT_LOGIC: case ELS_RJT_UNAB: + FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n", + fsp->rport->port_id, rjt->er_reason, + rjt->er_explan); /* - * If no data transfer, the command frame got dropped - * so we just retry. If data was transferred, we - * lost the response but the target has no record, - * so we abort and retry. + * If response got lost or is stuck in the + * queue somewhere we have no idea if and when + * the response will be received. So quarantine + * the xid and retry the command. 
*/ - if (rjt->er_explan == ELS_EXPL_OXID_RXID && - fsp->xfer_len == 0) { - fc_fcp_retry_cmd(fsp); + if (rjt->er_explan == ELS_EXPL_OXID_RXID) { + struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); + ep->state |= FC_EX_QUARANTINE; + fsp->state |= FC_SRB_ABORTED; + fc_fcp_retry_cmd(fsp, FC_TRANS_RESET); break; } - fc_fcp_recovery(fsp, FC_ERROR); + fc_fcp_recovery(fsp, FC_TRANS_RESET); break; } } else if (opcode == ELS_LS_ACC) { @@ -1608,7 +1668,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) switch (error) { case -FC_EX_CLOSED: - fc_fcp_retry_cmd(fsp); + FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n", + fsp, fsp->rport->port_id); + fc_fcp_retry_cmd(fsp, FC_ERROR); break; default: @@ -1622,8 +1684,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) * Assume REC or LS_ACC was lost. * The exchange manager will have aborted REC, so retry. */ - FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n", - fsp->rport->port_id, error, fsp->recov_retry, + FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n", + fsp, fsp->rport->port_id, fsp->recov_retry, FC_MAX_RECOV_RETRY); if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) fc_fcp_rec(fsp); @@ -1642,6 +1704,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) */ static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code) { + FC_FCP_DBG(fsp, "start recovery code %x\n", code); fsp->status_code = code; fsp->cdb_status = 0; fsp->io_status = 0; @@ -1668,7 +1731,6 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) struct fc_seq *seq; struct fcp_srr *srr; struct fc_frame *fp; - unsigned int rec_tov; rport = fsp->rport; rpriv = rport->dd_data; @@ -1692,10 +1754,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) rpriv->local_port->port_id, FC_TYPE_FCP, FC_FCTL_REQ, 0); - rec_tov = get_fsp_rec_tov(fsp); - seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, - fc_fcp_pkt_destroy, - fsp, jiffies_to_msecs(rec_tov)); + seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp, + fc_fcp_pkt_destroy, + fsp, get_fsp_rec_tov(fsp)); if (!seq) goto retry; @@ -1706,7 +1767,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */ return; retry: - fc_fcp_retry_cmd(fsp); + fc_fcp_retry_cmd(fsp, FC_TRANS_RESET); } /** @@ -1730,9 +1791,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) fh = fc_frame_header_get(fp); /* - * BUG? fc_fcp_srr_error calls exch_done which would release + * BUG? fc_fcp_srr_error calls fc_exch_done which would release * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT, - * then fc_exch_timeout would be sending an abort. The exch_done + * then fc_exch_timeout would be sending an abort. The fc_exch_done * call by fc_fcp_srr_error would prevent fc_exch.c from seeing * an abort response though. 
*/ @@ -1753,7 +1814,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) } fc_fcp_unlock_pkt(fsp); out: - fsp->lp->tt.exch_done(seq); + fc_exch_done(seq); fc_frame_free(fp); } @@ -1768,20 +1829,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) goto out; switch (PTR_ERR(fp)) { case -FC_EX_TIMEOUT: + FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry); if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) fc_fcp_rec(fsp); else fc_fcp_recovery(fsp, FC_TIMED_OUT); break; case -FC_EX_CLOSED: /* e.g., link failure */ + FC_FCP_DBG(fsp, "SRR error, exchange closed\n"); /* fall through */ default: - fc_fcp_retry_cmd(fsp); + fc_fcp_retry_cmd(fsp, FC_ERROR); break; } fc_fcp_unlock_pkt(fsp); out: - fsp->lp->tt.exch_done(fsp->recov_seq); + fc_exch_done(fsp->recov_seq); } /** @@ -1832,8 +1895,13 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) rpriv = rport->dd_data; if (!fc_fcp_lport_queue_ready(lport)) { - if (lport->qfull) + if (lport->qfull) { fc_fcp_can_queue_ramp_down(lport); + shost_printk(KERN_ERR, lport->host, + "libfc: queue full, " + "reducing can_queue to %d.\n", + lport->host->can_queue); + } rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -1980,15 +2048,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; break; case FC_CMD_ABORTED: - FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " - "due to FC_CMD_ABORTED\n"); - sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; + if (host_byte(sc_cmd->result) == DID_TIME_OUT) + FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml " + "due to FC_CMD_ABORTED\n"); + else { + FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " + "due to FC_CMD_ABORTED\n"); + set_host_byte(sc_cmd, DID_ERROR); + } + sc_cmd->result |= fsp->io_status; break; case FC_CMD_RESET: FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml " "due to FC_CMD_RESET\n"); sc_cmd->result = (DID_RESET << 16); break; + case FC_TRANS_RESET: + FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml " + "due to FC_TRANS_RESET\n"); + sc_cmd->result = (DID_SOFT_ERROR << 16); + break; case FC_HRD_ERROR: FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml " "due to FC_HRD_ERROR\n"); @@ -2142,7 +2221,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) fc_block_scsi_eh(sc_cmd); - lport->tt.lport_reset(lport); + fc_lport_reset(lport); wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, wait_tmo)) diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index c11a638f32e6dbc3860bad5758fba5e20a9cc562..d623d084b7ec21c61531380e1da3415126becf35 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -226,7 +226,7 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, sp = fr_seq(in_fp); if (sp) - fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp); + fr_seq(fp) = fc_seq_start_next(sp); fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset); } EXPORT_SYMBOL(fc_fill_reply_hdr); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 04ce7cfb6d1b6272d26ce138a9b11ee952a20d6f..919736a74ffa6e8b46e1f87b042968f42eae71b8 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -149,7 +149,7 @@ static const char *fc_lport_state_names[] = { * @offset: The offset into the response data */ struct fc_bsg_info { - struct fc_bsg_job *job; + struct bsg_job *job; struct fc_lport *lport; u16 rsp_code; struct scatterlist *sg; @@ 
-200,7 +200,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, "in the DNS or FDMI state, it's in the " "%d state", rdata->ids.port_id, lport->state); - lport->tt.rport_logoff(rdata); + fc_rport_logoff(rdata); } break; case RPORT_EV_LOGO: @@ -237,23 +237,26 @@ static const char *fc_lport_state(struct fc_lport *lport) * @remote_fid: The FID of the ptp rport * @remote_wwpn: The WWPN of the ptp rport * @remote_wwnn: The WWNN of the ptp rport + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. */ static void fc_lport_ptp_setup(struct fc_lport *lport, u32 remote_fid, u64 remote_wwpn, u64 remote_wwnn) { - mutex_lock(&lport->disc.disc_mutex); if (lport->ptp_rdata) { - lport->tt.rport_logoff(lport->ptp_rdata); - kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy); + fc_rport_logoff(lport->ptp_rdata); + kref_put(&lport->ptp_rdata->kref, fc_rport_destroy); } - lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid); + mutex_lock(&lport->disc.disc_mutex); + lport->ptp_rdata = fc_rport_create(lport, remote_fid); kref_get(&lport->ptp_rdata->kref); lport->ptp_rdata->ids.port_name = remote_wwpn; lport->ptp_rdata->ids.node_name = remote_wwnn; mutex_unlock(&lport->disc.disc_mutex); - lport->tt.rport_login(lport->ptp_rdata); + fc_rport_login(lport->ptp_rdata); fc_lport_enter_ready(lport); } @@ -308,7 +311,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) fc_stats = &lport->host_stats; memset(fc_stats, 0, sizeof(struct fc_host_statistics)); - fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ; + fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ; for_each_possible_cpu(cpu) { struct fc_stats *stats; @@ -409,7 +412,7 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp) FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", fc_lport_state(lport)); - lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); + fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); fc_frame_free(fp); } @@ -478,7 +481,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport, if (!req) { rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); } else { fmt = req->rnid_fmt; len = sizeof(*rp); @@ -518,7 +521,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport, */ static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) { - lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); + fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); fc_lport_enter_reset(lport); fc_frame_free(fp); } @@ -620,9 +623,9 @@ int fc_fabric_logoff(struct fc_lport *lport) lport->tt.disc_stop_final(lport); mutex_lock(&lport->lp_mutex); if (lport->dns_rdata) - lport->tt.rport_logoff(lport->dns_rdata); + fc_rport_logoff(lport->dns_rdata); mutex_unlock(&lport->lp_mutex); - lport->tt.rport_flush_queue(); + fc_rport_flush_queue(); mutex_lock(&lport->lp_mutex); fc_lport_enter_logo(lport); mutex_unlock(&lport->lp_mutex); @@ -899,7 +902,7 @@ static void fc_lport_recv_els_req(struct fc_lport *lport, /* * Check opcode. 
*/ - recv = lport->tt.rport_recv_req; + recv = fc_rport_recv_req; switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: if (!lport->point_to_multipoint) @@ -941,15 +944,14 @@ struct fc4_prov fc_lport_els_prov = { }; /** - * fc_lport_recv_req() - The generic lport request handler + * fc_lport_recv() - The generic lport request handler * @lport: The lport that received the request * @fp: The frame the request is in * * Locking Note: This function should not be called with the lport * lock held because it may grab the lock. */ -static void fc_lport_recv_req(struct fc_lport *lport, - struct fc_frame *fp) +void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); struct fc_seq *sp = fr_seq(fp); @@ -978,8 +980,9 @@ static void fc_lport_recv_req(struct fc_lport *lport, FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type); fc_frame_free(fp); if (sp) - lport->tt.exch_done(sp); + fc_exch_done(sp); } +EXPORT_SYMBOL(fc_lport_recv); /** * fc_lport_reset() - Reset a local port @@ -1007,12 +1010,14 @@ EXPORT_SYMBOL(fc_lport_reset); */ static void fc_lport_reset_locked(struct fc_lport *lport) { - if (lport->dns_rdata) - lport->tt.rport_logoff(lport->dns_rdata); + if (lport->dns_rdata) { + fc_rport_logoff(lport->dns_rdata); + lport->dns_rdata = NULL; + } if (lport->ptp_rdata) { - lport->tt.rport_logoff(lport->ptp_rdata); - kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy); + fc_rport_logoff(lport->ptp_rdata); + kref_put(&lport->ptp_rdata->kref, fc_rport_destroy); lport->ptp_rdata = NULL; } @@ -1426,13 +1431,13 @@ static void fc_lport_enter_dns(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_DNS); mutex_lock(&lport->disc.disc_mutex); - rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV); + rdata = fc_rport_create(lport, FC_FID_DIR_SERV); mutex_unlock(&lport->disc.disc_mutex); if (!rdata) goto err; rdata->ops = &fc_lport_rport_ops; - lport->tt.rport_login(rdata); + fc_rport_login(rdata); return; err: @@ -1543,13 +1548,13 @@ static void fc_lport_enter_fdmi(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_FDMI); mutex_lock(&lport->disc.disc_mutex); - rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV); + rdata = fc_rport_create(lport, FC_FID_MGMT_SERV); mutex_unlock(&lport->disc.disc_mutex); if (!rdata) goto err; rdata->ops = &fc_lport_rport_ops; - lport->tt.rport_login(rdata); + fc_rport_login(rdata); return; err: @@ -1772,7 +1777,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, if ((csp_flags & FC_SP_FT_FPORT) == 0) { if (e_d_tov > lport->e_d_tov) lport->e_d_tov = e_d_tov; - lport->r_a_tov = 2 * e_d_tov; + lport->r_a_tov = 2 * lport->e_d_tov; fc_lport_set_port_id(lport, did, fp); printk(KERN_INFO "host%d: libfc: " "Port (%6.6x) entered " @@ -1784,8 +1789,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, get_unaligned_be64( &flp->fl_wwnn)); } else { - lport->e_d_tov = e_d_tov; - lport->r_a_tov = r_a_tov; + if (e_d_tov > lport->e_d_tov) + lport->e_d_tov = e_d_tov; + if (r_a_tov > lport->r_a_tov) + lport->r_a_tov = r_a_tov; fc_host_fabric_name(lport->host) = get_unaligned_be64(&flp->fl_wwnn); fc_lport_set_port_id(lport, did, fp); @@ -1858,12 +1865,6 @@ EXPORT_SYMBOL(fc_lport_config); */ int fc_lport_init(struct fc_lport *lport) { - if (!lport->tt.lport_recv) - lport->tt.lport_recv = fc_lport_recv_req; - - if (!lport->tt.lport_reset) - lport->tt.lport_reset = fc_lport_reset; - fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 
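The FLOGI response handling in the hunks above stops overwriting the local E_D_TOV and R_A_TOV with whatever the remote end advertises and instead only ever raises them, so a peer advertising smaller timeouts can no longer shrink values already in effect (and in the point-to-point branch R_A_TOV is now derived from the possibly-raised local E_D_TOV). A minimal userspace sketch of that raise-only merge; the struct and negotiate_tov() are illustrative stand-ins, not libfc API:

#include <stdio.h>

struct tov_state {
	unsigned int e_d_tov;	/* error-detect timeout, ms */
	unsigned int r_a_tov;	/* resource-allocation timeout, ms */
};

/* Raise-only merge of the timeout values advertised by the peer. */
static void negotiate_tov(struct tov_state *local,
			  unsigned int peer_e_d_tov,
			  unsigned int peer_r_a_tov)
{
	if (peer_e_d_tov > local->e_d_tov)
		local->e_d_tov = peer_e_d_tov;
	if (peer_r_a_tov > local->r_a_tov)
		local->r_a_tov = peer_r_a_tov;
}

int main(void)
{
	struct tov_state tov = { .e_d_tov = 2000, .r_a_tov = 10000 };

	negotiate_tov(&tov, 1000, 20000);	/* the smaller E_D_TOV is ignored */
	printf("e_d_tov=%u r_a_tov=%u\n", tov.e_d_tov, tov.r_a_tov);
	return 0;
}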
fc_host_node_name(lport->host) = lport->wwnn; fc_host_port_name(lport->host) = lport->wwpn; @@ -1900,18 +1901,19 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, void *info_arg) { struct fc_bsg_info *info = info_arg; - struct fc_bsg_job *job = info->job; + struct bsg_job *job = info->job; + struct fc_bsg_reply *bsg_reply = job->reply; struct fc_lport *lport = info->lport; struct fc_frame_header *fh; size_t len; void *buf; if (IS_ERR(fp)) { - job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ? + bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ? -ECONNABORTED : -ETIMEDOUT; job->reply_len = sizeof(uint32_t); - job->state_flags |= FC_RQST_STATE_DONE; - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); kfree(info); return; } @@ -1928,25 +1930,25 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, (unsigned short)fc_frame_payload_op(fp); /* Save the reply status of the job */ - job->reply->reply_data.ctels_reply.status = + bsg_reply->reply_data.ctels_reply.status = (cmd == info->rsp_code) ? FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT; } - job->reply->reply_payload_rcv_len += + bsg_reply->reply_payload_rcv_len += fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, &info->offset, NULL); if (fr_eof(fp) == FC_EOF_T && (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { - if (job->reply->reply_payload_rcv_len > + if (bsg_reply->reply_payload_rcv_len > job->reply_payload.payload_len) - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len; - job->reply->result = 0; - job->state_flags |= FC_RQST_STATE_DONE; - job->job_done(job); + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); kfree(info); } fc_frame_free(fp); @@ -1962,7 +1964,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, * Locking Note: The lport lock is expected to be held before calling * this routine. */ -static int fc_lport_els_request(struct fc_bsg_job *job, +static int fc_lport_els_request(struct bsg_job *job, struct fc_lport *lport, u32 did, u32 tov) { @@ -2005,8 +2007,8 @@ static int fc_lport_els_request(struct fc_bsg_job *job, info->nents = job->reply_payload.sg_cnt; info->sg = job->reply_payload.sg_list; - if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, - NULL, info, tov)) { + if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp, + NULL, info, tov)) { kfree(info); return -ECOMM; } @@ -2023,7 +2025,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job, * Locking Note: The lport lock is expected to be held before calling * this routine. 
*/ -static int fc_lport_ct_request(struct fc_bsg_job *job, +static int fc_lport_ct_request(struct bsg_job *job, struct fc_lport *lport, u32 did, u32 tov) { struct fc_bsg_info *info; @@ -2066,8 +2068,8 @@ static int fc_lport_ct_request(struct fc_bsg_job *job, info->nents = job->reply_payload.sg_cnt; info->sg = job->reply_payload.sg_list; - if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, - NULL, info, tov)) { + if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp, + NULL, info, tov)) { kfree(info); return -ECOMM; } @@ -2079,25 +2081,27 @@ static int fc_lport_ct_request(struct fc_bsg_job *job, * FC Passthrough requests * @job: The BSG passthrough job */ -int fc_lport_bsg_request(struct fc_bsg_job *job) +int fc_lport_bsg_request(struct bsg_job *job) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct request *rsp = job->req->next_rq; - struct Scsi_Host *shost = job->shost; + struct Scsi_Host *shost = fc_bsg_to_shost(job); struct fc_lport *lport = shost_priv(shost); struct fc_rport *rport; struct fc_rport_priv *rdata; int rc = -EINVAL; u32 did, tov; - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (rsp) rsp->resid_len = job->reply_payload.payload_len; mutex_lock(&lport->lp_mutex); - switch (job->request->msgcode) { + switch (bsg_request->msgcode) { case FC_BSG_RPT_ELS: - rport = job->rport; + rport = fc_bsg_to_rport(job); if (!rport) break; @@ -2107,7 +2111,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) break; case FC_BSG_RPT_CT: - rport = job->rport; + rport = fc_bsg_to_rport(job); if (!rport) break; @@ -2117,25 +2121,25 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) break; case FC_BSG_HST_CT: - did = ntoh24(job->request->rqst_data.h_ct.port_id); + did = ntoh24(bsg_request->rqst_data.h_ct.port_id); if (did == FC_FID_DIR_SERV) { rdata = lport->dns_rdata; if (!rdata) break; tov = rdata->e_d_tov; } else { - rdata = lport->tt.rport_lookup(lport, did); + rdata = fc_rport_lookup(lport, did); if (!rdata) break; tov = rdata->e_d_tov; - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); } rc = fc_lport_ct_request(job, lport, did, tov); break; case FC_BSG_HST_ELS_NOLOGIN: - did = ntoh24(job->request->rqst_data.h_els.port_id); + did = ntoh24(bsg_request->rqst_data.h_els.port_id); rc = fc_lport_els_request(job, lport, did, lport->e_d_tov); break; } diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 97aeaddd600d42c03feb79f3fa11bf6a80ce3229..c991f3b822f8857b3120b212f9e00a42b620ed79 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -44,6 +44,19 @@ * path this potential over-use of the mutex is acceptable. 
*/ +/* + * RPORT REFERENCE COUNTING + * + * A rport reference should be taken when: + * - an rport is allocated + * - a workqueue item is scheduled + * - an ELS request is send + * The reference should be dropped when: + * - the workqueue function has finished + * - the ELS response is handled + * - an rport is removed + */ + #include #include #include @@ -74,8 +87,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *); static void fc_rport_timeout(struct work_struct *); -static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *); -static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *); +static void fc_rport_error(struct fc_rport_priv *, int); +static void fc_rport_error_retry(struct fc_rport_priv *, int); static void fc_rport_work(struct work_struct *); static const char *fc_rport_state_names[] = { @@ -98,8 +111,8 @@ static const char *fc_rport_state_names[] = { * The reference count of the fc_rport_priv structure is * increased by one. */ -static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, - u32 port_id) +struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, + u32 port_id) { struct fc_rport_priv *rdata = NULL, *tmp_rdata; @@ -113,6 +126,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, rcu_read_unlock(); return rdata; } +EXPORT_SYMBOL(fc_rport_lookup); /** * fc_rport_create() - Create a new remote port @@ -123,12 +137,11 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, * * Locking note: must be called with the disc_mutex held. */ -static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, - u32 port_id) +struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) { struct fc_rport_priv *rdata; - rdata = lport->tt.rport_lookup(lport, port_id); + rdata = fc_rport_lookup(lport, port_id); if (rdata) return rdata; @@ -158,18 +171,20 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, } return rdata; } +EXPORT_SYMBOL(fc_rport_create); /** * fc_rport_destroy() - Free a remote port after last reference is released * @kref: The remote port's kref */ -static void fc_rport_destroy(struct kref *kref) +void fc_rport_destroy(struct kref *kref) { struct fc_rport_priv *rdata; rdata = container_of(kref, struct fc_rport_priv, kref); kfree_rcu(rdata, rcu); } +EXPORT_SYMBOL(fc_rport_destroy); /** * fc_rport_state() - Return a string identifying the remote port's state @@ -242,6 +257,8 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata, /** * fc_rport_work() - Handler for remote port events in the rport_event_queue * @work: Handle to the remote port being dequeued + * + * Reference counting: drops kref on return */ static void fc_rport_work(struct work_struct *work) { @@ -272,12 +289,14 @@ static void fc_rport_work(struct work_struct *work) kref_get(&rdata->kref); mutex_unlock(&rdata->rp_mutex); - if (!rport) + if (!rport) { + FC_RPORT_DBG(rdata, "No rport!\n"); rport = fc_remote_port_add(lport->host, 0, &ids); + } if (!rport) { FC_RPORT_DBG(rdata, "Failed to add the rport\n"); - lport->tt.rport_logoff(rdata); - kref_put(&rdata->kref, lport->tt.rport_destroy); + fc_rport_logoff(rdata); + kref_put(&rdata->kref, fc_rport_destroy); return; } mutex_lock(&rdata->rp_mutex); @@ -303,7 +322,7 @@ static void fc_rport_work(struct work_struct *work) 
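The RPORT REFERENCE COUNTING comment block introduced at the top of fc_rport.c above states the rule the rest of this patch enforces: take a kref before handing the rport to the workqueue or to an ELS callback, and drop it either when scheduling fails or when the work/callback finishes. A small userspace sketch of that idiom under assumed names (rport_get/rport_put/try_queue are illustrative, not libfc functions):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rport {
	int refcount;
};

static void rport_get(struct rport *r) { r->refcount++; }

static void rport_put(struct rport *r)
{
	if (--r->refcount == 0) {
		printf("last reference dropped, freeing\n");
		free(r);
	}
}

/* Stand-in for queue_work()/schedule_delayed_work(); may refuse the item. */
static bool try_queue(bool already_pending)
{
	return !already_pending;
}

static void work_fn(struct rport *r)
{
	printf("work runs against a guaranteed-live rport (ref=%d)\n", r->refcount);
	rport_put(r);			/* drop the reference the scheduler side took */
}

int main(void)
{
	struct rport *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	r->refcount = 1;		/* allocation reference */

	rport_get(r);			/* reference for the queued work item */
	if (!try_queue(false))
		rport_put(r);		/* scheduling failed: drop it right away */
	else
		work_fn(r);		/* normally runs later from the workqueue */

	rport_put(r);			/* drop the allocation reference */
	return 0;
}

This is the same shape as the kref_get()/queue_work()/kref_put() pairs added to fc_rport_enter_delete(), fc_rport_enter_ready() and fc_rport_error_retry() further down.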
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); rdata->lld_event_callback(lport, rdata, event); } - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); break; case RPORT_EV_FAILED: @@ -329,7 +348,8 @@ static void fc_rport_work(struct work_struct *work) FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); rdata->lld_event_callback(lport, rdata, event); } - cancel_delayed_work_sync(&rdata->retry_work); + if (cancel_delayed_work_sync(&rdata->retry_work)) + kref_put(&rdata->kref, fc_rport_destroy); /* * Reset any outstanding exchanges before freeing rport. @@ -351,7 +371,7 @@ static void fc_rport_work(struct work_struct *work) if (port_id == FC_FID_DIR_SERV) { rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); } else if ((rdata->flags & FC_RP_STARTED) && rdata->major_retries < lport->max_rport_retry_count) { @@ -362,17 +382,21 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&rdata->rp_mutex); } else { FC_RPORT_DBG(rdata, "work delete\n"); + mutex_lock(&lport->disc.disc_mutex); list_del_rcu(&rdata->peers); + mutex_unlock(&lport->disc.disc_mutex); mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); } } else { /* * Re-open for events. Reissue READY event if ready. */ rdata->event = RPORT_EV_NONE; - if (rdata->rp_state == RPORT_ST_READY) + if (rdata->rp_state == RPORT_ST_READY) { + FC_RPORT_DBG(rdata, "work reopen\n"); fc_rport_enter_ready(rdata); + } mutex_unlock(&rdata->rp_mutex); } break; @@ -381,12 +405,21 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&rdata->rp_mutex); break; } + kref_put(&rdata->kref, fc_rport_destroy); } /** * fc_rport_login() - Start the remote port login state machine * @rdata: The remote port to be logged in to * + * Initiates the RP state machine. It is called from the LP module. + * This function will issue the following commands to the N_Port + * identified by the FC ID provided. + * + * - PLOGI + * - PRLI + * - RTV + * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. @@ -395,10 +428,16 @@ static void fc_rport_work(struct work_struct *work) * If it appears we are already logged in, ADISC is used to verify * the setup. */ -static int fc_rport_login(struct fc_rport_priv *rdata) +int fc_rport_login(struct fc_rport_priv *rdata) { mutex_lock(&rdata->rp_mutex); + if (rdata->flags & FC_RP_STARTED) { + FC_RPORT_DBG(rdata, "port already started\n"); + mutex_unlock(&rdata->rp_mutex); + return 0; + } + rdata->flags |= FC_RP_STARTED; switch (rdata->rp_state) { case RPORT_ST_READY: @@ -408,15 +447,20 @@ static int fc_rport_login(struct fc_rport_priv *rdata) case RPORT_ST_DELETE: FC_RPORT_DBG(rdata, "Restart deleted port\n"); break; - default: + case RPORT_ST_INIT: FC_RPORT_DBG(rdata, "Login to port\n"); fc_rport_enter_flogi(rdata); break; + default: + FC_RPORT_DBG(rdata, "Login in progress, state %s\n", + fc_rport_state(rdata)); + break; } mutex_unlock(&rdata->rp_mutex); return 0; } +EXPORT_SYMBOL(fc_rport_login); /** * fc_rport_enter_delete() - Schedule a remote port to be deleted @@ -431,6 +475,8 @@ static int fc_rport_login(struct fc_rport_priv *rdata) * Set the new event so that the old pending event will not occur. * Since we have the mutex, even if fc_rport_work() is already started, * it'll see the new event. 
+ * + * Reference counting: does not modify kref */ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, enum fc_rport_event event) @@ -442,8 +488,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, fc_rport_state_enter(rdata, RPORT_ST_DELETE); - if (rdata->event == RPORT_EV_NONE) - queue_work(rport_event_queue, &rdata->event_work); + kref_get(&rdata->kref); + if (rdata->event == RPORT_EV_NONE && + !queue_work(rport_event_queue, &rdata->event_work)) + kref_put(&rdata->kref, fc_rport_destroy); + rdata->event = event; } @@ -455,7 +504,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, * function will hold the rport lock, call an _enter_* * function and then unlock the rport. */ -static int fc_rport_logoff(struct fc_rport_priv *rdata) +int fc_rport_logoff(struct fc_rport_priv *rdata) { struct fc_lport *lport = rdata->local_port; u32 port_id = rdata->ids.port_id; @@ -489,6 +538,7 @@ static int fc_rport_logoff(struct fc_rport_priv *rdata) mutex_unlock(&rdata->rp_mutex); return 0; } +EXPORT_SYMBOL(fc_rport_logoff); /** * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state @@ -496,6 +546,8 @@ static int fc_rport_logoff(struct fc_rport_priv *rdata) * * Locking Note: The rport lock is expected to be held before calling * this routine. + * + * Reference counting: schedules workqueue, does not modify kref */ static void fc_rport_enter_ready(struct fc_rport_priv *rdata) { @@ -503,8 +555,11 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata) FC_RPORT_DBG(rdata, "Port is Ready\n"); - if (rdata->event == RPORT_EV_NONE) - queue_work(rport_event_queue, &rdata->event_work); + kref_get(&rdata->kref); + if (rdata->event == RPORT_EV_NONE && + !queue_work(rport_event_queue, &rdata->event_work)) + kref_put(&rdata->kref, fc_rport_destroy); + rdata->event = RPORT_EV_READY; } @@ -515,6 +570,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata) * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. + * + * Reference counting: Drops kref on return. */ static void fc_rport_timeout(struct work_struct *work) { @@ -522,6 +579,7 @@ static void fc_rport_timeout(struct work_struct *work) container_of(work, struct fc_rport_priv, retry_work.work); mutex_lock(&rdata->rp_mutex); + FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata)); switch (rdata->rp_state) { case RPORT_ST_FLOGI: @@ -547,23 +605,25 @@ static void fc_rport_timeout(struct work_struct *work) } mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); } /** * fc_rport_error() - Error handler, called once retries have been exhausted * @rdata: The remote port the error is happened on - * @fp: The error code encapsulated in a frame pointer + * @err: The error code * * Locking Note: The rport lock is expected to be held before * calling this routine + * + * Reference counting: does not modify kref */ -static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) +static void fc_rport_error(struct fc_rport_priv *rdata, int err) { struct fc_lport *lport = rdata->local_port; - FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n", - IS_ERR(fp) ? 
-PTR_ERR(fp) : 0, - fc_rport_state(rdata), rdata->retries); + FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n", + -err, fc_rport_state(rdata), rdata->retries); switch (rdata->rp_state) { case RPORT_ST_FLOGI: @@ -595,36 +655,39 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) /** * fc_rport_error_retry() - Handler for remote port state retries * @rdata: The remote port whose state is to be retried - * @fp: The error code encapsulated in a frame pointer + * @err: The error code * * If the error was an exchange timeout retry immediately, * otherwise wait for E_D_TOV. * * Locking Note: The rport lock is expected to be held before * calling this routine + * + * Reference counting: increments kref when scheduling retry_work */ -static void fc_rport_error_retry(struct fc_rport_priv *rdata, - struct fc_frame *fp) +static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err) { - unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV); + unsigned long delay = msecs_to_jiffies(rdata->e_d_tov); /* make sure this isn't an FC_EX_CLOSED error, never retry those */ - if (PTR_ERR(fp) == -FC_EX_CLOSED) + if (err == -FC_EX_CLOSED) goto out; if (rdata->retries < rdata->local_port->max_rport_retry_count) { - FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n", - PTR_ERR(fp), fc_rport_state(rdata)); + FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n", + err, fc_rport_state(rdata)); rdata->retries++; /* no additional delay on exchange timeouts */ - if (PTR_ERR(fp) == -FC_EX_TIMEOUT) + if (err == -FC_EX_TIMEOUT) delay = 0; - schedule_delayed_work(&rdata->retry_work, delay); + kref_get(&rdata->kref); + if (!schedule_delayed_work(&rdata->retry_work, delay)) + kref_put(&rdata->kref, fc_rport_destroy); return; } out: - fc_rport_error(rdata, fp); + fc_rport_error(rdata, err); } /** @@ -684,8 +747,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_lport *lport = rdata->local_port; struct fc_els_flogi *flogi; unsigned int r_a_tov; + u8 opcode; + int err = 0; - FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp)); + FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", + IS_ERR(fp) ? 
"error" : fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) goto put; @@ -701,18 +767,34 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, } if (IS_ERR(fp)) { - fc_rport_error(rdata, fp); + fc_rport_error(rdata, PTR_ERR(fp)); goto err; } - - if (fc_frame_payload_op(fp) != ELS_LS_ACC) + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + struct fc_els_ls_rjt *rjt; + + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n", + rjt->er_reason, rjt->er_explan); + err = -FC_EX_ELS_RJT; goto bad; - if (fc_rport_login_complete(rdata, fp)) + } else if (opcode != ELS_LS_ACC) { + FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode); + err = -FC_EX_ELS_RJT; goto bad; + } + if (fc_rport_login_complete(rdata, fp)) { + FC_RPORT_DBG(rdata, "FLOGI failed, no login\n"); + err = -FC_EX_INV_LOGIN; + goto bad; + } flogi = fc_frame_payload_get(fp, sizeof(*flogi)); - if (!flogi) + if (!flogi) { + err = -FC_EX_ALLOC_ERR; goto bad; + } r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov); if (r_a_tov > rdata->r_a_tov) rdata->r_a_tov = r_a_tov; @@ -726,11 +808,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, err: mutex_unlock(&rdata->rp_mutex); put: - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); return; bad: FC_RPORT_DBG(rdata, "Bad FLOGI response\n"); - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, err); goto out; } @@ -740,6 +822,8 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, * * Locking Note: The rport lock is expected to be held before calling * this routine. + * + * Reference counting: increments kref when sending ELS */ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) { @@ -756,20 +840,23 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) - return fc_rport_error_retry(rdata, fp); + return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); + kref_get(&rdata->kref); if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI, fc_rport_flogi_resp, rdata, - 2 * lport->r_a_tov)) - fc_rport_error_retry(rdata, NULL); - else - kref_get(&rdata->kref); + 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } } /** * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode * @lport: The local port that received the PLOGI request * @rx_fp: The PLOGI request frame + * + * Reference counting: drops kref on return */ static void fc_rport_recv_flogi_req(struct fc_lport *lport, struct fc_frame *rx_fp) @@ -799,7 +886,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, goto reject; } - rdata = lport->tt.rport_lookup(lport, sid); + rdata = fc_rport_lookup(lport, sid); if (!rdata) { rjt_data.reason = ELS_RJT_FIP; rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; @@ -824,8 +911,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, * RPORT wouldn;t have created and 'rport_lookup' would have * failed anyway in that case. 
*/ - if (lport->point_to_multipoint) - break; + break; case RPORT_ST_DELETE: mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_FIP; @@ -867,20 +953,27 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); - if (rdata->ids.port_name < lport->wwpn) - fc_rport_enter_plogi(rdata); - else - fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); + /* + * Do not proceed with the state machine if our + * FLOGI has crossed with an FLOGI from the + * remote port; wait for the FLOGI response instead. + */ + if (rdata->rp_state != RPORT_ST_FLOGI) { + if (rdata->ids.port_name < lport->wwpn) + fc_rport_enter_plogi(rdata); + else + fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); + } out: mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); fc_frame_free(rx_fp); return; reject_put: - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); reject: - lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); fc_frame_free(rx_fp); } @@ -904,10 +997,13 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, u16 cssp_seq; u8 op; - mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp)); + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + if (rdata->rp_state != RPORT_ST_PLOGI) { FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state " "%s\n", fc_rport_state(rdata)); @@ -917,7 +1013,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, } if (IS_ERR(fp)) { - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, PTR_ERR(fp)); goto err; } @@ -939,14 +1035,20 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, rdata->max_seq = csp_seq; rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs); fc_rport_enter_prli(rdata); - } else - fc_rport_error_retry(rdata, fp); + } else { + struct fc_els_ls_rjt *rjt; + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n", + rjt->er_reason, rjt->er_explan); + fc_rport_error_retry(rdata, -FC_EX_ELS_RJT); + } out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, lport->tt.rport_destroy); +put: + kref_put(&rdata->kref, fc_rport_destroy); } static bool @@ -969,6 +1071,8 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata) * * Locking Note: The rport lock is expected to be held before calling * this routine. 
+ * + * Reference counting: increments kref when sending ELS */ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) { @@ -990,17 +1094,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) { FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__); - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); return; } rdata->e_d_tov = lport->e_d_tov; + kref_get(&rdata->kref); if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, fc_rport_plogi_resp, rdata, - 2 * lport->r_a_tov)) - fc_rport_error_retry(rdata, NULL); - else - kref_get(&rdata->kref); + 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } } /** @@ -1022,16 +1127,20 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_els_spp spp; } *pp; struct fc_els_spp temp_spp; + struct fc_els_ls_rjt *rjt; struct fc4_prov *prov; u32 roles = FC_RPORT_ROLE_UNKNOWN; u32 fcp_parm = 0; u8 op; - u8 resp_code = 0; - - mutex_lock(&rdata->rp_mutex); + enum fc_els_spp_resp resp_code; FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp)); + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + if (rdata->rp_state != RPORT_ST_PRLI) { FC_RPORT_DBG(rdata, "Received a PRLI response, but in state " "%s\n", fc_rport_state(rdata)); @@ -1041,7 +1150,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, } if (IS_ERR(fp)) { - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, PTR_ERR(fp)); goto err; } @@ -1055,14 +1164,14 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, goto out; resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK); - FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n", - pp->spp.spp_flags); + FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n", + pp->spp.spp_flags, pp->spp.spp_type); rdata->spp_type = pp->spp.spp_type; if (resp_code != FC_SPP_RESP_ACK) { if (resp_code == FC_SPP_RESP_CONF) - fc_rport_error(rdata, fp); + fc_rport_error(rdata, -FC_EX_SEQ_ERR); else - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR); goto out; } if (pp->prli.prli_spp_len < sizeof(pp->spp)) @@ -1074,13 +1183,25 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, if (fcp_parm & FCP_SPPF_CONF_COMPL) rdata->flags |= FC_RP_FLAGS_CONF_REQ; - prov = fc_passive_prov[FC_TYPE_FCP]; + /* + * Call prli provider if we should act as a target + */ + prov = fc_passive_prov[rdata->spp_type]; if (prov) { memset(&temp_spp, 0, sizeof(temp_spp)); prov->prli(rdata, pp->prli.prli_spp_len, &pp->spp, &temp_spp); } - + /* + * Check if the image pair could be established + */ + if (rdata->spp_type != FC_TYPE_FCP || + !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) { + /* + * Nope; we can't use this port as a target. 
+ */ + fcp_parm &= ~FCP_SPPF_TARG_FCN; + } rdata->supported_classes = FC_COS_CLASS3; if (fcp_parm & FCP_SPPF_INIT_FCN) roles |= FC_RPORT_ROLE_FCP_INITIATOR; @@ -1091,15 +1212,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, fc_rport_enter_rtv(rdata); } else { - FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n"); - fc_rport_error_retry(rdata, fp); + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n", + rjt->er_reason, rjt->er_explan); + fc_rport_error_retry(rdata, FC_EX_ELS_RJT); } out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); +put: + kref_put(&rdata->kref, fc_rport_destroy); } /** @@ -1108,6 +1232,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, * * Locking Note: The rport lock is expected to be held before calling * this routine. + * + * Reference counting: increments kref when sending ELS */ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) { @@ -1128,6 +1254,15 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) return; } + /* + * And if the local port does not support the initiator function + * there's no need to send a PRLI, either. + */ + if (!(lport->service_params & FCP_SPPF_INIT_FCN)) { + fc_rport_enter_ready(rdata); + return; + } + FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n", fc_rport_state(rdata)); @@ -1135,7 +1270,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) fp = fc_frame_alloc(lport, sizeof(*pp)); if (!fp) { - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); return; } @@ -1151,15 +1286,16 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) fc_host_port_id(lport->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp, - NULL, rdata, 2 * lport->r_a_tov)) - fc_rport_error_retry(rdata, NULL); - else - kref_get(&rdata->kref); + kref_get(&rdata->kref); + if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp, + NULL, rdata, 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } } /** - * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses + * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses * @sp: The sequence the RTV was on * @fp: The RTV response frame * @rdata_arg: The remote port that sent the RTV response @@ -1176,10 +1312,13 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_rport_priv *rdata = rdata_arg; u8 op; - mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp)); + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + if (rdata->rp_state != RPORT_ST_RTV) { FC_RPORT_DBG(rdata, "Received a RTV response, but in state " "%s\n", fc_rport_state(rdata)); @@ -1189,7 +1328,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, } if (IS_ERR(fp)) { - fc_rport_error(rdata, fp); + fc_rport_error(rdata, PTR_ERR(fp)); goto err; } @@ -1205,13 +1344,15 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, tov = ntohl(rtv->rtv_r_a_tov); if (tov == 0) tov = 1; - rdata->r_a_tov = tov; + if (tov > rdata->r_a_tov) + rdata->r_a_tov = tov; tov = ntohl(rtv->rtv_e_d_tov); if (toq & FC_ELS_RTV_EDRES) tov /= 1000000; if (tov == 0) tov = 1; - rdata->e_d_tov = tov; + if (tov > 
rdata->e_d_tov) + rdata->e_d_tov = tov; } } @@ -1221,7 +1362,8 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); +put: + kref_put(&rdata->kref, fc_rport_destroy); } /** @@ -1230,6 +1372,8 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, * * Locking Note: The rport lock is expected to be held before calling * this routine. + * + * Reference counting: increments kref when sending ELS */ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) { @@ -1243,16 +1387,52 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); if (!fp) { - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); return; } + kref_get(&rdata->kref); if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, fc_rport_rtv_resp, rdata, - 2 * lport->r_a_tov)) - fc_rport_error_retry(rdata, NULL); - else - kref_get(&rdata->kref); + 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } +} + +/** + * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests + * @rdata: The remote port that sent the RTV request + * @in_fp: The RTV request frame + * + * Locking Note: Called with the lport and rport locks held. + */ +static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata, + struct fc_frame *in_fp) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct fc_els_rtv_acc *rtv; + struct fc_seq_els_data rjt_data; + + FC_RPORT_DBG(rdata, "Received RTV request\n"); + + fp = fc_frame_alloc(lport, sizeof(*rtv)); + if (!fp) { + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.reason = ELS_EXPL_INSUF_RES; + fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); + goto drop; + } + rtv = fc_frame_payload_get(fp, sizeof(*rtv)); + rtv->rtv_cmd = ELS_LS_ACC; + rtv->rtv_r_a_tov = htonl(lport->r_a_tov); + rtv->rtv_e_d_tov = htonl(lport->e_d_tov); + rtv->rtv_toq = 0; + fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); +drop: + fc_frame_free(in_fp); } /** @@ -1262,15 +1442,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) * @lport_arg: The local port */ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lport_arg) + void *rdata_arg) { - struct fc_lport *lport = lport_arg; + struct fc_rport_priv *rdata = rdata_arg; + struct fc_lport *lport = rdata->local_port; FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did, "Received a LOGO %s\n", fc_els_resp_type(fp)); - if (IS_ERR(fp)) - return; - fc_frame_free(fp); + if (!IS_ERR(fp)) + fc_frame_free(fp); + kref_put(&rdata->kref, fc_rport_destroy); } /** @@ -1279,6 +1460,8 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, * * Locking Note: The rport lock is expected to be held before calling * this routine. 
+ * + * Reference counting: increments kref when sending ELS */ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) { @@ -1291,8 +1474,10 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); if (!fp) return; - (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, - fc_rport_logo_resp, lport, 0); + kref_get(&rdata->kref); + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, + fc_rport_logo_resp, rdata, 0)) + kref_put(&rdata->kref, fc_rport_destroy); } /** @@ -1312,10 +1497,13 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_els_adisc *adisc; u8 op; - mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "Received a ADISC response\n"); + if (fp == ERR_PTR(-FC_EX_CLOSED)) + goto put; + + mutex_lock(&rdata->rp_mutex); + if (rdata->rp_state != RPORT_ST_ADISC) { FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n", fc_rport_state(rdata)); @@ -1325,7 +1513,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, } if (IS_ERR(fp)) { - fc_rport_error(rdata, fp); + fc_rport_error(rdata, PTR_ERR(fp)); goto err; } @@ -1350,7 +1538,8 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); +put: + kref_put(&rdata->kref, fc_rport_destroy); } /** @@ -1359,6 +1548,8 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, * * Locking Note: The rport lock is expected to be held before calling * this routine. + * + * Reference counting: increments kref when sending ELS */ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) { @@ -1372,15 +1563,16 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc)); if (!fp) { - fc_rport_error_retry(rdata, fp); + fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR); return; } + kref_get(&rdata->kref); if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, fc_rport_adisc_resp, rdata, - 2 * lport->r_a_tov)) - fc_rport_error_retry(rdata, NULL); - else - kref_get(&rdata->kref); + 2 * lport->r_a_tov)) { + fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR); + kref_put(&rdata->kref, fc_rport_destroy); + } } /** @@ -1404,7 +1596,7 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, if (!adisc) { rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; - lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); goto drop; } @@ -1480,7 +1672,7 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, goto out; out_rjt: - lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); out: fc_frame_free(rx_fp); } @@ -1494,15 +1686,21 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, * The ELS opcode has already been validated by the caller. * * Locking Note: Called with the lport lock held. 
+ * + * Reference counting: does not modify kref */ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_rport_priv *rdata; struct fc_seq_els_data els_data; - rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp)); - if (!rdata) + rdata = fc_rport_lookup(lport, fc_frame_sid(fp)); + if (!rdata) { + FC_RPORT_ID_DBG(lport, fc_frame_sid(fp), + "Received ELS 0x%02x from non-logged-in port\n", + fc_frame_payload_op(fp)); goto reject; + } mutex_lock(&rdata->rp_mutex); @@ -1512,9 +1710,21 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) case RPORT_ST_READY: case RPORT_ST_ADISC: break; + case RPORT_ST_PLOGI: + if (fc_frame_payload_op(fp) == ELS_PRLI) { + FC_RPORT_DBG(rdata, "Reject ELS PRLI " + "while in state %s\n", + fc_rport_state(rdata)); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); + goto busy; + } default: + FC_RPORT_DBG(rdata, + "Reject ELS 0x%02x while in state %s\n", + fc_frame_payload_op(fp), fc_rport_state(rdata)); mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, lport->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); goto reject; } @@ -1529,30 +1739,41 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) fc_rport_recv_adisc_req(rdata, fp); break; case ELS_RRQ: - lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL); + fc_seq_els_rsp_send(fp, ELS_RRQ, NULL); fc_frame_free(fp); break; case ELS_REC: - lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL); + fc_seq_els_rsp_send(fp, ELS_REC, NULL); fc_frame_free(fp); break; case ELS_RLS: fc_rport_recv_rls_req(rdata, fp); break; + case ELS_RTV: + fc_rport_recv_rtv_req(rdata, fp); + break; default: fc_frame_free(fp); /* can't happen */ break; } mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); return; reject: els_data.reason = ELS_RJT_UNAB; els_data.explan = ELS_EXPL_PLOGI_REQD; - lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); + fc_frame_free(fp); + return; + +busy: + els_data.reason = ELS_RJT_BUSY; + els_data.explan = ELS_EXPL_NONE; + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); fc_frame_free(fp); + return; } /** @@ -1561,8 +1782,10 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) * @fp: The request frame * * Locking Note: Called with the lport lock held. + * + * Reference counting: does not modify kref */ -static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) +void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_seq_els_data els_data; @@ -1588,16 +1811,18 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) case ELS_RRQ: case ELS_REC: case ELS_RLS: + case ELS_RTV: fc_rport_recv_els_req(lport, fp); break; default: els_data.reason = ELS_RJT_UNSUP; els_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); fc_frame_free(fp); break; } } +EXPORT_SYMBOL(fc_rport_recv_req); /** * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests @@ -1605,6 +1830,8 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) * @rx_fp: The PLOGI request frame * * Locking Note: The rport lock is held before calling this function. 
+ * + * Reference counting: increments kref on return */ static void fc_rport_recv_plogi_req(struct fc_lport *lport, struct fc_frame *rx_fp) @@ -1630,7 +1857,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, disc = &lport->disc; mutex_lock(&disc->disc_mutex); - rdata = lport->tt.rport_create(lport, sid); + rdata = fc_rport_create(lport, sid); if (!rdata) { mutex_unlock(&disc->disc_mutex); rjt_data.reason = ELS_RJT_UNAB; @@ -1718,7 +1945,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, return; reject: - lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } @@ -1744,7 +1971,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, unsigned int len; unsigned int plen; enum fc_els_spp_resp resp; - enum fc_els_spp_resp passive; struct fc_seq_els_data rjt_data; struct fc4_prov *prov; @@ -1794,15 +2020,21 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, resp = 0; if (rspp->spp_type < FC_FC4_PROV_SIZE) { + enum fc_els_spp_resp active = 0, passive = 0; + prov = fc_active_prov[rspp->spp_type]; if (prov) - resp = prov->prli(rdata, plen, rspp, spp); + active = prov->prli(rdata, plen, rspp, spp); prov = fc_passive_prov[rspp->spp_type]; - if (prov) { + if (prov) passive = prov->prli(rdata, plen, rspp, spp); - if (!resp || passive == FC_SPP_RESP_ACK) - resp = passive; - } + if (!active || passive == FC_SPP_RESP_ACK) + resp = passive; + else + resp = active; + FC_RPORT_DBG(rdata, "PRLI rspp type %x " + "active %x passive %x\n", + rspp->spp_type, active, passive); } if (!resp) { if (spp->spp_flags & FC_SPP_EST_IMG_PAIR) @@ -1823,20 +2055,13 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); - switch (rdata->rp_state) { - case RPORT_ST_PRLI: - fc_rport_enter_ready(rdata); - break; - default: - break; - } goto drop; reject_len: rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; reject: - lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); drop: fc_frame_free(rx_fp); } @@ -1907,7 +2132,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; reject: - lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); + fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); drop: fc_frame_free(rx_fp); } @@ -1919,17 +2144,19 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, * * Locking Note: The rport lock is expected to be held before calling * this function. 
+ * + * Reference counting: drops kref on return */ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_rport_priv *rdata; u32 sid; - lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); + fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); sid = fc_frame_sid(fp); - rdata = lport->tt.rport_lookup(lport, sid); + rdata = fc_rport_lookup(lport, sid); if (rdata) { mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", @@ -1937,7 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) fc_rport_enter_delete(rdata, RPORT_EV_STOP); mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); + kref_put(&rdata->kref, fc_rport_destroy); } else FC_RPORT_ID_DBG(lport, sid, "Received LOGO from non-logged-in port\n"); @@ -1947,41 +2174,11 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) /** * fc_rport_flush_queue() - Flush the rport_event_queue */ -static void fc_rport_flush_queue(void) +void fc_rport_flush_queue(void) { flush_workqueue(rport_event_queue); } - -/** - * fc_rport_init() - Initialize the remote port layer for a local port - * @lport: The local port to initialize the remote port layer for - */ -int fc_rport_init(struct fc_lport *lport) -{ - if (!lport->tt.rport_lookup) - lport->tt.rport_lookup = fc_rport_lookup; - - if (!lport->tt.rport_create) - lport->tt.rport_create = fc_rport_create; - - if (!lport->tt.rport_login) - lport->tt.rport_login = fc_rport_login; - - if (!lport->tt.rport_logoff) - lport->tt.rport_logoff = fc_rport_logoff; - - if (!lport->tt.rport_recv_req) - lport->tt.rport_recv_req = fc_rport_recv_req; - - if (!lport->tt.rport_flush_queue) - lport->tt.rport_flush_queue = fc_rport_flush_queue; - - if (!lport->tt.rport_destroy) - lport->tt.rport_destroy = fc_rport_destroy; - - return 0; -} -EXPORT_SYMBOL(fc_rport_init); +EXPORT_SYMBOL(fc_rport_flush_queue); /** * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator. 
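Taken together, the fc_lport.c and fc_rport.c changes above replace the per-lport template indirection (lport->tt.rport_lookup, ->rport_login, ->rport_destroy, ...) with direct calls to exported fc_rport_*() functions, which is why fc_rport_init() and its filling-in of template defaults can be deleted outright. A stripped-down sketch of the two dispatch styles being traded here; the structs and names are illustrative only, not the libfc types:

#include <stdio.h>

struct port;

/* Old style: a per-port template of overridable operations. */
struct port_template {
	void (*login)(struct port *p);
};

struct port {
	struct port_template tt;
};

/* New style: one exported implementation, called directly. */
static void port_login(struct port *p)
{
	(void)p;
	printf("login\n");
}

/* The old fc_rport_init() pattern: fill in defaults the LLD left unset. */
static void template_init(struct port *p)
{
	if (!p->tt.login)
		p->tt.login = port_login;
}

int main(void)
{
	struct port p = { { 0 } };

	template_init(&p);
	p.tt.login(&p);		/* old: indirect call through the template */
	port_login(&p);		/* new: direct, compile-time-checked call */
	return 0;
}

Dropping the indirection means callers are checked against the real function signatures at compile time, and low-level drivers no longer have to populate (or know about) those template entries.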
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index b484859464f6b2695fe41d24a5cfecf8e8824c49..8a20b4e862241c7333e0eb927347816919e51a8f 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -648,6 +648,10 @@ struct lpfc_hba { #define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ #define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ +#define HBA_FORCED_LINK_SPEED 0x40000 /* + * Firmware supports Forced Link Speed + * capability + */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -746,6 +750,8 @@ struct lpfc_hba { uint32_t cfg_oas_priority; uint32_t cfg_XLanePriority; uint32_t cfg_enable_bg; + uint32_t cfg_prot_mask; + uint32_t cfg_prot_guard; uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; uint32_t cfg_aer_support; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f1019908800edd9ae15d10476d6173330c864d30..c84775562c65e9b486eee549594c800c03945fd7 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -2759,18 +2759,14 @@ LPFC_ATTR_R(enable_npiv, 1, 0, 1, LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); -int lpfc_enable_rrq = 2; -module_param(lpfc_enable_rrq, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); -lpfc_param_show(enable_rrq); /* # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures # 0x0 = disabled, XRI/OXID use not tracked. # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent. */ -lpfc_param_init(enable_rrq, 2, 0, 2); -static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL); +LPFC_ATTR_R(enable_rrq, 2, 0, 2, + "Enable RRQ functionality"); /* # lpfc_suppress_link_up: Bring link up at initialization @@ -2827,14 +2823,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(txcmplq_hw, S_IRUGO, lpfc_txcmplq_hw_show, NULL); -int lpfc_iocb_cnt = 2; -module_param(lpfc_iocb_cnt, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_iocb_cnt, +LPFC_ATTR_R(iocb_cnt, 2, 1, 5, "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs"); -lpfc_param_show(iocb_cnt); -lpfc_param_init(iocb_cnt, 2, 1, 5); -static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO, - lpfc_iocb_cnt_show, NULL); /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear @@ -2887,9 +2877,9 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; if (val != LPFC_DEF_DEVLOSS_TMO) lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0407 Ignoring nodev_tmo module " - "parameter because devloss_tmo is " - "set.\n"); + "0407 Ignoring lpfc_nodev_tmo module " + "parameter because lpfc_devloss_tmo " + "is set.\n"); return 0; } @@ -2948,8 +2938,8 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) if (vport->dev_loss_tmo_changed || (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0401 Ignoring change to nodev_tmo " - "because devloss_tmo is set.\n"); + "0401 Ignoring change to lpfc_nodev_tmo " + "because lpfc_devloss_tmo is set.\n"); return 0; } if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { @@ -2964,7 +2954,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) return 0; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0403 lpfc_nodev_tmo attribute cannot be set to" + "0403 lpfc_nodev_tmo 
attribute cannot be set to " "%d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); return -EINVAL; @@ -3015,8 +3005,8 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val) } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0404 lpfc_devloss_tmo attribute cannot be set to" - " %d, allowed range is [%d, %d]\n", + "0404 lpfc_devloss_tmo attribute cannot be set to " + "%d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); return -EINVAL; } @@ -3204,6 +3194,8 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. # Default value is 0. */ +LPFC_ATTR(topology, 0, 0, 6, + "Select Fibre Channel topology"); /** * lpfc_topology_set - Set the adapters topology field @@ -3281,11 +3273,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, phba->brd_no, val); return -EINVAL; } -static int lpfc_topology = 0; -module_param(lpfc_topology, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology"); + lpfc_param_show(topology) -lpfc_param_init(topology, 0, 0, 6) static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, lpfc_topology_show, lpfc_topology_store); @@ -3679,7 +3668,12 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, int nolip = 0; const char *val_buf = buf; int err; - uint32_t prev_val; + uint32_t prev_val, if_type; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + if (if_type == LPFC_SLI_INTF_IF_TYPE_2 && + phba->hba_flag & HBA_FORCED_LINK_SPEED) + return -EPERM; if (!strncmp(buf, "nolip ", strlen("nolip "))) { nolip = 1; @@ -3789,6 +3783,9 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, # 1 = aer supported and enabled (default) # Value range is [0,1]. Default value is 1. */ +LPFC_ATTR(aer_support, 1, 0, 1, + "Enable PCIe device AER support"); +lpfc_param_show(aer_support) /** * lpfc_aer_support_store - Set the adapter for aer support @@ -3871,46 +3868,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, return rc; } -static int lpfc_aer_support = 1; -module_param(lpfc_aer_support, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); -lpfc_param_show(aer_support) - -/** - * lpfc_aer_support_init - Set the initial adapters aer support flag - * @phba: lpfc_hba pointer. - * @val: enable aer or disable aer flag. - * - * Description: - * If val is in a valid range [0,1], then set the adapter's initial - * cfg_aer_support field. It will be up to the driver's probe_one - * routine to determine whether the device's AER support can be set - * or not. - * - * Notes: - * If the value is not in range log a kernel error message, and - * choose the default value of setting AER support and return. - * - * Returns: - * zero if val saved. 
- * -EINVAL val out of range - **/ -static int -lpfc_aer_support_init(struct lpfc_hba *phba, int val) -{ - if (val == 0 || val == 1) { - phba->cfg_aer_support = val; - return 0; - } - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2712 lpfc_aer_support attribute value %d out " - "of range, allowed values are 0|1, setting it " - "to default value of 1\n", val); - /* By default, try to enable AER on a device */ - phba->cfg_aer_support = 1; - return -EINVAL; -} - static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR, lpfc_aer_support_show, lpfc_aer_support_store); @@ -4055,39 +4012,10 @@ lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr, return rc; } -static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN; -module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn"); -lpfc_param_show(sriov_nr_virtfn) - -/** - * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable - * @phba: lpfc_hba pointer. - * @val: link speed value. - * - * Description: - * If val is in a valid range [0,255], then set the adapter's initial - * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum - * number shall be used instead. It will be up to the driver's probe_one - * routine to determine whether the device's SR-IOV is supported or not. - * - * Returns: - * zero if val saved. - * -EINVAL val out of range - **/ -static int -lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val) -{ - if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) { - phba->cfg_sriov_nr_virtfn = val; - return 0; - } +LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN, + "Enable PCIe device SR-IOV virtual fn"); - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3017 Enabling %d virtual functions is not " - "allowed.\n", val); - return -EINVAL; -} +lpfc_param_show(sriov_nr_virtfn) static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR, lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store); @@ -4251,7 +4179,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3016 fcp_imax: %d out of range, using default\n", val); + "3016 lpfc_fcp_imax: %d out of range, using default\n", + val); phba->cfg_fcp_imax = LPFC_DEF_IMAX; return 0; @@ -4401,8 +4330,8 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3326 fcp_cpu_map: %d out of range, using default\n", - val); + "3326 lpfc_fcp_cpu_map: %d out of range, using " + "default\n", val); phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; return 0; @@ -4441,12 +4370,10 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536, # to limit the I/O completion time to the parameter value. # The value is set in milliseconds. 
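For reference, the attribute hunks in this file replace open-coded module parameters with the LPFC_ATTR_R() and LPFC_VPORT_ATTR() helpers. Judging from the open-coded lines being removed, the helper for a read-only parameter stands in for roughly the boilerplate below; the actual macro bodies are defined earlier in lpfc_attr.c and are not shown in this patch, so treat this as a sketch rather than the exact expansion.

    /* Approximate equivalent of
     * LPFC_ATTR_R(enable_rrq, 2, 0, 2, "Enable RRQ functionality")
     */
    static int lpfc_enable_rrq = 2;                /* module parameter default */
    module_param(lpfc_enable_rrq, int, S_IRUGO);
    MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
    lpfc_param_show(enable_rrq)          /* emits lpfc_enable_rrq_show()       */
    lpfc_param_init(enable_rrq, 2, 0, 2) /* range-checked init of cfg_enable_rrq */
    static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);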
*/ -static int lpfc_max_scsicmpl_time; -module_param(lpfc_max_scsicmpl_time, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_max_scsicmpl_time, +LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000, "Use command completion time to control queue depth"); + lpfc_vport_param_show(max_scsicmpl_time); -lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000); static int lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) { @@ -4691,12 +4618,15 @@ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; # HBA supports DIX Type 1: Host to HBA Type 1 protection # */ -unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | - SHOST_DIX_TYPE0_PROTECTION | - SHOST_DIX_TYPE1_PROTECTION; - -module_param(lpfc_prot_mask, uint, S_IRUGO); -MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); +LPFC_ATTR(prot_mask, + (SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION), + 0, + (SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION), + "T10-DIF host protection capabilities mask"); /* # lpfc_prot_guard: i @@ -4706,9 +4636,9 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); # - Default will result in registering capabilities for all guard types # */ -unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP; -module_param(lpfc_prot_guard, byte, S_IRUGO); -MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); +LPFC_ATTR(prot_guard, + SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP, + "T10-DIF host protection guard type"); /* * Delay initial NPort discovery when Clean Address bit is cleared in @@ -5828,6 +5758,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_oas_flags = 0; phba->cfg_oas_priority = 0; lpfc_enable_bg_init(phba, lpfc_enable_bg); + lpfc_prot_mask_init(phba, lpfc_prot_mask); + lpfc_prot_guard_init(phba, lpfc_prot_guard); if (phba->sli_rev == LPFC_SLI_REV4) phba->cfg_poll = 0; else diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 05dcc2abd541a2c8900a58f8bc1f494710e68b5e..7dca4d6a888346981ad1376e58d376add6774545 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -97,7 +98,7 @@ struct lpfc_bsg_menlo { #define TYPE_MENLO 4 struct bsg_job_data { uint32_t type; - struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */ + struct bsg_job *set_job; /* job waiting for this iocb to finish */ union { struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb iocb; @@ -211,7 +212,7 @@ lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size, static unsigned int lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, - struct fc_bsg_buffer *bsg_buffers, + struct bsg_buffer *bsg_buffers, unsigned int bytes_to_transfer, int to_buffers) { @@ -297,7 +298,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *rspiocbq) { struct bsg_job_data *dd_data; - struct fc_bsg_job *job; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; IOCB_t *rsp; struct lpfc_dmabuf *bmp, *cmp, *rmp; struct lpfc_nodelist *ndlp; @@ -312,6 +314,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { + bsg_reply = job->reply; /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } @@ -350,7 +353,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, } } else { rsp_size = rsp->un.genreq64.bdl.bdeSize; - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = 
lpfc_bsg_copy_data(rmp, &job->reply_payload, rsp_size, 0); } @@ -367,8 +370,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, /* Complete the job if the job is still active */ if (job) { - job->reply->result = rc; - job->job_done(job); + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); } return; } @@ -378,12 +382,13 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, * @job: fc_bsg_job to handle **/ static int -lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) +lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; - struct lpfc_rport_data *rdata = job->rport->dd_data; + struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; struct lpfc_nodelist *ndlp = rdata->pnode; + struct fc_bsg_reply *bsg_reply = job->reply; struct ulp_bde64 *bpl = NULL; uint32_t timeout; struct lpfc_iocbq *cmdiocbq = NULL; @@ -398,7 +403,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) int iocb_stat; /* in case no data is transferred */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); @@ -542,7 +547,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) kfree(dd_data); no_dd_data: /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; job->dd_data = NULL; return rc; } @@ -570,7 +575,8 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *rspiocbq) { struct bsg_job_data *dd_data; - struct fc_bsg_job *job; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; IOCB_t *rsp; struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL; @@ -588,6 +594,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { + bsg_reply = job->reply; /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } @@ -609,17 +616,17 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, if (job) { if (rsp->ulpStatus == IOSTAT_SUCCESS) { rsp_size = rsp->un.elsreq64.bdl.bdeSize; - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, prsp->virt, rsp_size); } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sizeof(struct fc_bsg_ctels_reply); /* LS_RJT data returned in word 4 */ rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; - els_reply = &job->reply->reply_data.ctels_reply; + els_reply = &bsg_reply->reply_data.ctels_reply; els_reply->status = FC_CTELS_STATUS_REJECT; els_reply->rjt_data.action = rjt_data[3]; els_reply->rjt_data.reason_code = rjt_data[2]; @@ -637,8 +644,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, /* Complete the job if the job is still active */ if (job) { - job->reply->result = rc; - job->job_done(job); + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); } return; } @@ -648,12 +656,14 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, * @job: fc_bsg_job to handle **/ static int -lpfc_bsg_rport_els(struct fc_bsg_job *job) +lpfc_bsg_rport_els(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba 
*phba = vport->phba; - struct lpfc_rport_data *rdata = job->rport->dd_data; + struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; struct lpfc_nodelist *ndlp = rdata->pnode; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; uint32_t elscmd; uint32_t cmdsize; struct lpfc_iocbq *cmdiocbq; @@ -664,7 +674,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) int rc = 0; /* in case no data is transferred */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; /* verify the els command is not greater than the * maximum ELS transfer size. @@ -684,7 +694,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) goto no_dd_data; } - elscmd = job->request->rqst_data.r_els.els_code; + elscmd = bsg_request->rqst_data.r_els.els_code; cmdsize = job->request_payload.payload_len; if (!lpfc_nlp_get(ndlp)) { @@ -771,7 +781,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) no_dd_data: /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; job->dd_data = NULL; return rc; } @@ -917,7 +927,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; struct lpfc_hbq_entry *hbqe; struct lpfc_sli_ct_request *ct_req; - struct fc_bsg_job *job = NULL; + struct bsg_job *job = NULL; + struct fc_bsg_reply *bsg_reply; struct bsg_job_data *dd_data = NULL; unsigned long flags; int size = 0; @@ -1120,13 +1131,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, dd_data->set_job = NULL; lpfc_bsg_event_unref(evt); if (job) { - job->reply->reply_payload_rcv_len = size; + bsg_reply = job->reply; + bsg_reply->reply_payload_rcv_len = size; /* make error code available to userspace */ - job->reply->result = 0; + bsg_reply->result = 0; job->dd_data = NULL; /* complete the job back to userspace */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); spin_lock_irqsave(&phba->ct_ev_lock, flags); } } @@ -1187,10 +1200,11 @@ lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf) * @job: SET_EVENT fc_bsg_job **/ static int -lpfc_bsg_hba_set_event(struct fc_bsg_job *job) +lpfc_bsg_hba_set_event(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; struct set_ct_event *event_req; struct lpfc_bsg_event *evt; int rc = 0; @@ -1208,7 +1222,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) } event_req = (struct set_ct_event *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & FC_REG_EVENT_MASK); spin_lock_irqsave(&phba->ct_ev_lock, flags); @@ -1271,10 +1285,12 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) * @job: GET_EVENT fc_bsg_job **/ static int -lpfc_bsg_hba_get_event(struct fc_bsg_job *job) +lpfc_bsg_hba_get_event(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct get_ct_event *event_req; struct get_ct_event_reply *event_reply; struct lpfc_bsg_event *evt, *evt_next; @@ -1292,10 
+1308,10 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) } event_req = (struct get_ct_event *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; event_reply = (struct get_ct_event_reply *) - job->reply->reply_data.vendor_reply.vendor_rsp; + bsg_reply->reply_data.vendor_reply.vendor_rsp; spin_lock_irqsave(&phba->ct_ev_lock, flags); list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { if (evt->reg_id == event_req->ev_reg_id) { @@ -1315,7 +1331,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) * an error indicating that there isn't anymore */ if (evt_dat == NULL) { - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; rc = -ENOENT; goto job_error; } @@ -1331,12 +1347,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) event_reply->type = evt_dat->type; event_reply->immed_data = evt_dat->immed_dat; if (evt_dat->len > 0) - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, evt_dat->data, evt_dat->len); else - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (evt_dat) { kfree(evt_dat->data); @@ -1347,13 +1363,14 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) lpfc_bsg_event_unref(evt); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); job->dd_data = NULL; - job->reply->result = 0; - job->job_done(job); + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; job_error: job->dd_data = NULL; - job->reply->result = rc; + bsg_reply->result = rc; return rc; } @@ -1380,7 +1397,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *rspiocbq) { struct bsg_job_data *dd_data; - struct fc_bsg_job *job; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; IOCB_t *rsp; struct lpfc_dmabuf *bmp, *cmp; struct lpfc_nodelist *ndlp; @@ -1411,6 +1429,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, /* Copy the completed job data or set the error status */ if (job) { + bsg_reply = job->reply; if (rsp->ulpStatus) { if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { @@ -1428,7 +1447,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, rc = -EACCES; } } else { - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; } } @@ -1442,8 +1461,9 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, /* Complete the job if the job is still active */ if (job) { - job->reply->result = rc; - job->job_done(job); + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); } return; } @@ -1457,7 +1477,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, * @num_entry: Number of enties in the bde. 
**/ static int -lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, +lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp, int num_entry) { @@ -1603,12 +1623,14 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, * @job: SEND_MGMT_RESP fc_bsg_job **/ static int -lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) +lpfc_bsg_send_mgmt_rsp(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; struct ulp_bde64 *bpl; struct lpfc_dmabuf *bmp = NULL, *cmp = NULL; int bpl_entries; @@ -1618,7 +1640,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) int rc = 0; /* in case no data is transferred */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) { rc = -ERANGE; @@ -1664,7 +1686,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) kfree(bmp); send_mgmt_rsp_exit: /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; job->dd_data = NULL; return rc; } @@ -1760,8 +1782,10 @@ lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba) * All of this is done in-line. */ static int -lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) +lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct diag_mode_set *loopback_mode; uint32_t link_flags; uint32_t timeout; @@ -1771,7 +1795,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) int rc = 0; /* no data to return just the return code */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { @@ -1791,7 +1815,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) /* bring the link to diagnostic mode */ loopback_mode = (struct diag_mode_set *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; link_flags = loopback_mode->type; timeout = loopback_mode->timeout * 100; @@ -1864,10 +1888,11 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) job_error: /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -2015,14 +2040,16 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) * loopback mode in order to perform a diagnostic loopback test. 
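Every lpfc_bsg.c hunk in this patch applies the same mechanical conversion: handlers take a struct bsg_job instead of a struct fc_bsg_job, reach the host and rport through the fc_bsg_to_shost()/fc_bsg_to_rport() accessors, cache job->request and job->reply in local bsg_request/bsg_reply pointers, and signal completion with bsg_job_done() rather than job->job_done(). Condensed to its skeleton (lpfc_bsg_example_cmd is an illustrative name, not a function added by this patch), a converted handler looks roughly like this:

    static int
    lpfc_bsg_example_cmd(struct bsg_job *job)
    {
    	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
    	struct fc_bsg_request *bsg_request = job->request;
    	struct fc_bsg_reply *bsg_reply = job->reply;
    	int rc = 0;

    	/* nothing has been transferred back to userspace yet */
    	bsg_reply->reply_payload_rcv_len = 0;

    	/* vendor payload is read from bsg_request, no longer job->request */
    	/* ... issue the request, fill the reply payload, set rc ... */

    	/* make the result available to userspace and complete the job */
    	bsg_reply->result = rc;
    	if (rc == 0)
    		bsg_job_done(job, bsg_reply->result,
    			     bsg_reply->reply_payload_rcv_len);
    	return rc;
    }

The same shape shows up in the completion callbacks: they fetch the job from dd_data, set bsg_reply->result, and call bsg_job_done() only when the job is still active.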
*/ static int -lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) +lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct diag_mode_set *loopback_mode; uint32_t link_flags, timeout; int i, rc = 0; /* no data to return just the return code */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { @@ -2054,7 +2081,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3129 Bring link to diagnostic state.\n"); loopback_mode = (struct diag_mode_set *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; link_flags = loopback_mode->type; timeout = loopback_mode->timeout * 100; @@ -2151,10 +2178,11 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) job_error: /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -2166,17 +2194,17 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) * command from the user to proper driver action routines. */ static int -lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job) +lpfc_bsg_diag_loopback_mode(struct bsg_job *job) { struct Scsi_Host *shost; struct lpfc_vport *vport; struct lpfc_hba *phba; int rc; - shost = job->shost; + shost = fc_bsg_to_shost(job); if (!shost) return -ENODEV; - vport = (struct lpfc_vport *)job->shost->hostdata; + vport = shost_priv(shost); if (!vport) return -ENODEV; phba = vport->phba; @@ -2202,8 +2230,10 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job) * command from the user to proper driver action routines. */ static int -lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job) +lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct Scsi_Host *shost; struct lpfc_vport *vport; struct lpfc_hba *phba; @@ -2211,10 +2241,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job) uint32_t timeout; int rc, i; - shost = job->shost; + shost = fc_bsg_to_shost(job); if (!shost) return -ENODEV; - vport = (struct lpfc_vport *)job->shost->hostdata; + vport = shost_priv(shost); if (!vport) return -ENODEV; phba = vport->phba; @@ -2232,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job) phba->link_flag &= ~LS_LOOPBACK_MODE; spin_unlock_irq(&phba->hbalock); loopback_mode_end_cmd = (struct diag_mode_set *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; timeout = loopback_mode_end_cmd->timeout * 100; rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); @@ -2263,10 +2293,11 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job) loopback_mode_end_exit: /* make return code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -2278,8 +2309,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job) * applicaiton. 
*/ static int -lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) +lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct Scsi_Host *shost; struct lpfc_vport *vport; struct lpfc_hba *phba; @@ -2292,12 +2325,12 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) struct diag_status *diag_status_reply; int mbxstatus, rc = 0; - shost = job->shost; + shost = fc_bsg_to_shost(job); if (!shost) { rc = -ENODEV; goto job_error; } - vport = (struct lpfc_vport *)job->shost->hostdata; + vport = shost_priv(shost); if (!vport) { rc = -ENODEV; goto job_error; @@ -2335,7 +2368,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) goto job_error; link_diag_test_cmd = (struct sli4_link_diag *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); @@ -2385,7 +2418,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) } diag_status_reply = (struct diag_status *) - job->reply->reply_data.vendor_reply.vendor_rsp; + bsg_reply->reply_data.vendor_reply.vendor_rsp; if (job->reply_len < sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) { @@ -2413,10 +2446,11 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) job_error: /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -2982,9 +3016,10 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, * of loopback mode. **/ static int -lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) +lpfc_bsg_diag_loopback_run(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; struct lpfc_bsg_event *evt; struct event_data *evdat; @@ -3012,7 +3047,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) uint32_t total_mem; /* in case no data is returned return just the return code */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { @@ -3237,11 +3272,11 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) rc = IOCB_SUCCESS; /* skip over elx loopback header */ rx_databuf += ELX_LOOPBACK_HEADER_SZ; - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, rx_databuf, size); - job->reply->reply_payload_rcv_len = size; + bsg_reply->reply_payload_rcv_len = size; } } @@ -3271,11 +3306,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) loopback_test_exit: kfree(dataout); /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; job->dd_data = NULL; /* complete the job back to userspace if no error */ if (rc == IOCB_SUCCESS) - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -3284,9 +3320,10 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) * @job: GET_DFC_REV fc_bsg_job **/ static int -lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) +lpfc_bsg_get_dfc_rev(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport 
*)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; struct get_mgmt_rev_reply *event_reply; int rc = 0; @@ -3301,7 +3338,7 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) } event_reply = (struct get_mgmt_rev_reply *) - job->reply->reply_data.vendor_reply.vendor_rsp; + bsg_reply->reply_data.vendor_reply.vendor_rsp; if (job->reply_len < sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { @@ -3315,9 +3352,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; job_error: - job->reply->result = rc; + bsg_reply->result = rc; if (rc == 0) - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -3336,7 +3374,8 @@ static void lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; - struct fc_bsg_job *job; + struct fc_bsg_reply *bsg_reply; + struct bsg_job *job; uint32_t size; unsigned long flags; uint8_t *pmb, *pmb_buf; @@ -3364,8 +3403,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /* Copy the mailbox data to the job if it is still active */ if (job) { + bsg_reply = job->reply; size = job->reply_payload.payload_len; - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmb_buf, size); @@ -3379,8 +3419,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /* Complete the job if the job is still active */ if (job) { - job->reply->result = 0; - job->job_done(job); + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); } return; } @@ -3510,11 +3551,12 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba) * This is routine handles BSG job for mailbox commands completions with * multiple external buffers. 
**/ -static struct fc_bsg_job * +static struct bsg_job * lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; - struct fc_bsg_job *job; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; uint8_t *pmb, *pmb_buf; unsigned long flags; uint32_t size; @@ -3529,6 +3571,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { + bsg_reply = job->reply; /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } @@ -3559,13 +3602,13 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) if (job) { size = job->reply_payload.payload_len; - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmb_buf, size); /* result for successful */ - job->reply->result = 0; + bsg_reply->result = 0; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2937 SLI_CONFIG ext-buffer maibox command " @@ -3603,7 +3646,8 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) static void lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { - struct fc_bsg_job *job; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); @@ -3623,9 +3667,11 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); /* if the job is still active, call job done */ - if (job) - job->job_done(job); - + if (job) { + bsg_reply = job->reply; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } return; } @@ -3640,7 +3686,8 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) static void lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { - struct fc_bsg_job *job; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); @@ -3658,8 +3705,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) lpfc_bsg_mbox_ext_session_reset(phba); /* if the job is still active, call job done */ - if (job) - job->job_done(job); + if (job) { + bsg_reply = job->reply; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } return; } @@ -3768,10 +3818,11 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, * non-embedded external bufffers. **/ static int -lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, +lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, enum nemb_type nemb_tp, struct lpfc_dmabuf *dmabuf) { + struct fc_bsg_request *bsg_request = job->request; struct lpfc_sli_config_mbox *sli_cfg_mbx; struct dfc_mbox_req *mbox_req; struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; @@ -3784,7 +3835,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, int rc, i; mbox_req = - (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; + (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; /* pointer to the start of mailbox command */ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; @@ -3955,10 +4006,12 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, * non-embedded external bufffers. 
**/ static int -lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, +lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, enum nemb_type nemb_tp, struct lpfc_dmabuf *dmabuf) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct dfc_mbox_req *mbox_req; struct lpfc_sli_config_mbox *sli_cfg_mbx; uint32_t ext_buf_cnt; @@ -3969,7 +4022,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, int rc = SLI_CONFIG_NOT_HANDLED, i; mbox_req = - (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; + (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; /* pointer to the start of mailbox command */ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; @@ -4096,8 +4149,9 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, /* wait for additoinal external buffers */ - job->reply->result = 0; - job->job_done(job); + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return SLI_CONFIG_HANDLED; job_error: @@ -4119,7 +4173,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, * with embedded sussystem 0x1 and opcodes with external HBDs. **/ static int -lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, +lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_dmabuf *dmabuf) { struct lpfc_sli_config_mbox *sli_cfg_mbx; @@ -4268,8 +4322,9 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba) * user space through BSG. **/ static int -lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job) +lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job) { + struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_sli_config_mbox *sli_cfg_mbx; struct lpfc_dmabuf *dmabuf; uint8_t *pbuf; @@ -4307,7 +4362,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job) dmabuf, index); pbuf = (uint8_t *)dmabuf->virt; - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pbuf, size); @@ -4321,8 +4376,9 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job) lpfc_bsg_mbox_ext_session_reset(phba); } - job->reply->result = 0; - job->job_done(job); + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return SLI_CONFIG_HANDLED; } @@ -4336,9 +4392,10 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job) * from user space through BSG. **/ static int -lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job, +lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_dmabuf *dmabuf) { + struct fc_bsg_reply *bsg_reply = job->reply; struct bsg_job_data *dd_data = NULL; LPFC_MBOXQ_t *pmboxq = NULL; MAILBOX_t *pmb; @@ -4436,8 +4493,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job, } /* wait for additoinal external buffers */ - job->reply->result = 0; - job->job_done(job); + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return SLI_CONFIG_HANDLED; job_error: @@ -4457,7 +4515,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job, * command with multiple non-embedded external buffers. 
**/ static int -lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job, +lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_dmabuf *dmabuf) { int rc; @@ -4502,14 +4560,15 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job, * (0x9B) mailbox commands and external buffers. **/ static int -lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, +lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_dmabuf *dmabuf) { + struct fc_bsg_request *bsg_request = job->request; struct dfc_mbox_req *mbox_req; int rc = SLI_CONFIG_NOT_HANDLED; mbox_req = - (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; + (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; /* mbox command with/without single external buffer */ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) @@ -4579,9 +4638,11 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, * let our completion handler finish the command. **/ static int -lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, +lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_vport *vport) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ /* a 4k buffer to hold the mb and extended data from/to the bsg */ @@ -4600,7 +4661,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t size; /* in case no data is transferred */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; /* sanity check to protect driver */ if (job->reply_payload.payload_len > BSG_MBOX_SIZE || @@ -4619,7 +4680,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, } mbox_req = - (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; + (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; /* check if requested extended data lengths are valid */ if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || @@ -4841,7 +4902,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, /* job finished, copy the data */ memcpy(pmbx, pmb, sizeof(*pmb)); - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmbx, size); @@ -4870,15 +4931,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 
**/ static int -lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) +lpfc_bsg_mbox_cmd(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; struct dfc_mbox_req *mbox_req; int rc = 0; /* mix-and-match backward compatibility */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, @@ -4889,7 +4952,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) sizeof(struct fc_bsg_request)), (int)sizeof(struct dfc_mbox_req)); mbox_req = (struct dfc_mbox_req *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; mbox_req->extMboxTag = 0; mbox_req->extSeqNum = 0; } @@ -4898,15 +4961,16 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) if (rc == 0) { /* job done */ - job->reply->result = 0; + bsg_reply->result = 0; job->dd_data = NULL; - job->job_done(job); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); } else if (rc == 1) /* job submitted, will complete later*/ rc = 0; /* return zero, no error */ else { /* some error occurred */ - job->reply->result = rc; + bsg_reply->result = rc; job->dd_data = NULL; } @@ -4936,7 +5000,8 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *rspiocbq) { struct bsg_job_data *dd_data; - struct fc_bsg_job *job; + struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; IOCB_t *rsp; struct lpfc_dmabuf *bmp, *cmp, *rmp; struct lpfc_bsg_menlo *menlo; @@ -4956,6 +5021,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { + bsg_reply = job->reply; /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } @@ -4970,7 +5036,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, */ menlo_resp = (struct menlo_response *) - job->reply->reply_data.vendor_reply.vendor_rsp; + bsg_reply->reply_data.vendor_reply.vendor_rsp; menlo_resp->xri = rsp->ulpContext; if (rsp->ulpStatus) { if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { @@ -4990,7 +5056,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, } } else { rsp_size = rsp->un.genreq64.bdl.bdeSize; - job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = lpfc_bsg_copy_data(rmp, &job->reply_payload, rsp_size, 0); } @@ -5007,8 +5073,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, /* Complete the job if active */ if (job) { - job->reply->result = rc; - job->job_done(job); + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); } return; @@ -5024,9 +5091,11 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, * supplied in the menlo request header xri field. 
**/ static int -lpfc_menlo_cmd(struct fc_bsg_job *job) +lpfc_menlo_cmd(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocbq; IOCB_t *cmd; @@ -5039,7 +5108,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) struct ulp_bde64 *bpl = NULL; /* in case no data is returned return just the return code */ - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + @@ -5069,7 +5138,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) } menlo_cmd = (struct menlo_command *) - job->request->rqst_data.h_vendor.vendor_cmd; + bsg_request->rqst_data.h_vendor.vendor_cmd; /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); @@ -5180,19 +5249,65 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) kfree(dd_data); no_dd_data: /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; job->dd_data = NULL; return rc; } +static int +lpfc_forced_link_speed(struct bsg_job *job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_reply *bsg_reply = job->reply; + struct forced_link_speed_support_reply *forced_reply; + int rc = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct get_forced_link_speed_support)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "0048 Received FORCED_LINK_SPEED request " + "below minimum size\n"); + rc = -EINVAL; + goto job_error; + } + + forced_reply = (struct forced_link_speed_support_reply *) + bsg_reply->reply_data.vendor_reply.vendor_rsp; + + if (job->reply_len < + sizeof(struct fc_bsg_request) + + sizeof(struct forced_link_speed_support_reply)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "0049 Received FORCED_LINK_SPEED reply below " + "minimum size\n"); + rc = -EINVAL; + goto job_error; + } + + forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED) + ? 
LPFC_FORCED_LINK_SPEED_SUPPORTED + : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; +job_error: + bsg_reply->result = rc; + if (rc == 0) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} + /** * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job * @job: fc_bsg_job to handle **/ static int -lpfc_bsg_hst_vendor(struct fc_bsg_job *job) +lpfc_bsg_hst_vendor(struct bsg_job *job) { - int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; int rc; switch (command) { @@ -5227,11 +5342,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job) case LPFC_BSG_VENDOR_MENLO_DATA: rc = lpfc_menlo_cmd(job); break; + case LPFC_BSG_VENDOR_FORCED_LINK_SPEED: + rc = lpfc_forced_link_speed(job); + break; default: rc = -EINVAL; - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; break; } @@ -5243,12 +5361,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job) * @job: fc_bsg_job to handle **/ int -lpfc_bsg_request(struct fc_bsg_job *job) +lpfc_bsg_request(struct bsg_job *job) { + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; uint32_t msgcode; int rc; - msgcode = job->request->msgcode; + msgcode = bsg_request->msgcode; switch (msgcode) { case FC_BSG_HST_VENDOR: rc = lpfc_bsg_hst_vendor(job); @@ -5261,9 +5381,9 @@ lpfc_bsg_request(struct fc_bsg_job *job) break; default: rc = -EINVAL; - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; /* make error code available to userspace */ - job->reply->result = rc; + bsg_reply->result = rc; break; } @@ -5278,9 +5398,9 @@ lpfc_bsg_request(struct fc_bsg_job *job) * the waiting function which will handle passing the error back to userspace **/ int -lpfc_bsg_timeout(struct fc_bsg_job *job) +lpfc_bsg_timeout(struct bsg_job *job) { - struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index e557bcdbcb198f2f63d2e45978d480fa0d54c3e7..f2247aa4fa17360f987d0b5fb0f671c5741618ad 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -35,6 +35,7 @@ #define LPFC_BSG_VENDOR_MENLO_DATA 9 #define LPFC_BSG_VENDOR_DIAG_MODE_END 10 #define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11 +#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14 struct set_ct_event { uint32_t command; @@ -284,6 +285,15 @@ struct lpfc_sli_config_mbox { } un; }; +#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED 0 +#define LPFC_FORCED_LINK_SPEED_SUPPORTED 1 +struct get_forced_link_speed_support { + uint32_t command; +}; +struct forced_link_speed_support_reply { + uint8_t supported; +}; + /* driver only */ #define SLI_CONFIG_NOT_HANDLED 0 #define SLI_CONFIG_HANDLED 1 diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index bd7576d452f2921152082a267a2938fe6f7f1ae6..15d2bfdf582dc73c99ca26951046d515e21b34c8 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -397,8 +397,6 @@ extern spinlock_t _dump_buf_lock; extern int _dump_buf_done; extern spinlock_t pgcnt_lock; extern unsigned int pgcnt; -extern 
unsigned int lpfc_prot_mask; -extern unsigned char lpfc_prot_guard; extern unsigned int lpfc_fcp_look_ahead; /* Interface exported by fabric iocb scheduler */ @@ -431,8 +429,8 @@ struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); #define HBA_EVENT_LINK_DOWN 3 /* functions to support SGIOv4/bsg interface */ -int lpfc_bsg_request(struct fc_bsg_job *); -int lpfc_bsg_timeout(struct fc_bsg_job *); +int lpfc_bsg_request(struct bsg_job *); +int lpfc_bsg_timeout(struct bsg_job *); int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b7d54bfb1df98847f584b53e6a14867eeccdb3a0..236e4e51d1617243d279d09089d95bb50c0a09d6 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -7610,7 +7610,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* reject till our FLOGI completes */ if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && (cmd != ELS_CMD_FLOGI)) { - rjt_err = LSRJT_UNABLE_TPC; + rjt_err = LSRJT_LOGICAL_BSY; rjt_exp = LSEXP_NOTHING_MORE; goto lsrjt; } diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index ee80227375915677fb1f89c8c74bd678c84a92eb..5646699b0516b216c87c55684202bbfed2ec8c84 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -921,6 +921,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A #define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B +#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D #define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73 #define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74 #define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A @@ -2289,6 +2290,9 @@ struct lpfc_mbx_read_config { #define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 #define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF #define lpfc_mbx_rd_conf_r_a_tov_WORD word6 +#define lpfc_mbx_rd_conf_link_speed_SHIFT 16 +#define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_link_speed_WORD word6 uint32_t rsvd_7; uint32_t rsvd_8; uint32_t word9; @@ -2919,6 +2923,16 @@ struct lpfc_mbx_set_feature { }; +#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2 +struct lpfc_mbx_set_host_data { +#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48 + struct mbox_header header; + uint32_t param_id; + uint32_t param_len; + uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE]; +}; + + struct lpfc_mbx_get_sli4_parameters { struct mbox_header header; struct lpfc_sli4_parameters sli4_parameters; @@ -3313,6 +3327,7 @@ struct lpfc_mqe { struct lpfc_mbx_get_port_name get_port_name; struct lpfc_mbx_set_feature set_feature; struct lpfc_mbx_memory_dump_type3 mem_dump_type3; + struct lpfc_mbx_set_host_data set_host_data; struct lpfc_mbx_nop nop; } un; }; @@ -3981,7 +3996,8 @@ union lpfc_wqe128 { struct gen_req64_wqe gen_req; }; -#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001 +#define LPFC_GROUP_OJECT_MAGIC_G5 0xfeaa0001 +#define LPFC_GROUP_OJECT_MAGIC_G6 0xfeaa0003 #define LPFC_FILE_TYPE_GROUP 0xf7 #define LPFC_FILE_ID_GROUP 0xa2 struct lpfc_grp_hdr { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 734a0428ef0efb90aaef478a957ae9f34a5db30e..4776fd85514f5886a195d409c25fca6f389468da 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6279,34 +6279,36 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) uint32_t old_guard; int pagecnt = 10; - if 
(lpfc_prot_mask && lpfc_prot_guard) { + if (phba->cfg_prot_mask && phba->cfg_prot_guard) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1478 Registering BlockGuard with the " "SCSI layer\n"); - old_mask = lpfc_prot_mask; - old_guard = lpfc_prot_guard; + old_mask = phba->cfg_prot_mask; + old_guard = phba->cfg_prot_guard; /* Only allow supported values */ - lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | + phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION | SHOST_DIX_TYPE1_PROTECTION); - lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); + phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | + SHOST_DIX_GUARD_CRC); /* DIF Type 1 protection for profiles AST1/C1 is end to end */ - if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) - lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; + if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) + phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; - if (lpfc_prot_mask && lpfc_prot_guard) { - if ((old_mask != lpfc_prot_mask) || - (old_guard != lpfc_prot_guard)) + if (phba->cfg_prot_mask && phba->cfg_prot_guard) { + if ((old_mask != phba->cfg_prot_mask) || + (old_guard != phba->cfg_prot_guard)) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1475 Registering BlockGuard with the " "SCSI layer: mask %d guard %d\n", - lpfc_prot_mask, lpfc_prot_guard); + phba->cfg_prot_mask, + phba->cfg_prot_guard); - scsi_host_set_prot(shost, lpfc_prot_mask); - scsi_host_set_guard(shost, lpfc_prot_guard); + scsi_host_set_prot(shost, phba->cfg_prot_mask); + scsi_host_set_guard(shost, phba->cfg_prot_guard); } else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1479 Not Registering BlockGuard with the SCSI " @@ -6929,6 +6931,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) struct lpfc_mbx_get_func_cfg *get_func_cfg; struct lpfc_rsrc_desc_fcfcoe *desc; char *pdesc_0; + uint16_t forced_link_speed; + uint32_t if_type; int length, i, rc = 0, rc2; pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -7022,6 +7026,58 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) if (rc) goto read_cfg_out; + /* Update link speed if forced link speed is supported */ + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { + forced_link_speed = + bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); + if (forced_link_speed) { + phba->hba_flag |= HBA_FORCED_LINK_SPEED; + + switch (forced_link_speed) { + case LINK_SPEED_1G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_1G; + break; + case LINK_SPEED_2G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_2G; + break; + case LINK_SPEED_4G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_4G; + break; + case LINK_SPEED_8G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_8G; + break; + case LINK_SPEED_10G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_10G; + break; + case LINK_SPEED_16G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_16G; + break; + case LINK_SPEED_32G: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_32G; + break; + case 0xffff: + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_AUTO; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0047 Unrecognized link " + "speed : %d\n", + forced_link_speed); + phba->cfg_link_speed = + LPFC_USER_LINK_SPEED_AUTO; + } + } + } + /* Reset the DFT_HBA_Q_DEPTH to the max xri */ length = phba->sli4_hba.max_cfg_param.max_xri - lpfc_sli4_get_els_iocb_cnt(phba); @@ -7256,6 +7312,7 @@ int lpfc_sli4_queue_create(struct lpfc_hba *phba) { struct lpfc_queue *qdesc; + uint32_t wqesize; int idx; /* @@ -7340,15 
+7397,10 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.fcp_cq[idx] = qdesc; /* Create Fast Path FCP WQs */ - if (phba->fcp_embed_io) { - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_WQE128_SIZE, - LPFC_WQE128_DEF_COUNT); - } else { - qdesc = lpfc_sli4_queue_alloc(phba, - phba->sli4_hba.wq_esize, - phba->sli4_hba.wq_ecount); - } + wqesize = (phba->fcp_embed_io) ? + LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; + qdesc = lpfc_sli4_queue_alloc(phba, wqesize, + phba->sli4_hba.wq_ecount); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0503 Failed allocate fast-path FCP " @@ -10260,6 +10312,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context) int i, rc = 0; struct lpfc_dmabuf *dmabuf, *next; uint32_t offset = 0, temp_offset = 0; + uint32_t magic_number, ftype, fid, fsize; /* It can be null in no-wait mode, sanity check */ if (!fw) { @@ -10268,18 +10321,19 @@ lpfc_write_firmware(const struct firmware *fw, void *context) } image = (struct lpfc_grp_hdr *)fw->data; + magic_number = be32_to_cpu(image->magic_number); + ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); + fid = bf_get_be32(lpfc_grp_hdr_id, image), + fsize = be32_to_cpu(image->size); + INIT_LIST_HEAD(&dma_buffer_list); - if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || - (bf_get_be32(lpfc_grp_hdr_file_type, image) != - LPFC_FILE_TYPE_GROUP) || - (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || - (be32_to_cpu(image->size) != fw->size)) { + if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 && + magic_number != LPFC_GROUP_OJECT_MAGIC_G6) || + ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3022 Invalid FW image found. " - "Magic:%x Type:%x ID:%x\n", - be32_to_cpu(image->magic_number), - bf_get_be32(lpfc_grp_hdr_file_type, image), - bf_get_be32(lpfc_grp_hdr_id, image)); + "Magic:%x Type:%x ID:%x Size %d %zd\n", + magic_number, ftype, fid, fsize, fw->size); rc = -EINVAL; goto release_out; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index d197aa176dee3a10098770d4b747741f5e4b8b8c..ad350d969bdca637cc879ab54950e881022b11b8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -413,15 +413,13 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) * struct fcp_cmnd, struct fcp_rsp and the number of bde's * necessary to support the sg_tablesize. */ - psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, + psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); break; } - /* Initialize virtual ptrs to dma_buf region. */ - memset(psb->data, 0, phba->cfg_sg_dma_buf_size); /* Allocate iotag for psb->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); @@ -607,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, } /** - * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list + * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list * @phba: pointer to lpfc hba data structure. * @post_sblist: pointer to the scsi buffer list. * @@ -736,7 +734,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, } /** - * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls + * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls * @phba: pointer to lpfc hba data structure. 
* * This routine walks the list of scsi buffers that have been allocated and @@ -821,13 +819,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) * for the struct fcp_cmnd, struct fcp_rsp and the number * of bde's necessary to support the sg_tablesize. */ - psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, + psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); break; } - memset(psb->data, 0, phba->cfg_sg_dma_buf_size); /* * 4K Page alignment is CRITICAL to BlockGuard, double check @@ -857,7 +854,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) psb->data, psb->dma_handle); kfree(psb); lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3368 Failed to allocated IOTAG for" + "3368 Failed to allocate IOTAG for" " XRI:0x%x\n", lxri); lpfc_sli4_free_xri(phba, lxri); break; @@ -1136,7 +1133,7 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) * * This routine does the pci dma mapping for scatter-gather list of scsi cmnd * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans - * through sg elements and format the bdea. This routine also initializes all + * through sg elements and format the bde. This routine also initializes all * IOCB fields which are dependent on scsi command request buffer. * * Return codes: @@ -1269,13 +1266,16 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS -/* Return if if error injection is detected by Initiator */ +/* Return BG_ERR_INIT if error injection is detected by Initiator */ #define BG_ERR_INIT 0x1 -/* Return if if error injection is detected by Target */ +/* Return BG_ERR_TGT if error injection is detected by Target */ #define BG_ERR_TGT 0x2 -/* Return if if swapping CSUM<-->CRC is required for error injection */ +/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ #define BG_ERR_SWAP 0x10 -/* Return if disabling Guard/Ref/App checking is required for error injection */ +/** + * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for + * error injection + **/ #define BG_ERR_CHECK 0x20 /** @@ -4139,13 +4139,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); - /* The sdev is not guaranteed to be valid post scsi_done upcall. */ - cmd->scsi_done(cmd); - spin_lock_irqsave(&phba->hbalock, flags); lpfc_cmd->pCmd = NULL; spin_unlock_irqrestore(&phba->hbalock, flags); + /* The sdev is not guaranteed to be valid post scsi_done upcall. */ + cmd->scsi_done(cmd); + /* * If there is a thread waiting for command completion * wake up the thread. @@ -4822,7 +4822,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) ret = FAILED; lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0748 abort handler timed out waiting " - "for abortng I/O (xri:x%x) to complete: " + "for aborting I/O (xri:x%x) to complete: " "ret %#x, ID %d, LUN %llu\n", iocb->sli4_xritag, ret, cmnd->device->id, cmnd->device->lun); @@ -4945,26 +4945,30 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) * 0x2002 - Success. 
**/ static int -lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, - unsigned tgt_id, uint64_t lun_id, - uint8_t task_mgmt_cmd) +lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, + unsigned int tgt_id, uint64_t lun_id, + uint8_t task_mgmt_cmd) { struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *lpfc_cmd; struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; - struct lpfc_nodelist *pnode = rdata->pnode; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *pnode; int ret; int status; - if (!pnode || !NLP_CHK_NODE_ACT(pnode)) + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); + if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) return FAILED; + pnode = rdata->pnode; - lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); + lpfc_cmd = lpfc_get_scsi_buf(phba, pnode); if (lpfc_cmd == NULL) return FAILED; lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; lpfc_cmd->rdata = rdata; + lpfc_cmd->pCmd = cmnd; status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, task_mgmt_cmd); @@ -5171,7 +5175,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); - status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, + status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, FCP_LUN_RESET); lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, @@ -5249,7 +5253,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); - status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, + status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, FCP_TARGET_RESET); lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, @@ -5328,7 +5332,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) if (!match) continue; - status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data, + status = lpfc_send_taskmgmt(vport, cmnd, i, 0, FCP_TARGET_RESET); if (status != SUCCESS) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index c5326055beee15bd9937f02c09e9f74aeec1fe6e..4faa7672fc1d80add7e603e7bda066e5b98fd34b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -47,6 +47,7 @@ #include "lpfc_compat.h" #include "lpfc_debugfs.h" #include "lpfc_vport.h" +#include "lpfc_version.h" /* There are only four IOCB completion types. 
*/ typedef enum _lpfc_iocb_type { @@ -1323,18 +1324,20 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { lockdep_assert_held(&phba->hbalock); - BUG_ON(!piocb || !piocb->vport); + BUG_ON(!piocb); list_add_tail(&piocb->list, &pring->txcmplq); piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; if ((unlikely(pring->ringno == LPFC_ELS_RING)) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && - (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) && - (!(piocb->vport->load_flag & FC_UNLOADING))) - mod_timer(&piocb->vport->els_tmofunc, - jiffies + - msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); + (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { + BUG_ON(!piocb->vport); + if (!(piocb->vport->load_flag & FC_UNLOADING)) + mod_timer(&piocb->vport->els_tmofunc, + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); + } return 0; } @@ -2676,15 +2679,16 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; - list_del_init(&cmd_iocb->list); if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { + /* remove from txcmpl queue list */ + list_del_init(&cmd_iocb->list); cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; + return cmd_iocb; } - return cmd_iocb; } lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0317 iotag x%x is out off " + "0317 iotag x%x is out of " "range: max iotag x%x wd0 x%x\n", iotag, phba->sli.last_iotag, *(((uint32_t *) &prspiocb->iocb) + 7)); @@ -2719,8 +2723,9 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, return cmd_iocb; } } + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0372 iotag x%x is out off range: max iotag (x%x)\n", + "0372 iotag x%x is out of range: max iotag (x%x)\n", iotag, phba->sli.last_iotag); return NULL; } @@ -6289,6 +6294,25 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) return 0; } +void +lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + uint32_t len; + + len = sizeof(struct lpfc_mbx_set_host_data) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_HOST_DATA, len, + LPFC_SLI4_MBX_EMBED); + + mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; + mbox->u.mqe.un.set_host_data.param_len = 8; + snprintf(mbox->u.mqe.un.set_host_data.data, + LPFC_HOST_OS_DRIVER_VERSION_SIZE, + "Linux %s v"LPFC_DRIVER_VERSION, + (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); +} + /** * lpfc_sli4_hba_setup - SLI4 device intialization PCI function * @phba: Pointer to HBA context object. @@ -6540,6 +6564,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + lpfc_set_host_data(phba, mboxq); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "2134 Failed to set host os driver version %x", + rc); + } + /* Read the port's service parameters. 
*/ rc = lpfc_read_sparam(phba, mboxq, vport->vpi); if (rc) { @@ -11779,6 +11812,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, /* Look up the ELS command IOCB and create pseudo response IOCB */ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); + /* Put the iocb back on the txcmplq */ + lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); spin_unlock_irqrestore(&pring->ring_lock, iflags); if (unlikely(!cmdiocbq)) { diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c9bf20eb7223286df3fe5959ba76b98e4b0d43af..50bfc43ebcb01d4800e8ec8adf9b557a96e81e76 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.2.0.0." +#define LPFC_DRIVER_VERSION "11.2.0.2" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index a590089b9397b3fedc619dd83f7bc91285e03065..ccb68d12692c2beedaf653c2650cfe41c911d982 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -28,17 +28,15 @@ /* Definitions for the core NCR5380 driver. */ -#define NCR5380_implementation_fields unsigned char *pdma_base; \ - int pdma_residual +#define NCR5380_implementation_fields int pdma_residual -#define NCR5380_read(reg) macscsi_read(instance, reg) -#define NCR5380_write(reg, value) macscsi_write(instance, reg, value) +#define NCR5380_read(reg) in_8(hostdata->io + ((reg) << 4)) +#define NCR5380_write(reg, value) out_8(hostdata->io + ((reg) << 4), value) -#define NCR5380_dma_xfer_len(instance, cmd, phase) \ - macscsi_dma_xfer_len(instance, cmd) +#define NCR5380_dma_xfer_len macscsi_dma_xfer_len #define NCR5380_dma_recv_setup macscsi_pread #define NCR5380_dma_send_setup macscsi_pwrite -#define NCR5380_dma_residual(instance) (hostdata->pdma_residual) +#define NCR5380_dma_residual macscsi_dma_residual #define NCR5380_intr macscsi_intr #define NCR5380_queue_command macscsi_queue_command @@ -61,20 +59,6 @@ module_param(setup_hostid, int, 0); static int setup_toshiba_delay = -1; module_param(setup_toshiba_delay, int, 0); -/* - * NCR 5380 register access functions - */ - -static inline char macscsi_read(struct Scsi_Host *instance, int reg) -{ - return in_8(instance->base + (reg << 4)); -} - -static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value) -{ - out_8(instance->base + (reg << 4), value); -} - #ifndef MODULE static int __init mac_scsi_setup(char *str) { @@ -167,16 +151,15 @@ __asm__ __volatile__ \ : "0"(s), "1"(d), "2"(n) \ : "d0") -static int macscsi_pread(struct Scsi_Host *instance, - unsigned char *dst, int len) +static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, + unsigned char *dst, int len) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4); + unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); unsigned char *d = dst; int n = len; int transferred; - while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, + while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_DRQ | BASR_PHASE_MATCH, BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { CP_IO_TO_MEM(s, d, n); @@ -189,23 +172,23 @@ static int macscsi_pread(struct Scsi_Host *instance, return 0; /* Target changed phase early? 
*/ - if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, + if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) scmd_printk(KERN_ERR, hostdata->connected, "%s: !REQ and !ACK\n", __func__); if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) return 0; - dsprintk(NDEBUG_PSEUDO_DMA, instance, + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, "%s: bus error (%d/%d)\n", __func__, transferred, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); d = dst + transferred; n = len - transferred; } scmd_printk(KERN_ERR, hostdata->connected, "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); return -1; } @@ -270,16 +253,15 @@ __asm__ __volatile__ \ : "0"(s), "1"(d), "2"(n) \ : "d0") -static int macscsi_pwrite(struct Scsi_Host *instance, - unsigned char *src, int len) +static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, + unsigned char *src, int len) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char *s = src; - unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); + unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); int n = len; int transferred; - while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, + while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_DRQ | BASR_PHASE_MATCH, BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { CP_MEM_TO_IO(s, d, n); @@ -288,7 +270,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance, hostdata->pdma_residual = len - transferred; /* Target changed phase early? */ - if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, + if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) scmd_printk(KERN_ERR, hostdata->connected, "%s: !REQ and !ACK\n", __func__); @@ -297,7 +279,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance, /* No bus error. 
*/ if (n == 0) { - if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG, + if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT, HZ / 64) < 0) scmd_printk(KERN_ERR, hostdata->connected, @@ -305,25 +287,23 @@ static int macscsi_pwrite(struct Scsi_Host *instance, return 0; } - dsprintk(NDEBUG_PSEUDO_DMA, instance, + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, "%s: bus error (%d/%d)\n", __func__, transferred, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); s = src + transferred; n = len - transferred; } scmd_printk(KERN_ERR, hostdata->connected, "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); return -1; } -static int macscsi_dma_xfer_len(struct Scsi_Host *instance, +static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); - if (hostdata->flags & FLAG_NO_PSEUDO_DMA || cmd->SCp.this_residual < 16) return 0; @@ -331,6 +311,11 @@ static int macscsi_dma_xfer_len(struct Scsi_Host *instance, return cmd->SCp.this_residual; } +static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata) +{ + return hostdata->pdma_residual; +} + #include "NCR5380.c" #define DRV_MODULE_NAME "mac_scsi" @@ -356,6 +341,7 @@ static struct scsi_host_template mac_scsi_template = { static int __init mac_scsi_probe(struct platform_device *pdev) { struct Scsi_Host *instance; + struct NCR5380_hostdata *hostdata; int error; int host_flags = 0; struct resource *irq, *pio_mem, *pdma_mem = NULL; @@ -388,17 +374,18 @@ static int __init mac_scsi_probe(struct platform_device *pdev) if (!instance) return -ENOMEM; - instance->base = pio_mem->start; if (irq) instance->irq = irq->start; else instance->irq = NO_IRQ; - if (pdma_mem && setup_use_pdma) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); + hostdata = shost_priv(instance); + hostdata->base = pio_mem->start; + hostdata->io = (void *)pio_mem->start; - hostdata->pdma_base = (unsigned char *)pdma_mem->start; - } else + if (pdma_mem && setup_use_pdma) + hostdata->pdma_io = (void *)pdma_mem->start; + else host_flags |= FLAG_NO_PSEUDO_DMA; host_flags |= setup_toshiba_delay > 0 ? 
FLAG_TOSHIBA_DELAY : 0; diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 9d05302a3bcd503f1d21d1c453c68a64ace13024..3c63c292cb92d5bc92d297ca8bef61ec643d0140 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h index 55b425c0a65459a8f8f1a93b218278a6aa6633e7..a30e725f2d5c56d6e82cfa5761e9dc2f7bac3b13 100644 --- a/drivers/scsi/megaraid/megaraid_mm.h +++ b/drivers/scsi/megaraid/megaraid_mm.h @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index ca86c885dfaab4f0fb6ae3595922d6f1a77974be..fdd519c1dd5753bc59dff5a11b7e2c19f89d52bd 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.811.02.00-rc1" -#define MEGASAS_RELDATE "April 12, 2016" +#define MEGASAS_VERSION "06.812.07.00-rc1" +#define MEGASAS_RELDATE "August 22, 2016" /* * Device IDs @@ -1429,6 +1429,8 @@ enum FW_BOOT_CONTEXT { #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 #define MR_MAX_MSIX_REG_ARRAY 16 #define MR_RDPQ_MODE_OFFSET 0X00800000 +#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000 + /* * register set for both 1068 and 1078 controllers * structure extended for 1078 registers @@ -2118,7 +2120,6 @@ struct megasas_instance { u32 ctrl_context_pages; struct megasas_ctrl_info *ctrl_info; unsigned int msix_vectors; - struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES]; struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES]; u64 map_id; u64 pd_seq_map_id; @@ -2140,6 +2141,7 @@ struct megasas_instance { u8 is_imr; u8 is_rdpq; bool dev_handle; + bool fw_sync_cache_support; }; struct MR_LD_VF_MAP { u32 size; @@ -2233,7 +2235,7 @@ struct megasas_instance_template { }; #define MEGASAS_IS_LOGICAL(scp) \ - (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 + ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d8b1fbd4c8aafc61e5e5491d5394440651a43a04..d5cf15eb8c5e3e794803f95724e5f4a52a1ee9b2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include #include @@ -1700,11 +1700,8 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) goto out_done; } - /* - * FW takes care of flush cache on its own for Virtual Disk. - * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW. - */ - if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) { + if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) && + (!instance->fw_sync_cache_support)) { scmd->result = DID_OK << 16; goto out_done; } @@ -4840,7 +4837,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance) } /* - * megasas_setup_irqs_msix - register legacy interrupts. + * megasas_setup_irqs_ioapic - register legacy interrupts. * @instance: Adapter soft state * * Do not enable interrupt, only setup ISRs. 
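
The megaraid_sas hunks that follow replace the driver's private msixentry[] bookkeeping and explicit irq_set_affinity_hint() calls with the generic pci_alloc_irq_vectors()/pci_irq_vector() helpers. As a reference point, here is a minimal sketch of that pattern; it is not code from this patch, and my_isr, my_setup_irqs, "my_drv" and the my_dev cookie are placeholder names used only for illustration.

/*
 * Sketch only: allocate interrupt vectors with pci_alloc_irq_vectors(),
 * letting the PCI core fall back from MSI-X to MSI to INTx and spread
 * vectors across CPUs (PCI_IRQ_AFFINITY), then map each vector index to
 * its Linux IRQ number with pci_irq_vector() for request_irq()/free_irq().
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_isr(int irq, void *data)
{
        /* per-vector interrupt handling would go here */
        return IRQ_HANDLED;
}

static int my_setup_irqs(struct pci_dev *pdev, void *my_dev, unsigned int want)
{
        int nvec, i, rc;

        /* Try MSI-X first; fall back to MSI or legacy INTx automatically. */
        nvec = pci_alloc_irq_vectors(pdev, 1, want,
                                     PCI_IRQ_MSIX | PCI_IRQ_MSI |
                                     PCI_IRQ_LEGACY | PCI_IRQ_AFFINITY);
        if (nvec < 0)
                return nvec;

        for (i = 0; i < nvec; i++) {
                rc = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
                                 "my_drv", my_dev);
                if (rc) {
                        while (--i >= 0)
                                free_irq(pci_irq_vector(pdev, i), my_dev);
                        pci_free_irq_vectors(pdev);
                        return rc;
                }
        }
        return nvec;
}

Teardown is the mirror image: free_irq(pci_irq_vector(pdev, i), ...) for each vector, then pci_free_irq_vectors(pdev), which is why the driver below drops pci_disable_msix() and the per-vector affinity-hint cleanup.
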
@@ -4855,8 +4852,9 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance) pdev = instance->pdev; instance->irq_context[0].instance = instance; instance->irq_context[0].MSIxIndex = 0; - if (request_irq(pdev->irq, instance->instancet->service_isr, - IRQF_SHARED, "megasas", &instance->irq_context[0])) { + if (request_irq(pci_irq_vector(pdev, 0), + instance->instancet->service_isr, IRQF_SHARED, + "megasas", &instance->irq_context[0])) { dev_err(&instance->pdev->dev, "Failed to register IRQ from %s %d\n", __func__, __LINE__); @@ -4877,28 +4875,23 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance) static int megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) { - int i, j, cpu; + int i, j; struct pci_dev *pdev; pdev = instance->pdev; /* Try MSI-x */ - cpu = cpumask_first(cpu_online_mask); for (i = 0; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; - if (request_irq(instance->msixentry[i].vector, + if (request_irq(pci_irq_vector(pdev, i), instance->instancet->service_isr, 0, "megasas", &instance->irq_context[i])) { dev_err(&instance->pdev->dev, "Failed to register IRQ for vector %d.\n", i); - for (j = 0; j < i; j++) { - if (smp_affinity_enable) - irq_set_affinity_hint( - instance->msixentry[j].vector, NULL); - free_irq(instance->msixentry[j].vector, - &instance->irq_context[j]); - } + for (j = 0; j < i; j++) + free_irq(pci_irq_vector(pdev, j), + &instance->irq_context[j]); /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; if (is_probe) @@ -4906,14 +4899,6 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) else return -1; } - if (smp_affinity_enable) { - if (irq_set_affinity_hint(instance->msixentry[i].vector, - get_cpu_mask(cpu))) - dev_err(&instance->pdev->dev, - "Failed to set affinity hint" - " for cpu %d\n", cpu); - cpu = cpumask_next(cpu, cpu_online_mask); - } } return 0; } @@ -4930,14 +4915,12 @@ megasas_destroy_irqs(struct megasas_instance *instance) { if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { - if (smp_affinity_enable) - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); - free_irq(instance->msixentry[i].vector, + free_irq(pci_irq_vector(instance->pdev, i), &instance->irq_context[i]); } else - free_irq(instance->pdev->irq, &instance->irq_context[0]); + free_irq(pci_irq_vector(instance->pdev, 0), + &instance->irq_context[0]); } /** @@ -5095,6 +5078,8 @@ static int megasas_init_fw(struct megasas_instance *instance) msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 0x4000000) >> 0x1a; if (msix_enable && !msix_disable) { + int irq_flags = PCI_IRQ_MSIX; + scratch_pad_2 = readl (&instance->reg_set->outbound_scratch_pad_2); /* Check max MSI-X vectors */ @@ -5131,15 +5116,18 @@ static int megasas_init_fw(struct megasas_instance *instance) /* Don't bother allocating more MSI-X vectors than cpus */ instance->msix_vectors = min(instance->msix_vectors, (unsigned int)num_online_cpus()); - for (i = 0; i < instance->msix_vectors; i++) - instance->msixentry[i].entry = i; - i = pci_enable_msix_range(instance->pdev, instance->msixentry, - 1, instance->msix_vectors); + if (smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY; + i = pci_alloc_irq_vectors(instance->pdev, 1, + instance->msix_vectors, irq_flags); if (i > 0) instance->msix_vectors = i; else instance->msix_vectors = 0; } + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); + if (i < 0) + goto fail_setup_irqs; 
dev_info(&instance->pdev->dev, "firmware supports msix\t: (%d)", fw_msix_count); @@ -5152,11 +5140,6 @@ static int megasas_init_fw(struct megasas_instance *instance) tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); - if (instance->msix_vectors ? - megasas_setup_irqs_msix(instance, 1) : - megasas_setup_irqs_ioapic(instance)) - goto fail_setup_irqs; - instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); if (instance->ctrl_info == NULL) @@ -5172,6 +5155,10 @@ static int megasas_init_fw(struct megasas_instance *instance) if (instance->instancet->init_adapter(instance)) goto fail_init_adapter; + if (instance->msix_vectors ? + megasas_setup_irqs_msix(instance, 1) : + megasas_setup_irqs_ioapic(instance)) + goto fail_init_adapter; instance->instancet->enable_intr(instance); @@ -5315,7 +5302,7 @@ static int megasas_init_fw(struct megasas_instance *instance) megasas_destroy_irqs(instance); fail_setup_irqs: if (instance->msix_vectors) - pci_disable_msix(instance->pdev); + pci_free_irq_vectors(instance->pdev); instance->msix_vectors = 0; fail_ready_state: kfree(instance->ctrl_info); @@ -5584,7 +5571,6 @@ static int megasas_io_attach(struct megasas_instance *instance) /* * Export parameters required by SCSI mid-layer */ - host->irq = instance->pdev->irq; host->unique_id = instance->unique_id; host->can_queue = instance->max_scsi_cmds; host->this_id = instance->init_id; @@ -5947,7 +5933,7 @@ static int megasas_probe_one(struct pci_dev *pdev, else megasas_release_mfi(instance); if (instance->msix_vectors) - pci_disable_msix(instance->pdev); + pci_free_irq_vectors(instance->pdev); fail_init_mfi: fail_alloc_dma_buf: if (instance->evt_detail) @@ -6105,7 +6091,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) megasas_destroy_irqs(instance); if (instance->msix_vectors) - pci_disable_msix(instance->pdev); + pci_free_irq_vectors(instance->pdev); pci_save_state(pdev); pci_disable_device(pdev); @@ -6125,6 +6111,7 @@ megasas_resume(struct pci_dev *pdev) int rval; struct Scsi_Host *host; struct megasas_instance *instance; + int irq_flags = PCI_IRQ_LEGACY; instance = pci_get_drvdata(pdev); host = instance->host; @@ -6160,9 +6147,15 @@ megasas_resume(struct pci_dev *pdev) goto fail_ready_state; /* Now re-enable MSI-X */ - if (instance->msix_vectors && - pci_enable_msix_exact(instance->pdev, instance->msixentry, - instance->msix_vectors)) + if (instance->msix_vectors) { + irq_flags = PCI_IRQ_MSIX; + if (smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY; + } + rval = pci_alloc_irq_vectors(instance->pdev, 1, + instance->msix_vectors ? 
+ instance->msix_vectors : 1, irq_flags); + if (rval < 0) goto fail_reenable_msix; if (instance->ctrl_context) { @@ -6245,6 +6238,34 @@ megasas_resume(struct pci_dev *pdev) #define megasas_resume NULL #endif +static inline int +megasas_wait_for_adapter_operational(struct megasas_instance *instance) +{ + int wait_time = MEGASAS_RESET_WAIT_TIME * 2; + int i; + + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) + return 1; + + for (i = 0; i < wait_time; i++) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) + break; + + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) + dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); + + msleep(1000); + } + + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { + dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n", + __func__); + return 1; + } + + return 0; +} + /** * megasas_detach_one - PCI hot"un"plug entry point * @pdev: PCI device structure @@ -6269,9 +6290,14 @@ static void megasas_detach_one(struct pci_dev *pdev) if (instance->fw_crash_state != UNAVAILABLE) megasas_free_host_crash_buffer(instance); scsi_remove_host(instance->host); + + if (megasas_wait_for_adapter_operational(instance)) + goto skip_firing_dcmds; + megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); +skip_firing_dcmds: /* cancel the delayed work if this work still in queue*/ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; @@ -6302,7 +6328,7 @@ static void megasas_detach_one(struct pci_dev *pdev) megasas_destroy_irqs(instance); if (instance->msix_vectors) - pci_disable_msix(instance->pdev); + pci_free_irq_vectors(instance->pdev); if (instance->ctrl_context) { megasas_release_fusion(instance); @@ -6385,13 +6411,19 @@ static void megasas_shutdown(struct pci_dev *pdev) struct megasas_instance *instance = pci_get_drvdata(pdev); instance->unload = 1; + + if (megasas_wait_for_adapter_operational(instance)) + goto skip_firing_dcmds; + megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); + +skip_firing_dcmds: instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); if (instance->msix_vectors) - pci_disable_msix(instance->pdev); + pci_free_irq_vectors(instance->pdev); } /** @@ -6752,8 +6784,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); - dev_err(&instance->pdev->dev, "timed out while" - "waiting for HBA to recover\n"); + dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n"); error = -ENODEV; goto out_up; } @@ -6821,8 +6852,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) spin_lock_irqsave(&instance->hba_lock, flags); if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); - dev_err(&instance->pdev->dev, "timed out while waiting" - "for HBA to recover\n"); + dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n"); return -ENODEV; } spin_unlock_irqrestore(&instance->hba_lock, flags); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index e413113c86ac17924ae6cd7bce872444c217843a..f237d0003df363d276b8286adf120f6cd875ef64 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -782,7 +782,8 
@@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { - pd = MR_ArPdGet(arRef, physArm + 1, map); + physArm = physArm + 1; + pd = MR_ArPdGet(arRef, physArm, map); if (pd != MR_PD_INVALID) *pDevHandle = MR_PdDevHandleGet(pd, map); } @@ -879,7 +880,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { /* Get alternate Pd. */ - pd = MR_ArPdGet(arRef, physArm + 1, map); + physArm = physArm + 1; + pd = MR_ArPdGet(arRef, physArm, map); if (pd != MR_PD_INVALID) /* Get dev handle from Pd */ *pDevHandle = MR_PdDevHandleGet(pd, map); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 52d8bbf7feb5c50efe361aa2e57f30277f38a0df..24778ba4b6e8be0b4f2086b344031004ee982b89 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -748,6 +748,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) goto fail_fw_init; } + instance->fw_sync_cache_support = (scratch_pad_2 & + MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; + dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n", + instance->fw_sync_cache_support ? "Yes" : "No"); + IOCInitMessage = dma_alloc_coherent(&instance->pdev->dev, sizeof(struct MPI2_IOC_INIT_REQUEST), @@ -2000,6 +2005,8 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, io_request->DevHandle = pd_sync->seq[pd_index].devHandle; pRAID_Context->regLockFlags |= (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); + pRAID_Context->Type = MPI2_TYPE_CUDA; + pRAID_Context->nseg = 0x1; } else if (fusion->fast_path_io) { pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); pRAID_Context->configSeqNum = 0; @@ -2035,12 +2042,10 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, pRAID_Context->timeoutValue = cpu_to_le16((os_timeout_value > timeout_limit) ? 
timeout_limit : os_timeout_value); - if (fusion->adapter_type == INVADER_SERIES) { - pRAID_Context->Type = MPI2_TYPE_CUDA; - pRAID_Context->nseg = 0x1; + if (fusion->adapter_type == INVADER_SERIES) io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); - } + cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); @@ -2463,12 +2468,15 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp) /* Start collecting crash, if DMA bit is done */ if ((fw_state == MFI_STATE_FAULT) && dma_state) schedule_work(&instance->crash_init); - else if (fw_state == MFI_STATE_FAULT) - schedule_work(&instance->work_init); + else if (fw_state == MFI_STATE_FAULT) { + if (instance->unload == 0) + schedule_work(&instance->work_init); + } } else if (fw_state == MFI_STATE_FAULT) { dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt" "for scsi%d\n", instance->host->host_no); - schedule_work(&instance->work_init); + if (instance->unload == 0) + schedule_work(&instance->work_init); } } @@ -2823,6 +2831,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, dev_err(&instance->pdev->dev, "pending commands remain after waiting, " "will reset adapter scsi%d.\n", instance->host->host_no); + *convert = 1; retval = 1; } out: diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 95356a82ee99bc09d730e2e918e34ce40ee65766..fa61baf7c74dd409d1733b27e3c7b1d460dcfe7f 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -478,6 +478,13 @@ typedef struct _MPI2_CONFIG_REPLY { #define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2) #define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3) +#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA) +#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB) +#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC) +#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD) +#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE) +#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF) + /*Manufacturing Page 0 */ typedef struct _MPI2_CONFIG_PAGE_MAN_0 { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index a1a5ceb42ce6d3280dadd5a521e24e4af9a2aff8..f00ef88a378af2f79f5ef9b38cc97b6cb9eeed19 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -849,7 +849,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) ack_request->EventContext = mpi_reply->EventContext; ack_request->VF_ID = 0; /* TODO */ ack_request->VP_ID = 0; - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); out: @@ -1078,7 +1078,7 @@ _base_interrupt(int irq, void *bus_id) * new reply host index value in ReplyPostIndex Field and msix_index * value in MSIxIndex field. 
*/ - if (ioc->msix96_vector) + if (ioc->combined_reply_queue) writel(reply_q->reply_post_host_index | ((msix_index & 7) << MPI2_RPHI_MSIX_INDEX_SHIFT), ioc->replyPostRegisterIndex[msix_index/8]); @@ -1959,7 +1959,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) { struct msix_entry *entries, *a; int r; - int i; + int i, local_max_msix_vectors; u8 try_msix = 0; if (msix_disable == -1 || msix_disable == 0) @@ -1979,13 +1979,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ioc->cpu_count, max_msix_vectors); if (!ioc->rdpq_array_enable && max_msix_vectors == -1) - max_msix_vectors = 8; + local_max_msix_vectors = 8; + else + local_max_msix_vectors = max_msix_vectors; - if (max_msix_vectors > 0) { - ioc->reply_queue_count = min_t(int, max_msix_vectors, + if (local_max_msix_vectors > 0) { + ioc->reply_queue_count = min_t(int, local_max_msix_vectors, ioc->reply_queue_count); ioc->msix_vector_count = ioc->reply_queue_count; - } else if (max_msix_vectors == 0) + } else if (local_max_msix_vectors == 0) goto try_ioapic; if (ioc->msix_vector_count < ioc->cpu_count) @@ -2050,7 +2052,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) _base_free_irq(ioc); _base_disable_msix(ioc); - if (ioc->msix96_vector) { + if (ioc->combined_reply_queue) { kfree(ioc->replyPostRegisterIndex); ioc->replyPostRegisterIndex = NULL; } @@ -2160,7 +2162,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) /* Use the Combined reply queue feature only for SAS3 C0 & higher * revision HBAs and also only when reply queue count is greater than 8 */ - if (ioc->msix96_vector && ioc->reply_queue_count > 8) { + if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) { /* Determine the Supplemental Reply Post Host Index Registers * Addresse. Supplemental Reply Post Host Index Registers * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and @@ -2168,7 +2170,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one. */ ioc->replyPostRegisterIndex = kcalloc( - MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT, + ioc->combined_reply_index_count, sizeof(resource_size_t *), GFP_KERNEL); if (!ioc->replyPostRegisterIndex) { dfailprintk(ioc, printk(MPT3SAS_FMT @@ -2178,14 +2180,14 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) goto out_fail; } - for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) { + for (i = 0; i < ioc->combined_reply_index_count; i++) { ioc->replyPostRegisterIndex[i] = (resource_size_t *) ((u8 *)&ioc->chip->Doorbell + MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); } } else - ioc->msix96_vector = 0; + ioc->combined_reply_queue = 0; if (ioc->is_warpdrive) { ioc->reply_post_host_index[0] = (resource_size_t __iomem *) @@ -2462,15 +2464,15 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) #endif /** - * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware + * _base_put_smid_scsi_io - send SCSI_IO request to firmware * @ioc: per adapter object * @smid: system request message index * @handle: device handle * * Return nothing. 
*/ -void -mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) +static void +_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) { Mpi2RequestDescriptorUnion_t descriptor; u64 *request = (u64 *)&descriptor; @@ -2486,15 +2488,15 @@ mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) } /** - * mpt3sas_base_put_smid_fast_path - send fast path request to firmware + * _base_put_smid_fast_path - send fast path request to firmware * @ioc: per adapter object * @smid: system request message index * @handle: device handle * * Return nothing. */ -void -mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, +static void +_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) { Mpi2RequestDescriptorUnion_t descriptor; @@ -2511,14 +2513,14 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, } /** - * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware + * _base_put_smid_hi_priority - send Task Management request to firmware * @ioc: per adapter object * @smid: system request message index * @msix_task: msix_task will be same as msix of IO incase of task abort else 0. * Return nothing. */ -void -mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, +static void +_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 msix_task) { Mpi2RequestDescriptorUnion_t descriptor; @@ -2535,14 +2537,14 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, } /** - * mpt3sas_base_put_smid_default - Default, primarily used for config pages + * _base_put_smid_default - Default, primarily used for config pages * @ioc: per adapter object * @smid: system request message index * * Return nothing. */ -void -mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) +static void +_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) { Mpi2RequestDescriptorUnion_t descriptor; u64 *request = (u64 *)&descriptor; @@ -2556,6 +2558,95 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) &ioc->scsi_lookup_lock); } +/** +* _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using +* Atomic Request Descriptor +* @ioc: per adapter object +* @smid: system request message index +* @handle: device handle, unused in this function, for function type match +* +* Return nothing. 
+*/ +static void +_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.MSIxIndex = _base_get_msix_index(ioc); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_fast_path_atomic - send fast path request to firmware + * using Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle, unused in this function, for function type match + * Return nothing + */ +static void +_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.MSIxIndex = _base_get_msix_index(ioc); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_hi_priority_atomic - send Task Management request to + * firmware using Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @msix_task: msix_task will be same as msix of IO incase of task abort else 0 + * + * Return nothing. + */ +static void +_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 msix_task) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.MSIxIndex = msix_task; + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_default - Default, primarily used for config pages + * use Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * + * Return nothing. 
+ */ +static void +_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.MSIxIndex = _base_get_msix_index(ioc); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + /** * _base_display_OEMs_branding - Display branding string * @ioc: per adapter object @@ -4070,7 +4161,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) ioc->ioc_link_reset_in_progress = 1; init_completion(&ioc->base_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->base_cmds.done, msecs_to_jiffies(10000)); if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || @@ -4170,7 +4261,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, ioc->base_cmds.smid = smid; memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); init_completion(&ioc->base_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->base_cmds.done, msecs_to_jiffies(10000)); if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { @@ -4355,6 +4446,8 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)) ioc->rdpq_array_capable = 1; + if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) + ioc->atomic_desc_capable = 1; facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); facts->IOCRequestFrameSize = le16_to_cpu(mpi_reply.IOCRequestFrameSize); @@ -4582,7 +4675,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; init_completion(&ioc->port_enable_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { pr_err(MPT3SAS_FMT "%s: timeout\n", @@ -4645,7 +4738,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); return 0; } @@ -4764,7 +4857,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc) mpi_request->EventMasks[i] = cpu_to_le32(ioc->event_masks[i]); init_completion(&ioc->base_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { pr_err(MPT3SAS_FMT "%s: timeout\n", @@ -5138,7 +5231,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) /* initialize reply post host index */ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { - if (ioc->msix96_vector) + if (ioc->combined_reply_queue) writel((reply_q->msix_index & 7)<< MPI2_RPHI_MSIX_INDEX_SHIFT, ioc->replyPostRegisterIndex[reply_q->msix_index/8]); @@ -5280,9 +5373,23 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->build_sg = &_base_build_sg_ieee; ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); + break; } + if (ioc->atomic_desc_capable) { + ioc->put_smid_default = &_base_put_smid_default_atomic; + ioc->put_smid_scsi_io = 
&_base_put_smid_scsi_io_atomic; + ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic; + ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic; + } else { + ioc->put_smid_default = &_base_put_smid_default; + ioc->put_smid_scsi_io = &_base_put_smid_scsi_io; + ioc->put_smid_fast_path = &_base_put_smid_fast_path; + ioc->put_smid_hi_priority = &_base_put_smid_hi_priority; + } + + /* * These function pointers for other requests that don't * the require IEEE scatter gather elements. @@ -5332,6 +5439,21 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) goto out_free_resources; } + /* allocate memory for pending OS device add list */ + ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pend_os_device_add_sz++; + ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, + GFP_KERNEL); + if (!ioc->pend_os_device_add) + goto out_free_resources; + + ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; + ioc->device_remove_in_progress = + kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); + if (!ioc->device_remove_in_progress) + goto out_free_resources; + ioc->fwfault_debug = mpt3sas_fwfault_debug; /* base internal command bits */ @@ -5414,6 +5536,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) kfree(ioc->reply_post_host_index); kfree(ioc->pd_handles); kfree(ioc->blocking_handles); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); kfree(ioc->tm_cmds.reply); kfree(ioc->transport_cmds.reply); kfree(ioc->scsih_cmds.reply); @@ -5455,6 +5579,8 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) kfree(ioc->reply_post_host_index); kfree(ioc->pd_handles); kfree(ioc->blocking_handles); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); kfree(ioc->pfacts); kfree(ioc->ctl_cmds.reply); kfree(ioc->ctl_cmds.sense); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 3e71bc1b4a80604cca3488b7ce1d07acffb24445..394fe1338d0976a42f183e328dfaed02f540560f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -73,9 +73,9 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies " #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "13.100.00.00" -#define MPT3SAS_MAJOR_VERSION 13 -#define MPT3SAS_MINOR_VERSION 100 +#define MPT3SAS_DRIVER_VERSION "14.101.00.00" +#define MPT3SAS_MAJOR_VERSION 14 +#define MPT3SAS_MINOR_VERSION 101 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -300,8 +300,9 @@ * There are twelve Supplemental Reply Post Host Index Registers * and each register is at offset 0x10 bytes from the previous one. 
*/ -#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12 -#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12 +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16 +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) /* OEM Identifiers */ #define MFG10_OEM_ID_INVALID (0x00000000) @@ -375,7 +376,6 @@ struct MPT3SAS_TARGET { * per device private data */ #define MPT_DEVICE_FLAGS_INIT 0x01 -#define MPT_DEVICE_TLR_ON 0x02 #define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003) #define MFG_PAGE10_HIDE_ALL_DISKS (0x00) @@ -402,6 +402,9 @@ struct MPT3SAS_DEVICE { u8 block; u8 tlr_snoop_check; u8 ignore_delay_remove; + /* Iopriority Command Handling */ + u8 ncq_prio_enable; + }; #define MPT3_CMD_NOT_USED 0x8000 /* free */ @@ -736,7 +739,10 @@ typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge, typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc, void *paddr); - +/* To support atomic and non atomic descriptors*/ +typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 funcdep); +typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid); /* IOC Facts and Port Facts converted from little endian to cpu */ union mpi3_version_union { @@ -1079,6 +1085,9 @@ struct MPT3SAS_ADAPTER { void *pd_handles; u16 pd_handles_sz; + void *pend_os_device_add; + u16 pend_os_device_add_sz; + /* config page */ u16 config_page_sz; void *config_page; @@ -1156,7 +1165,8 @@ struct MPT3SAS_ADAPTER { u8 reply_queue_count; struct list_head reply_queue_list; - u8 msix96_vector; + u8 combined_reply_queue; + u8 combined_reply_index_count; /* reply post register index */ resource_size_t **replyPostRegisterIndex; @@ -1187,6 +1197,15 @@ struct MPT3SAS_ADAPTER { struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event; struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi; struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi; + void *device_remove_in_progress; + u16 device_remove_in_progress_sz; + u8 is_gen35_ioc; + u8 atomic_desc_capable; + PUT_SMID_IO_FP_HIP put_smid_scsi_io; + PUT_SMID_IO_FP_HIP put_smid_fast_path; + PUT_SMID_IO_FP_HIP put_smid_hi_priority; + PUT_SMID_DEFAULT put_smid_default; + }; typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -1232,13 +1251,6 @@ u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid); -void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, - u16 handle); -void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, - u16 handle); -void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, - u16 smid, u16 msix_task); -void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid); void mpt3sas_base_initialize_callback_handler(void); u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func); void mpt3sas_base_release_callback_handler(u8 cb_idx); @@ -1449,4 +1461,7 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, u16 smid); +/* NCQ Prio Handling Check */ +bool scsih_ncq_prio_supp(struct scsi_device *sdev); + #endif /* MPT3SAS_BASE_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index cebfd734fd769c78a36fb66c1805f5fb89e37ac2..dd62701256142b04f5d3e4989017fc9848cd95ec 100644 --- 
a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -384,7 +384,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); _config_display_some_debug(ioc, smid, "config_request", NULL); init_completion(&ioc->config_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { pr_err(MPT3SAS_FMT "%s: timeout\n", diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 26cdc127ac89cf022490e8d87584f0542babfa04..95f0f24bac05598e1c8246cb1078de9163a66e5d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -654,6 +654,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, size_t data_in_sz = 0; long ret; u16 wait_state_count; + u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; issue_reset = 0; @@ -738,10 +739,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, data_in_sz = karg.data_in_size; if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || - mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { - if (!le16_to_cpu(mpi_request->FunctionDependent1) || - le16_to_cpu(mpi_request->FunctionDependent1) > - ioc->facts.MaxDevHandle) { + mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT || + mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) { + + device_handle = le16_to_cpu(mpi_request->FunctionDependent1); + if (!device_handle || (device_handle > + ioc->facts.MaxDevHandle)) { ret = -EINVAL; mpt3sas_base_free_smid(ioc, smid); goto out; @@ -797,14 +801,20 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, scsiio_request->SenseBufferLowAddress = mpt3sas_base_get_sense_buffer_dma(ioc, smid); memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); + if (test_bit(device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); - if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) - mpt3sas_base_put_smid_scsi_io(ioc, smid, - le16_to_cpu(mpi_request->FunctionDependent1)); + ioc->put_smid_scsi_io(ioc, smid, device_handle); else - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); break; } case MPI2_FUNCTION_SCSI_TASK_MGMT: @@ -827,11 +837,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } } + if (test_bit(device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( tm_request->DevHandle)); ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); - mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); + ioc->put_smid_hi_priority(ioc, smid, 0); break; } case MPI2_FUNCTION_SMP_PASSTHROUGH: @@ -862,16 +880,30 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } 
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); break; } case MPI2_FUNCTION_SATA_PASSTHROUGH: + { + if (test_bit(device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } case MPI2_FUNCTION_FW_DOWNLOAD: case MPI2_FUNCTION_FW_UPLOAD: { ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); break; } case MPI2_FUNCTION_TOOLBOX: @@ -886,7 +918,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); } - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); break; } case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: @@ -905,7 +937,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, default: ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); break; } @@ -1064,7 +1096,10 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) break; case MPI25_VERSION: case MPI26_VERSION: - karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; + if (ioc->is_gen35_ioc) + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35; + else + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); break; } @@ -1491,7 +1526,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, cpu_to_le32(ioc->product_specific[buffer_type][i]); init_completion(&ioc->ctl_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->ctl_cmds.done, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); @@ -1838,7 +1873,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, mpi_request->VP_ID = 0; init_completion(&ioc->ctl_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->ctl_cmds.done, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); @@ -2105,7 +2140,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) mpi_request->VP_ID = 0; init_completion(&ioc->ctl_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->ctl_cmds.done, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); @@ -3290,8 +3325,6 @@ static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, /*********** diagnostic trigger suppport *** END ****************************/ - - /*****************************************/ struct device_attribute *mpt3sas_host_attrs[] = { @@ -3367,9 +3400,50 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); +/** + * _ctl_device_ncq_io_prio_show - send prioritized io commands to device + * @dev - pointer to embedded device + * @buf - the buffer returned + * + * A sysfs 'read/write' sdev attribute, only works with SATA + */ +static ssize_t +_ctl_device_ncq_prio_enable_show(struct device *dev, + struct device_attribute *attr, char 
*buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", + sas_device_priv_data->ncq_prio_enable); +} + +static ssize_t +_ctl_device_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + bool ncq_prio_enable = 0; + + if (kstrtobool(buf, &ncq_prio_enable)) + return -EINVAL; + + if (!scsih_ncq_prio_supp(sdev)) + return -EINVAL; + + sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; + return strlen(buf); +} +static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR, + _ctl_device_ncq_prio_enable_show, + _ctl_device_ncq_prio_enable_store); + struct device_attribute *mpt3sas_dev_attrs[] = { &dev_attr_sas_address, &dev_attr_sas_device_handle, + &dev_attr_sas_ncq_prio_enable, NULL, }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index 89408356d252432b4d57f9ae3964ec4d1381b14a..f3e17a8c1b07ba08e1ea4ca559ef0e7b5b598f89 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -143,6 +143,7 @@ struct mpt3_ioctl_pci_info { #define MPT2_IOCTL_INTERFACE_SAS2 (0x04) #define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05) #define MPT3_IOCTL_INTERFACE_SAS3 (0x06) +#define MPT3_IOCTL_INTERFACE_SAS35 (0x07) #define MPT2_IOCTL_VERSION_LENGTH (32) /** diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 209a969a979d8768fa5d0dc15bc5f921f2c4ec1c..b5c966e319d315474b94703b93ab0343013dd973 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -423,7 +423,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, return 0; } - /* we hit this becuase the given parent handle doesn't exist */ + /* we hit this because the given parent handle doesn't exist */ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) return -ENXIO; @@ -788,6 +788,11 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, list_add_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (ioc->hide_drives) { + clear_bit(sas_device->handle, ioc->pend_os_device_add); + return; + } + if (!mpt3sas_transport_port_add(ioc, sas_device->handle, sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); @@ -803,7 +808,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, sas_device->sas_address_parent); _scsih_sas_device_remove(ioc, sas_device); } - } + } else + clear_bit(sas_device->handle, ioc->pend_os_device_add); } /** @@ -1273,9 +1279,9 @@ scsih_target_alloc(struct scsi_target *starget) sas_target_priv_data->handle = raid_device->handle; sas_target_priv_data->sas_address = raid_device->wwid; sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; - sas_target_priv_data->raid_device = raid_device; if (ioc->is_warpdrive) - raid_device->starget = starget; + sas_target_priv_data->raid_device = raid_device; + raid_device->starget = starget; } spin_unlock_irqrestore(&ioc->raid_device_lock, flags); return 0; @@ -1517,7 +1523,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, /* * raid transport support - * Enabled for SLES11 and newer, in older kernels the driver will panic when - * unloading the driver followed by a load - I beleive that the subroutine + * unloading the driver followed by a load - I believe that the subroutine * 
raid_class_release() is not cleaning up properly. */ @@ -2279,7 +2285,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, msix_task = scsi_lookup->msix_io; else msix_task = 0; - mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task); + ioc->put_smid_hi_priority(ioc, smid, msix_task); wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { pr_err(MPT3SAS_FMT "%s: timeout\n", @@ -2837,7 +2843,7 @@ _scsih_internal_device_block(struct scsi_device *sdev, if (r == -EINVAL) sdev_printk(KERN_WARNING, sdev, "device_block failed with return(%d) for handle(0x%04x)\n", - sas_device_priv_data->sas_target->handle, r); + r, sas_device_priv_data->sas_target->handle); } /** @@ -2867,20 +2873,20 @@ _scsih_internal_device_unblock(struct scsi_device *sdev, sdev_printk(KERN_WARNING, sdev, "device_unblock failed with return(%d) for handle(0x%04x) " "performing a block followed by an unblock\n", - sas_device_priv_data->sas_target->handle, r); + r, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 1; r = scsi_internal_device_block(sdev); if (r) sdev_printk(KERN_WARNING, sdev, "retried device_block " "failed with return(%d) for handle(0x%04x)\n", - sas_device_priv_data->sas_target->handle, r); + r, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 0; r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); if (r) sdev_printk(KERN_WARNING, sdev, "retried device_unblock" " failed with return(%d) for handle(0x%04x)\n", - sas_device_priv_data->sas_target->handle, r); + r, sas_device_priv_data->sas_target->handle); } } @@ -2942,7 +2948,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) * @ioc: per adapter object * @handle: device handle * - * During device pull we need to appropiately set the sdev state. + * During device pull we need to appropriately set the sdev state. */ static void _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) @@ -2971,7 +2977,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) * @ioc: per adapter object * @handle: device handle * - * During device pull we need to appropiately set the sdev state. + * During device pull we need to appropriately set the sdev state. 
*/ static void _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) @@ -3138,6 +3144,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) if (test_bit(handle, ioc->pd_handles)) return; + clear_bit(handle, ioc->pend_os_device_add); + spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); if (sas_device && sas_device->starget && @@ -3192,7 +3200,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; - mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); + set_bit(handle, ioc->device_remove_in_progress); + ioc->put_smid_hi_priority(ioc, smid, 0); mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); out: @@ -3291,7 +3300,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; mpi_request->DevHandle = mpi_request_tm->DevHandle; - mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl); + ioc->put_smid_default(ioc, smid_sas_ctrl); return _scsih_check_for_pending_tm(ioc, smid); } @@ -3326,6 +3335,11 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo))); + if (le16_to_cpu(mpi_reply->IOCStatus) == + MPI2_IOCSTATUS_SUCCESS) { + clear_bit(le16_to_cpu(mpi_reply->DevHandle), + ioc->device_remove_in_progress); + } } else { pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -3381,7 +3395,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; - mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); + ioc->put_smid_hi_priority(ioc, smid, 0); } /** @@ -3473,7 +3487,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event, ack_request->EventContext = event_context; ack_request->VF_ID = 0; /* TODO */ ack_request->VP_ID = 0; - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); } /** @@ -3530,7 +3544,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; mpi_request->DevHandle = handle; - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); } /** @@ -3885,6 +3899,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, } } +static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) +{ + return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); +} + /** * _scsih_flush_running_cmds - completing outstanding commands. 
* @ioc: per adapter object @@ -3906,6 +3925,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) if (!scmd) continue; count++; + if (ata_12_16_cmd(scmd)) + scsi_internal_device_unblock(scmd->device, + SDEV_RUNNING); mpt3sas_base_free_smid(ioc, smid); scsi_dma_unmap(scmd); if (ioc->pci_error_recovery) @@ -3922,7 +3944,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) * _scsih_setup_eedp - setup MPI request for EEDP transfer * @ioc: per adapter object * @scmd: pointer to scsi command object - * @mpi_request: pointer to the SCSI_IO reqest message frame + * @mpi_request: pointer to the SCSI_IO request message frame * * Supporting protection 1 and 3. * @@ -3975,6 +3997,9 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, mpi_request_3v->EEDPBlockSize = cpu_to_le16(scmd->device->sector_size); + + if (ioc->is_gen35_ioc) + eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); } @@ -4010,8 +4035,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) SAM_STAT_CHECK_CONDITION; } - - /** * scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object @@ -4030,6 +4053,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; struct _raid_device *raid_device; + struct request *rq = scmd->request; + int class; Mpi2SCSIIORequest_t *mpi_request; u32 mpi_control; u16 smid; @@ -4038,6 +4063,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (ioc->logging_level & MPT_DEBUG_SCSI) scsi_print_command(scmd); + /* + * Lock the device for any subsequent command until command is + * done. + */ + if (ata_12_16_cmd(scmd)) + scsi_internal_device_block(scmd->device); + sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -4071,7 +4103,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; - /* device busy with task managment */ + /* device busy with task management */ } else if (sas_target_priv_data->tm_busy || sas_device_priv_data->block) return SCSI_MLQUEUE_DEVICE_BUSY; @@ -4085,7 +4117,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) /* set tags */ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; - + /* NCQ Prio supported, make sure control indicated high priority */ + if (sas_device_priv_data->ncq_prio_enable) { + class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); + if (class == IOPRIO_CLASS_RT) + mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; + } /* Make sure Device is not raid volume. * We do not expose raid functionality to upper layer for warpdrive. 
*/ @@ -4141,12 +4178,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | MPI25_SCSIIO_IOFLAGS_FAST_PATH); - mpt3sas_base_put_smid_fast_path(ioc, smid, handle); + ioc->put_smid_fast_path(ioc, smid, handle); } else - mpt3sas_base_put_smid_scsi_io(ioc, smid, + ioc->put_smid_scsi_io(ioc, smid, le16_to_cpu(mpi_request->DevHandle)); } else - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); return 0; out: @@ -4613,6 +4650,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) if (scmd == NULL) return 1; + if (ata_12_16_cmd(scmd)) + scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); if (mpi_reply == NULL) { @@ -4642,7 +4682,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); mpi_request->DevHandle = cpu_to_le16(sas_device_priv_data->sas_target->handle); - mpt3sas_base_put_smid_scsi_io(ioc, smid, + ioc->put_smid_scsi_io(ioc, smid, sas_device_priv_data->sas_target->handle); return 0; } @@ -5367,10 +5407,10 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, sas_device->handle, handle); sas_target_priv_data->handle = handle; sas_device->handle = handle; - if (sas_device_pg0.Flags & + if (le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { sas_device->enclosure_level = - le16_to_cpu(sas_device_pg0.EnclosureLevel); + sas_device_pg0.EnclosureLevel; memcpy(sas_device->connector_name, sas_device_pg0.ConnectorName, 4); sas_device->connector_name[4] = '\0'; @@ -5449,6 +5489,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); if (!(_scsih_is_end_device(device_info))) return -1; + set_bit(handle, ioc->pend_os_device_add); sas_address = le64_to_cpu(sas_device_pg0.SASAddress); /* check if device is present */ @@ -5467,6 +5508,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address); if (sas_device) { + clear_bit(handle, ioc->pend_os_device_add); sas_device_put(sas_device); return -1; } @@ -5497,9 +5539,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; - if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + if (le16_to_cpu(sas_device_pg0.Flags) + & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { sas_device->enclosure_level = - le16_to_cpu(sas_device_pg0.EnclosureLevel); + sas_device_pg0.EnclosureLevel; memcpy(sas_device->connector_name, sas_device_pg0.ConnectorName, 4); sas_device->connector_name[4] = '\0'; @@ -5790,6 +5833,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, _scsih_check_device(ioc, sas_address, handle, phy_number, link_rate); + if (!test_bit(handle, ioc->pend_os_device_add)) + break; + case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: @@ -6251,7 +6297,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) handle, phys_disk_num)); init_completion(&ioc->scsih_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { @@ -6304,7 +6350,7 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) { sdev->no_uld_attach = no_uld_attach ? 1 : 0; sdev_printk(KERN_INFO, sdev, "%s raid component\n", - sdev->no_uld_attach ? "hidding" : "exposing"); + sdev->no_uld_attach ? "hiding" : "exposing"); WARN_ON(scsi_device_reprobe(sdev)); } @@ -7034,7 +7080,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0) if (sas_device_pg0->Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { sas_device->enclosure_level = - le16_to_cpu(sas_device_pg0->EnclosureLevel); + sas_device_pg0->EnclosureLevel; memcpy(&sas_device->connector_name[0], &sas_device_pg0->ConnectorName[0], 4); } else { @@ -7096,6 +7142,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) sas_device_pg0.SASAddress = le64_to_cpu(sas_device_pg0.SASAddress); sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot); + sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags); _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); } @@ -7707,6 +7754,9 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) complete(&ioc->tm_cmds.done); } + memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); + memset(ioc->device_remove_in_progress, 0, + ioc->device_remove_in_progress_sz); _scsih_fw_event_cleanup_queue(ioc); _scsih_flush_running_cmds(ioc); break; @@ -8097,7 +8147,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) if (!ioc->hide_ir_msg) pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name); init_completion(&ioc->scsih_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { @@ -8638,6 +8688,12 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) case MPI26_MFGPAGE_DEVID_SAS3324_2: case MPI26_MFGPAGE_DEVID_SAS3324_3: case MPI26_MFGPAGE_DEVID_SAS3324_4: + case MPI26_MFGPAGE_DEVID_SAS3508: + case MPI26_MFGPAGE_DEVID_SAS3508_1: + case MPI26_MFGPAGE_DEVID_SAS3408: + case MPI26_MFGPAGE_DEVID_SAS3516: + case MPI26_MFGPAGE_DEVID_SAS3516_1: + case MPI26_MFGPAGE_DEVID_SAS3416: return MPI26_VERSION; } return 0; @@ -8706,10 +8762,29 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->hba_mpi_version_belonged = hba_mpi_version; ioc->id = mpt3_ids++; sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); + switch (pdev->device) { + case MPI26_MFGPAGE_DEVID_SAS3508: + case MPI26_MFGPAGE_DEVID_SAS3508_1: + case MPI26_MFGPAGE_DEVID_SAS3408: + case MPI26_MFGPAGE_DEVID_SAS3516: + case 
MPI26_MFGPAGE_DEVID_SAS3516_1: + case MPI26_MFGPAGE_DEVID_SAS3416: + ioc->is_gen35_ioc = 1; + break; + default: + ioc->is_gen35_ioc = 0; + } if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || - (ioc->hba_mpi_version_belonged == MPI26_VERSION)) - ioc->msix96_vector = 1; + (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { + ioc->combined_reply_queue = 1; + if (ioc->is_gen35_ioc) + ioc->combined_reply_index_count = + MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; + else + ioc->combined_reply_index_count = + MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; + } break; default: return -ENODEV; } @@ -9031,6 +9106,31 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } +/** + * scsih_ncq_prio_supp - Check for NCQ command priority support + * @sdev: scsi device struct + * + * This is called when a user indicates they would like to enable + * ncq command priorities. This works only on SATA devices. + */ +bool scsih_ncq_prio_supp(struct scsi_device *sdev) +{ + unsigned char *buf; + bool ncq_prio_supp = false; + + if (!scsi_device_supports_vpd(sdev)) + return ncq_prio_supp; + + buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL); + if (!buf) + return ncq_prio_supp; + + if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN)) + ncq_prio_supp = (buf[213] >> 4) & 1; + + kfree(buf); + return ncq_prio_supp; +} /* * The pci device ids are defined in mpi/mpi2_cnfg.h. */ @@ -9112,6 +9212,19 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4, PCI_ANY_ID, PCI_ANY_ID }, + /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, + PCI_ANY_ID, PCI_ANY_ID }, {0} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); @@ -9152,7 +9265,7 @@ scsih_init(void) /* queuecommand callback hander */ scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); - /* task managment callback handler */ + /* task management callback handler */ tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); /* base internal commands callback handler */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index b74faf1a69b27426a5d1ae01b2df7634ac4dba7b..7f1d5785bc30afc104c9c249f5cc9ec266e776d6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -392,7 +392,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, "report_manufacture - send to sas_addr(0x%016llx)\n", ioc->name, (unsigned long long)sas_address)); init_completion(&ioc->transport_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { @@ -1198,7 +1198,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, ioc->name, (unsigned long long)phy->identify.sas_address, phy->number)); 
init_completion(&ioc->transport_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { @@ -1514,7 +1514,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, ioc->name, (unsigned long long)phy->identify.sas_address, phy->number, phy_operation)); init_completion(&ioc->transport_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { @@ -2032,7 +2032,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, "%s - sending smp request\n", ioc->name, __func__)); init_completion(&ioc->transport_cmds.done); - mpt3sas_base_put_smid_default(ioc, smid); + ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 4c57d9abce7b772be368a19388eceeffafc66d6f..7de5d8d75480f0d5934925d53556ca04f9b34510 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -668,7 +668,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) { u32 tmp; tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); - if (tmp && 1 << (slot_idx % 32)) { + if (tmp & 1 << (slot_idx % 32)) { mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), 1 << (slot_idx % 32)); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 86eb19902bacef9f62e808833cbe86387b1d1771..c7cc8035eacb371eb3ae6e038abb6df7c94e7169 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -791,8 +791,10 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf slot->slot_tag = tag; slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); - if (!slot->buf) + if (!slot->buf) { + rc = -ENOMEM; goto err_out_tag; + } memset(slot->buf, 0, MVS_SLOT_BUF_SZ); tei.task = task; diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 2f2a9910e30e826658464e74bf2ac6609838c3d5..ef99f62831fb8586b42cb9a03ce02078d3aecd08 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1595,7 +1595,7 @@ static int _init_blk_request(struct osd_request *or, } or->request = req; - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; req->timeout = or->timeout; req->retries = or->retries; diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 5033223f6287703c3f74b8eb5b8048d37845b44a..e8196c55b633bcedb6cb6f52e2badf5e2867c164 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -52,7 +52,7 @@ static const char * osst_version = "0.99.4"; #include #include #include -#include +#include #include /* The driver prints some debugging information on the console if DEBUG @@ -368,7 +368,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd, return DRIVER_ERROR << 24; blk_rq_set_block_pc(req); - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; SRpnt->bio = NULL; diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 68a5c347fae9a578fdcacbf6c618a0c15f4f6bb6..337982cf3d638198c1d674ab974d696b582ab160 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1368,13 +1368,8 @@ 
static struct genl_multicast_group pmcraid_mcgrps[] = { { .name = "events", /* not really used - see ID discussion below */ }, }; -static struct genl_family pmcraid_event_family = { - /* - * Due to prior multicast group abuse (the code having assumed that - * the family ID can be used as a multicast group ID) we need to - * statically allocate a family (and thus group) ID. - */ - .id = GENL_ID_PMCRAID, +static struct genl_family pmcraid_event_family __ro_after_init = { + .module = THIS_MODULE, .name = "pmcraid", .version = 1, .maxattr = PMCRAID_AEN_ATTR_MAX, @@ -1389,7 +1384,7 @@ static struct genl_family pmcraid_event_family = { * 0 if the pmcraid_event_family is successfully registered * with netlink generic, non-zero otherwise */ -static int pmcraid_netlink_init(void) +static int __init pmcraid_netlink_init(void) { int result; @@ -3792,11 +3787,11 @@ static long pmcraid_ioctl_passthrough( direction); if (rc) { pmcraid_err("couldn't build passthrough ioadls\n"); - goto out_free_buffer; + goto out_free_cmd; } } else if (request_size < 0) { rc = -EINVAL; - goto out_free_buffer; + goto out_free_cmd; } /* If data is being written into the device, copy the data from user @@ -3913,6 +3908,8 @@ static long pmcraid_ioctl_passthrough( out_free_sglist: pmcraid_release_passthrough_ioadls(cmd, request_size, direction); + +out_free_cmd: pmcraid_return_cmd(cmd); out_free_buffer: @@ -6023,8 +6020,10 @@ static int __init pmcraid_init(void) error = pmcraid_netlink_init(); - if (error) + if (error) { + class_destroy(pmcraid_class); goto out_unreg_chrdev; + } error = pci_register_driver(&pmcraid_driver); diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..23ca8a274586746e19204f291a844e6cbc43a07f --- /dev/null +++ b/drivers/scsi/qedi/Kconfig @@ -0,0 +1,10 @@ +config QEDI + tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support" + depends on PCI && SCSI + depends on QED + select SCSI_ISCSI_ATTRS + select QED_LL2 + select QED_ISCSI + ---help--- + This driver supports iSCSI offload for the QLogic FastLinQ + 41000 Series Converged Network Adapters. diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..2b3e16b24299ee94bf752d361d7b479c7958c385 --- /dev/null +++ b/drivers/scsi/qedi/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_QEDI) := qedi.o +qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \ + qedi_dbg.o + +qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h new file mode 100644 index 0000000000000000000000000000000000000000..5ca3e8c28a3f6af3b4faf1d82ec1ab6f37480127 --- /dev/null +++ b/drivers/scsi/qedi/qedi.h @@ -0,0 +1,364 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QEDI_H_ +#define _QEDI_H_ + +#define __PREVENT_QED_HSI__ + +#include +#include +#include +#include + +#include "qedi_hsi.h" +#include +#include "qedi_dbg.h" +#include +#include +#include "qedi_version.h" + +#define QEDI_MODULE_NAME "qedi" + +struct qedi_endpoint; + +/* + * PCI function probe defines + */ +#define QEDI_MODE_NORMAL 0 +#define QEDI_MODE_RECOVERY 1 + +#define ISCSI_WQE_SET_PTU_INVALIDATE 1 +#define QEDI_MAX_ISCSI_TASK 4096 +#define QEDI_MAX_TASK_NUM 0x0FFF +#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 +#define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */ +#define MAX_OUSTANDING_TASKS_PER_CON 1024 + +#define QEDI_MAX_BD_LEN 0xffff +#define QEDI_BD_SPLIT_SZ 0x1000 +#define QEDI_PAGE_SIZE 4096 +#define QEDI_FAST_SGE_COUNT 4 +/* MAX Length for cached SGL */ +#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1) + +#define MAX_NUM_MSIX_PF 8 +#define MIN_NUM_CPUS_MSIX(x) min((x)->msix_count, num_online_cpus()) + +#define QEDI_LOCAL_PORT_MIN 60000 +#define QEDI_LOCAL_PORT_MAX 61024 +#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN) +#define QEDI_LOCAL_PORT_INVALID 0xffff +#define TX_RX_RING 16 +#define RX_RING (TX_RX_RING - 1) +#define LL2_SINGLE_BUF_SIZE 0x400 +#define QEDI_PAGE_SIZE 4096 +#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE) +#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) + +#define QEDI_PAGE_SIZE 4096 +#define QEDI_PATH_HANDLE 0xFE0000000UL + +struct qedi_uio_ctrl { + /* meta data */ + u32 uio_hsi_version; + + /* user writes */ + u32 host_tx_prod; + u32 host_rx_cons; + u32 host_rx_bd_cons; + u32 host_tx_pkt_len; + u32 host_rx_cons_cnt; + + /* driver writes */ + u32 hw_tx_cons; + u32 hw_rx_prod; + u32 hw_rx_bd_prod; + u32 hw_rx_prod_cnt; + + /* other */ + u8 mac_addr[6]; + u8 reserve[2]; +}; + +struct qedi_rx_bd { + u32 rx_pkt_index; + u32 rx_pkt_len; + u16 vlan_id; +}; + +#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd)) +#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1) +#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1) +#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1) + +#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \ + (QEDI_MAX_RX_DESC_CNT - 1)) ? 
\ + (x) + 2 : (x) + 1) + +struct qedi_uio_dev { + struct uio_info qedi_uinfo; + u32 uio_dev; + struct list_head list; + + u32 ll2_ring_size; + void *ll2_ring; + + u32 ll2_buf_size; + void *ll2_buf; + + void *rx_pkt; + void *tx_pkt; + + struct qedi_ctx *qedi; + struct pci_dev *pdev; + void *uctrl; +}; + +/* List to maintain the skb pointers */ +struct skb_work_list { + struct list_head list; + struct sk_buff *skb; + u16 vlan_id; +}; + +/* Queue sizes in number of elements */ +#define QEDI_SQ_SIZE MAX_OUSTANDING_TASKS_PER_CON +#define QEDI_CQ_SIZE 2048 +#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK +#define QEDI_PROTO_CQ_PROD_IDX 0 + +struct qedi_glbl_q_params { + u64 hw_p_cq; /* Completion queue PBL */ + u64 hw_p_rq; /* Request queue PBL */ + u64 hw_p_cmdq; /* Command queue PBL */ +}; + +struct global_queue { + union iscsi_cqe *cq; + dma_addr_t cq_dma; + u32 cq_mem_size; + u32 cq_cons_idx; /* Completion queue consumer index */ + + void *cq_pbl; + dma_addr_t cq_pbl_dma; + u32 cq_pbl_size; + +}; + +struct qedi_fastpath { + struct qed_sb_info *sb_info; + u16 sb_id; +#define QEDI_NAME_SIZE 16 + char name[QEDI_NAME_SIZE]; + struct qedi_ctx *qedi; +}; + +/* Used to pass fastpath information needed to process CQEs */ +struct qedi_io_work { + struct list_head list; + struct iscsi_cqe_solicited cqe; + u16 que_idx; +}; + +/** + * struct iscsi_cid_queue - Per adapter iscsi cid queue + * + * @cid_que_base: queue base memory + * @cid_que: queue memory pointer + * @cid_q_prod_idx: produce index + * @cid_q_cons_idx: consumer index + * @cid_q_max_idx: max index. used to detect wrap around condition + * @cid_free_cnt: queue size + * @conn_cid_tbl: iscsi cid to conn structure mapping table + * + * Per adapter iSCSI CID Queue + */ +struct iscsi_cid_queue { + void *cid_que_base; + u32 *cid_que; + u32 cid_q_prod_idx; + u32 cid_q_cons_idx; + u32 cid_q_max_idx; + u32 cid_free_cnt; + struct qedi_conn **conn_cid_tbl; +}; + +struct qedi_portid_tbl { + spinlock_t lock; /* Port id lock */ + u16 start; + u16 max; + u16 next; + unsigned long *table; +}; + +struct qedi_itt_map { + __le32 itt; + struct qedi_cmd *p_cmd; +}; + +/* I/O tracing entry */ +#define QEDI_IO_TRACE_SIZE 2048 +struct qedi_io_log { +#define QEDI_IO_TRACE_REQ 0 +#define QEDI_IO_TRACE_RSP 1 + u8 direction; + u16 task_id; + u32 cid; + u32 port_id; /* Remote port fabric ID */ + int lun; + u8 op; /* SCSI CDB */ + u8 lba[4]; + unsigned int bufflen; /* SCSI buffer length */ + unsigned int sg_count; /* Number of SG elements */ + u8 fast_sgs; /* number of fast sgls */ + u8 slow_sgs; /* number of slow sgls */ + u8 cached_sgs; /* number of cached sgls */ + int result; /* Result passed back to mid-layer */ + unsigned long jiffies; /* Time stamp when I/O logged */ + int refcount; /* Reference count for task id */ + unsigned int blk_req_cpu; /* CPU that the task is queued on by + * blk layer + */ + unsigned int req_cpu; /* CPU that the task is queued on */ + unsigned int intr_cpu; /* Interrupt CPU that the task is received on */ + unsigned int blk_rsp_cpu;/* CPU that task is actually processed and + * returned to blk layer + */ + bool cached_sge; + bool slow_sge; + bool fast_sge; +}; + +/* Number of entries in BDQ */ +#define QEDI_BDQ_NUM 256 +#define QEDI_BDQ_BUF_SIZE 256 + +/* DMA coherent buffers for BDQ */ +struct qedi_bdq_buf { + void *buf_addr; + dma_addr_t buf_dma; +}; + +/* Main port level struct */ +struct qedi_ctx { + struct qedi_dbg_ctx dbg_ctx; + struct Scsi_Host *shost; + struct pci_dev *pdev; + struct qed_dev *cdev; + struct qed_dev_iscsi_info dev_info; 
+ struct qed_int_info int_info; + struct qedi_glbl_q_params *p_cpuq; + struct global_queue **global_queues; + /* uio declaration */ + struct qedi_uio_dev *udev; + struct list_head ll2_skb_list; + spinlock_t ll2_lock; /* Light L2 lock */ + spinlock_t hba_lock; /* per port lock */ + struct task_struct *ll2_recv_thread; + unsigned long flags; +#define UIO_DEV_OPENED 1 +#define QEDI_IOTHREAD_WAKE 2 +#define QEDI_IN_RECOVERY 5 +#define QEDI_IN_OFFLINE 6 + + u8 mac[ETH_ALEN]; + u32 src_ip[4]; + u8 ip_type; + + /* Physical address of above array */ + dma_addr_t hw_p_cpuq; + + struct qedi_bdq_buf bdq[QEDI_BDQ_NUM]; + void *bdq_pbl; + dma_addr_t bdq_pbl_dma; + size_t bdq_pbl_mem_size; + void *bdq_pbl_list; + dma_addr_t bdq_pbl_list_dma; + u8 bdq_pbl_list_num_entries; + void __iomem *bdq_primary_prod; + void __iomem *bdq_secondary_prod; + u16 bdq_prod_idx; + u16 rq_num_entries; + + u32 msix_count; + u32 max_sqes; + u8 num_queues; + u32 max_active_conns; + + struct iscsi_cid_queue cid_que; + struct qedi_endpoint **ep_tbl; + struct qedi_portid_tbl lcl_port_tbl; + + /* Rx fast path intr context */ + struct qed_sb_info *sb_array; + struct qedi_fastpath *fp_array; + struct qed_iscsi_tid tasks; + +#define QEDI_LINK_DOWN 0 +#define QEDI_LINK_UP 1 + atomic_t link_state; + +#define QEDI_RESERVE_TASK_ID 0 +#define MAX_ISCSI_TASK_ENTRIES 4096 +#define QEDI_INVALID_TASK_ID (MAX_ISCSI_TASK_ENTRIES + 1) + unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG]; + struct qedi_itt_map *itt_map; + u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK]; + struct qed_pf_params pf_params; + + struct workqueue_struct *tmf_thread; + struct workqueue_struct *offload_thread; + + u16 ll2_mtu; + + struct workqueue_struct *dpc_wq; + + spinlock_t task_idx_lock; /* To protect gbl context */ + s32 last_tidx_alloc; + s32 last_tidx_clear; + + struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE]; + spinlock_t io_trace_lock; /* protect trace log buf */ + u16 io_trace_idx; + unsigned int intr_cpu; + u32 cached_sgls; + bool use_cached_sge; + u32 slow_sgls; + bool use_slow_sge; + u32 fast_sgls; + bool use_fast_sge; + + atomic_t num_offloads; +}; + +struct qedi_work { + struct list_head list; + struct qedi_ctx *qedi; + union iscsi_cqe cqe; + u16 que_idx; + bool is_solicited; +}; + +struct qedi_percpu_s { + struct task_struct *iothread; + struct list_head work_list; + spinlock_t p_work_lock; /* Per cpu worker lock */ +}; + +static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid) +{ + return (info->blocks[tid / info->num_tids_per_block] + + (tid % info->num_tids_per_block) * info->size); +} + +#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32)) +#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff)) + +#endif /* _QEDI_H_ */ diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c new file mode 100644 index 0000000000000000000000000000000000000000..2bdedb9c39bc4126ec6a1234e8db8e0884f33cf0 --- /dev/null +++ b/drivers/scsi/qedi/qedi_dbg.c @@ -0,0 +1,143 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include "qedi_dbg.h" +#include + +void +qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...) 
+{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (likely(qedi) && likely(qedi->pdev)) + pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), + nfunc, line, qedi->host_no, &vaf); + else + pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + + va_end(va); +} + +void +qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedi_dbg_log & QEDI_LOG_WARN)) + return; + + if (likely(qedi) && likely(qedi->pdev)) + pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), + nfunc, line, qedi->host_no, &vaf); + else + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + + va_end(va); +} + +void +qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedi_dbg_log & QEDI_LOG_NOTICE)) + return; + + if (likely(qedi) && likely(qedi->pdev)) + pr_notice("[%s]:[%s:%d]:%d: %pV", + dev_name(&qedi->pdev->dev), nfunc, line, + qedi->host_no, &vaf); + else + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + + va_end(va); +} + +void +qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + u32 level, const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedi_dbg_log & level)) + return; + + if (likely(qedi) && likely(qedi->pdev)) + pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), + nfunc, line, qedi->host_no, &vaf); + else + pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + + va_end(va); +} + +int +qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + int ret = 0; + + for (; iter->name; iter++) { + ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, + iter->attr); + if (ret) + pr_err("Unable to create sysfs %s attr, err(%d).\n", + iter->name, ret); + } + return ret; +} + +void +qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + for (; iter->name; iter++) + sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); +} diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h new file mode 100644 index 0000000000000000000000000000000000000000..c55572badfb01ff8c7a1fe0cd8f93722395a3740 --- /dev/null +++ b/drivers/scsi/qedi/qedi_dbg.h @@ -0,0 +1,144 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QEDI_DBG_H_ +#define _QEDI_DBG_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define __PREVENT_QED_HSI__ +#include +#include + +extern uint qedi_dbg_log; + +/* Debug print level definitions */ +#define QEDI_LOG_DEFAULT 0x1 /* Set default logging mask */ +#define QEDI_LOG_INFO 0x2 /* Informational logs, + * MAC address, WWPN, WWNN + */ +#define QEDI_LOG_DISC 0x4 /* Init, discovery, rport */ +#define QEDI_LOG_LL2 0x8 /* LL2, VLAN logs */ +#define QEDI_LOG_CONN 0x10 /* Connection setup, cleanup */ +#define QEDI_LOG_EVT 0x20 /* Events, link, mtu */ +#define QEDI_LOG_TIMER 0x40 /* Timer events */ +#define QEDI_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */ +#define QEDI_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */ +#define QEDI_LOG_UNSOL 0x200 /* unsolicited event logs */ +#define QEDI_LOG_IO 0x400 /* scsi cmd, completion */ +#define QEDI_LOG_MQ 0x800 /* Multi Queue logs */ +#define QEDI_LOG_BSG 0x1000 /* BSG logs */ +#define QEDI_LOG_DEBUGFS 0x2000 /* debugFS logs */ +#define QEDI_LOG_LPORT 0x4000 /* lport logs */ +#define QEDI_LOG_ELS 0x8000 /* ELS logs */ +#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */ +#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */ +#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */ +#define QEDI_LOG_TID 0x80000 /* FW TID context acquire, + * free + */ +#define QEDI_TRACK_TID 0x100000 /* Track TID state. To be + * enabled only at module load + * and not run-time. + */ +#define QEDI_TRACK_CMD_LIST 0x300000 /* Track active cmd list nodes, + * done with reference to TID, + * hence TRACK_TID also enabled. + */ +#define QEDI_LOG_NOTICE 0x40000000 /* Notice logs */ +#define QEDI_LOG_WARN 0x80000000 /* Warning logs */ + +/* Debug context structure */ +struct qedi_dbg_ctx { + unsigned int host_no; + struct pci_dev *pdev; +#ifdef CONFIG_DEBUG_FS + struct dentry *bdf_dentry; +#endif +}; + +#define QEDI_ERR(pdev, fmt, ...) \ + qedi_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDI_WARN(pdev, fmt, ...) \ + qedi_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDI_NOTICE(pdev, fmt, ...) \ + qedi_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDI_INFO(pdev, level, fmt, ...) 
\ + qedi_dbg_info(pdev, __func__, __LINE__, level, fmt, \ + ## __VA_ARGS__) + +void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...); +void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...); +void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + const char *fmt, ...); +void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, + u32 info, const char *fmt, ...); + +struct Scsi_Host; + +struct sysfs_bin_attrs { + char *name; + struct bin_attribute *attr; +}; + +int qedi_create_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); +void qedi_remove_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); + +#ifdef CONFIG_DEBUG_FS +/* DebugFS related code */ +struct qedi_list_of_funcs { + char *oper_str; + ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi); +}; + +struct qedi_debugfs_ops { + char *name; + struct qedi_list_of_funcs *qedi_funcs; +}; + +#define qedi_dbg_fileops(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = simple_open, \ + .read = drv##_dbg_##ops##_cmd_read, \ + .write = drv##_dbg_##ops##_cmd_write \ +} + +/* Used for debugfs sequential files */ +#define qedi_dbg_fileops_seq(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = drv##_dbg_##ops##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi, + struct qedi_debugfs_ops *dops, + const struct file_operations *fops); +void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi); +void qedi_dbg_init(char *drv_name); +void qedi_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _QEDI_DBG_H_ */ diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..955936274241406e2c8df92a7f3a5fcd530d7f05 --- /dev/null +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -0,0 +1,244 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#include "qedi.h" +#include "qedi_dbg.h" + +#include +#include +#include + +int do_not_recover; +static struct dentry *qedi_dbg_root; + +void +qedi_dbg_host_init(struct qedi_dbg_ctx *qedi, + struct qedi_debugfs_ops *dops, + const struct file_operations *fops) +{ + char host_dirname[32]; + struct dentry *file_dentry = NULL; + + sprintf(host_dirname, "host%u", qedi->host_no); + qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root); + if (!qedi->bdf_dentry) + return; + + while (dops) { + if (!(dops->name)) + break; + + file_dentry = debugfs_create_file(dops->name, 0600, + qedi->bdf_dentry, qedi, + fops); + if (!file_dentry) { + QEDI_INFO(qedi, QEDI_LOG_DEBUGFS, + "Debugfs entry %s creation failed\n", + dops->name); + debugfs_remove_recursive(qedi->bdf_dentry); + return; + } + dops++; + fops++; + } +} + +void +qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi) +{ + debugfs_remove_recursive(qedi->bdf_dentry); + qedi->bdf_dentry = NULL; +} + +void +qedi_dbg_init(char *drv_name) +{ + qedi_dbg_root = debugfs_create_dir(drv_name, NULL); + if (!qedi_dbg_root) + QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n"); +} + +void +qedi_dbg_exit(void) +{ + debugfs_remove_recursive(qedi_dbg_root); + qedi_dbg_root = NULL; +} + +static ssize_t +qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg) +{ + if (!do_not_recover) + do_not_recover = 1; + + QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", + do_not_recover); + return 0; +} + +static ssize_t +qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg) +{ + if (do_not_recover) + do_not_recover = 0; + + QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", + do_not_recover); + return 0; +} + +static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = { + { "enable", qedi_dbg_do_not_recover_enable }, + { "disable", qedi_dbg_do_not_recover_disable }, + { NULL, NULL } +}; + +struct qedi_debugfs_ops qedi_debugfs_ops[] = { + { "gbl_ctx", NULL }, + { "do_not_recover", qedi_dbg_do_not_recover_ops}, + { "io_trace", NULL }, + { NULL, NULL } +}; + +static ssize_t +qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + size_t cnt = 0; + struct qedi_dbg_ctx *qedi_dbg = + (struct qedi_dbg_ctx *)filp->private_data; + struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops; + + if (*ppos) + return 0; + + while (lof) { + if (!(lof->oper_str)) + break; + + if (!strncmp(lof->oper_str, buffer, strlen(lof->oper_str))) { + cnt = lof->oper_func(qedi_dbg); + break; + } + + lof++; + } + return (count - cnt); +} + +static ssize_t +qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + size_t cnt = 0; + + if (*ppos) + return 0; + + cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover); + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static int +qedi_gbl_ctx_show(struct seq_file *s, void *unused) +{ + struct qedi_fastpath *fp = NULL; + struct qed_sb_info *sb_info = NULL; + struct status_block *sb = NULL; + struct global_queue *que = NULL; + int id; + u16 prod_idx; + struct qedi_ctx *qedi = s->private; + unsigned long flags; + + seq_puts(s, " DUMP CQ CONTEXT:\n"); + + for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { + spin_lock_irqsave(&qedi->hba_lock, flags); + seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id); + fp = &qedi->fp_array[id]; + sb_info = fp->sb_info; + sb = sb_info->sb_virt; + prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] & + 
STATUS_BLOCK_PROD_INDEX_MASK); + seq_printf(s, "SB PROD IDX: %d\n", prod_idx); + que = qedi->global_queues[fp->sb_id]; + seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx); + seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id); + seq_puts(s, "=========== END ==================\n\n\n"); + spin_unlock_irqrestore(&qedi->hba_lock, flags); + } + return 0; +} + +static int +qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file) +{ + struct qedi_dbg_ctx *qedi_dbg = inode->i_private; + struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx, + dbg_ctx); + + return single_open(file, qedi_gbl_ctx_show, qedi); +} + +static int +qedi_io_trace_show(struct seq_file *s, void *unused) +{ + int id, idx = 0; + struct qedi_ctx *qedi = s->private; + struct qedi_io_log *io_log; + unsigned long flags; + + seq_puts(s, " DUMP IO LOGS:\n"); + spin_lock_irqsave(&qedi->io_trace_lock, flags); + idx = qedi->io_trace_idx; + for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) { + io_log = &qedi->io_trace_buf[idx]; + seq_printf(s, "iodir-%d:", io_log->direction); + seq_printf(s, "tid-0x%x:", io_log->task_id); + seq_printf(s, "cid-0x%x:", io_log->cid); + seq_printf(s, "lun-%d:", io_log->lun); + seq_printf(s, "op-0x%02x:", io_log->op); + seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0], + io_log->lba[1], io_log->lba[2], io_log->lba[3]); + seq_printf(s, "buflen-%d:", io_log->bufflen); + seq_printf(s, "sgcnt-%d:", io_log->sg_count); + seq_printf(s, "res-0x%08x:", io_log->result); + seq_printf(s, "jif-%lu:", io_log->jiffies); + seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu); + seq_printf(s, "req_cpu-%d:", io_log->req_cpu); + seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu); + seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu); + + idx++; + if (idx == QEDI_IO_TRACE_SIZE) + idx = 0; + } + spin_unlock_irqrestore(&qedi->io_trace_lock, flags); + return 0; +} + +static int +qedi_dbg_io_trace_open(struct inode *inode, struct file *file) +{ + struct qedi_dbg_ctx *qedi_dbg = inode->i_private; + struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx, + dbg_ctx); + + return single_open(file, qedi_io_trace_show, qedi); +} + +const struct file_operations qedi_dbg_fops[] = { + qedi_dbg_fileops_seq(qedi, gbl_ctx), + qedi_dbg_fileops(qedi, do_not_recover), + qedi_dbg_fileops_seq(qedi, io_trace), + { NULL, NULL }, +}; diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c new file mode 100644 index 0000000000000000000000000000000000000000..b1d3904ae8fd8443e6295f19336a294244af4a1e --- /dev/null +++ b/drivers/scsi/qedi/qedi_fw.c @@ -0,0 +1,2378 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#include +#include +#include + +#include "qedi.h" +#include "qedi_iscsi.h" +#include "qedi_gbl.h" + +static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, + struct iscsi_task *mtask); + +void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + + if (cmd->io_tbl.sge_valid && sc) { + cmd->io_tbl.sge_valid = 0; + scsi_dma_unmap(sc); + } +} + +static void qedi_process_logout_resp(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_logout_rsp *resp_hdr; + struct iscsi_session *session = conn->session; + struct iscsi_logout_response_hdr *cqe_logout_response; + struct qedi_cmd *cmd; + + cmd = (struct qedi_cmd *)task->dd_data; + cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response; + spin_lock(&session->back_lock); + resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = cqe_logout_response->opcode; + resp_hdr->flags = cqe_logout_response->flags; + resp_hdr->hlength = 0; + + resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); + resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn); + resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn); + + resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait); + resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } else { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n", + cmd->task_id, qedi_conn->iscsi_conn_id, + &cmd->io_cmd); + } + + cmd->state = RESPONSE_RECEIVED; + qedi_clear_task_idx(qedi, cmd->task_id); + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); + + spin_unlock(&session->back_lock); +} + +static void qedi_process_text_resp(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_task_context *task_ctx; + struct iscsi_text_rsp *resp_hdr_ptr; + struct iscsi_text_response_hdr *cqe_text_response; + struct qedi_cmd *cmd; + int pld_len; + u32 *tmp; + + cmd = (struct qedi_cmd *)task->dd_data; + task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); + + cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response; + spin_lock(&session->back_lock); + resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr; + memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr)); + resp_hdr_ptr->opcode = cqe_text_response->opcode; + resp_hdr_ptr->flags = cqe_text_response->flags; + resp_hdr_ptr->hlength = 0; + + hton24(resp_hdr_ptr->dlength, + (cqe_text_response->hdr_second_dword & + ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK)); + tmp = (u32 *)resp_hdr_ptr->dlength; + + resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, + conn->session->age); + resp_hdr_ptr->ttt = cqe_text_response->ttt; + resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn); + resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn); + 
resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn); + + pld_len = cqe_text_response->hdr_second_dword & + ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK; + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; + + memset(task_ctx, '\0', sizeof(*task_ctx)); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } else { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n", + cmd->task_id, qedi_conn->iscsi_conn_id, + &cmd->io_cmd); + } + + cmd->state = RESPONSE_RECEIVED; + qedi_clear_task_idx(qedi, cmd->task_id); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, + qedi_conn->gen_pdu.resp_buf, + (qedi_conn->gen_pdu.resp_wr_ptr - + qedi_conn->gen_pdu.resp_buf)); + spin_unlock(&session->back_lock); +} + +static void qedi_tmf_resp_work(struct work_struct *work) +{ + struct qedi_cmd *qedi_cmd = + container_of(work, struct qedi_cmd, tmf_work); + struct qedi_conn *qedi_conn = qedi_cmd->conn; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_tm_rsp *resp_hdr_ptr; + struct iscsi_cls_session *cls_sess; + int rval = 0; + + set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); + resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf; + cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn); + + iscsi_block_session(session->cls_session); + rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true); + if (rval) { + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); + qedi_clear_task_idx(qedi, qedi_cmd->task_id); + iscsi_unblock_session(session->cls_session); + return; + } + + iscsi_unblock_session(session->cls_session); + qedi_clear_task_idx(qedi, qedi_cmd->task_id); + + spin_lock(&session->back_lock); + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); + spin_unlock(&session->back_lock); + kfree(resp_hdr_ptr); + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); +} + +static void qedi_process_tmf_resp(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) + +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_tmf_response_hdr *cqe_tmp_response; + struct iscsi_tm_rsp *resp_hdr_ptr; + struct iscsi_tm *tmf_hdr; + struct qedi_cmd *qedi_cmd = NULL; + u32 *tmp; + + cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; + + qedi_cmd = task->dd_data; + qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL); + if (!qedi_cmd->tmf_resp_buf) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to allocate resp buf, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return; + } + + spin_lock(&session->back_lock); + resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf; + memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp)); + + /* Fill up the header */ + resp_hdr_ptr->opcode = cqe_tmp_response->opcode; + resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags; + resp_hdr_ptr->response = cqe_tmp_response->hdr_response; + resp_hdr_ptr->hlength = 0; + + hton24(resp_hdr_ptr->dlength, + (cqe_tmp_response->hdr_second_dword & + ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK)); + tmp = (u32 *)resp_hdr_ptr->dlength; + resp_hdr_ptr->itt = 
build_itt(cqe->cqe_solicited.itid, + conn->session->age); + resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn); + resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn); + resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn); + + tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr; + + if (likely(qedi_cmd->io_cmd_in_list)) { + qedi_cmd->io_cmd_in_list = false; + list_del_init(&qedi_cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + + if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) || + ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_TARGET_WARM_RESET) || + ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_TARGET_COLD_RESET)) { + INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work); + queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work); + goto unblock_sess; + } + + qedi_clear_task_idx(qedi, qedi_cmd->task_id); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); + kfree(resp_hdr_ptr); + +unblock_sess: + spin_unlock(&session->back_lock); +} + +static void qedi_process_login_resp(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_task_context *task_ctx; + struct iscsi_login_rsp *resp_hdr_ptr; + struct iscsi_login_response_hdr *cqe_login_response; + struct qedi_cmd *cmd; + int pld_len; + u32 *tmp; + + cmd = (struct qedi_cmd *)task->dd_data; + + cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response; + task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); + + spin_lock(&session->back_lock); + resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr; + memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp)); + resp_hdr_ptr->opcode = cqe_login_response->opcode; + resp_hdr_ptr->flags = cqe_login_response->flags_attr; + resp_hdr_ptr->hlength = 0; + + hton24(resp_hdr_ptr->dlength, + (cqe_login_response->hdr_second_dword & + ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK)); + tmp = (u32 *)resp_hdr_ptr->dlength; + resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, + conn->session->age); + resp_hdr_ptr->tsih = cqe_login_response->tsih; + resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn); + resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn); + resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn); + resp_hdr_ptr->status_class = cqe_login_response->status_class; + resp_hdr_ptr->status_detail = cqe_login_response->status_detail; + pld_len = cqe_login_response->hdr_second_dword & + ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK; + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; + + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + + memset(task_ctx, '\0', sizeof(*task_ctx)); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, + qedi_conn->gen_pdu.resp_buf, + (qedi_conn->gen_pdu.resp_wr_ptr - + qedi_conn->gen_pdu.resp_buf)); + + spin_unlock(&session->back_lock); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + cmd->state = RESPONSE_RECEIVED; + qedi_clear_task_idx(qedi, cmd->task_id); +} + +static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi, + struct iscsi_cqe_unsolicited *cqe, + char *ptr, int len) +{ + u16 idx = 0; + + QEDI_INFO(&qedi->dbg_ctx, 
QEDI_LOG_CONN, + "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n", + len, qedi->bdq_prod_idx, + (qedi->bdq_prod_idx % qedi->rq_num_entries)); + + /* Obtain buffer address from rqe_opaque */ + idx = cqe->rqe_opaque.lo; + if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "wrong idx %d returned by FW, dropping the unsolicited pkt\n", + idx); + return; + } + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n", + cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "unsol_cqe_type = %d\n", cqe->unsol_cqe_type); + switch (cqe->unsol_cqe_type) { + case ISCSI_CQE_UNSOLICITED_SINGLE: + case ISCSI_CQE_UNSOLICITED_FIRST: + if (len) + memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len); + break; + case ISCSI_CQE_UNSOLICITED_MIDDLE: + case ISCSI_CQE_UNSOLICITED_LAST: + break; + default: + break; + } +} + +static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi, + struct iscsi_cqe_unsolicited *cqe, + int count) +{ + u16 tmp; + u16 idx = 0; + struct scsi_bd *pbl; + + /* Obtain buffer address from rqe_opaque */ + idx = cqe->rqe_opaque.lo; + if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "wrong idx %d returned by FW, dropping the unsolicited pkt\n", + idx); + return; + } + + pbl = (struct scsi_bd *)qedi->bdq_pbl; + pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries); + pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma)); + pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma)); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n", + pbl, pbl->address.hi, pbl->address.lo, idx); + pbl->opaque.hi = 0; + pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx)); + + /* Increment producer to let f/w know we've handled the frame */ + qedi->bdq_prod_idx += count; + + writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod); + tmp = readw(qedi->bdq_primary_prod); + + writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod); + tmp = readw(qedi->bdq_secondary_prod); +} + +static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi, + struct iscsi_cqe_unsolicited *cqe, + u32 pdu_len, u32 num_bdqs, + char *bdq_data) +{ + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "num_bdqs [%d]\n", num_bdqs); + + qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len); + qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1)); +} + +static int qedi_process_nopin_mesg(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn, u16 que_idx) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_nop_in_hdr *cqe_nop_in; + struct iscsi_nopin *hdr; + struct qedi_cmd *cmd; + int tgt_async_nop = 0; + u32 lun[2]; + u32 pdu_len, num_bdqs; + char bdq_data[QEDI_BDQ_BUF_SIZE]; + unsigned long flags; + + spin_lock_bh(&session->back_lock); + cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in; + + pdu_len = cqe_nop_in->hdr_second_dword & + ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK; + num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE; + + hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = cqe_nop_in->opcode; + hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn); + hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn); + hdr->ttt = cpu_to_be32(cqe_nop_in->ttt); + + if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { + 
spin_lock_irqsave(&qedi->hba_lock, flags); + qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, + pdu_len, num_bdqs, bdq_data); + hdr->itt = RESERVED_ITT; + tgt_async_nop = 1; + spin_unlock_irqrestore(&qedi->hba_lock, flags); + goto done; + } + + /* Response to one of our nop-outs */ + if (task) { + cmd = task->dd_data; + hdr->flags = ISCSI_FLAG_CMD_FINAL; + hdr->itt = build_itt(cqe->cqe_solicited.itid, + conn->session->age); + lun[0] = 0xffffffff; + lun[1] = 0xffffffff; + memcpy(&hdr->lun, lun, sizeof(struct scsi_lun)); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + cmd->state = RESPONSE_RECEIVED; + spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } + + spin_unlock(&qedi_conn->list_lock); + qedi_clear_task_idx(qedi, cmd->task_id); + } + +done: + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len); + + spin_unlock_bh(&session->back_lock); + return tgt_async_nop; +} + +static void qedi_process_async_mesg(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn, + u16 que_idx) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_async_msg_hdr *cqe_async_msg; + struct iscsi_async *resp_hdr; + u32 lun[2]; + u32 pdu_len, num_bdqs; + char bdq_data[QEDI_BDQ_BUF_SIZE]; + unsigned long flags; + + spin_lock_bh(&session->back_lock); + + cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg; + pdu_len = cqe_async_msg->hdr_second_dword & + ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK; + num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE; + + if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { + spin_lock_irqsave(&qedi->hba_lock, flags); + qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, + pdu_len, num_bdqs, bdq_data); + spin_unlock_irqrestore(&qedi->hba_lock, flags); + } + + resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = cqe_async_msg->opcode; + resp_hdr->flags = 0x80; + + lun[0] = cpu_to_be32(cqe_async_msg->lun.lo); + lun[1] = cpu_to_be32(cqe_async_msg->lun.hi); + memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun)); + resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn); + resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn); + + resp_hdr->async_event = cqe_async_msg->async_event; + resp_hdr->async_vcode = cqe_async_msg->async_vcode; + + resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv); + resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv); + resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data, + pdu_len); + + spin_unlock_bh(&session->back_lock); +} + +static void qedi_process_reject_mesg(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn, + uint16_t que_idx) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_reject_hdr *cqe_reject; + struct iscsi_reject *hdr; + u32 pld_len, num_bdqs; + unsigned long flags; + + spin_lock_bh(&session->back_lock); + cqe_reject = &cqe->cqe_common.iscsi_hdr.reject; + pld_len = cqe_reject->hdr_second_dword & + ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK; + num_bdqs = pld_len / 
QEDI_BDQ_BUF_SIZE; + + if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { + spin_lock_irqsave(&qedi->hba_lock, flags); + qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, + pld_len, num_bdqs, conn->data); + spin_unlock_irqrestore(&qedi->hba_lock, flags); + } + hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = cqe_reject->opcode; + hdr->reason = cqe_reject->hdr_reason; + hdr->flags = cqe_reject->hdr_flags; + hton24(hdr->dlength, (cqe_reject->hdr_second_dword & + ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK)); + hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn); + hdr->statsn = cpu_to_be32(cqe_reject->stat_sn); + hdr->ffffffff = cpu_to_be32(0xffffffff); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, + conn->data, pld_len); + spin_unlock_bh(&session->back_lock); +} + +static void qedi_scsi_completion(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct iscsi_conn *conn) +{ + struct scsi_cmnd *sc_cmd; + struct qedi_cmd *cmd = task->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_scsi_rsp *hdr; + struct iscsi_data_in_hdr *cqe_data_in; + int datalen = 0; + struct qedi_conn *qedi_conn; + u32 iscsi_cid; + bool mark_cmd_node_deleted = false; + u8 cqe_err_bits = 0; + + iscsi_cid = cqe->cqe_common.conn_id; + qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + + cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in; + cqe_err_bits = + cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits; + + spin_lock_bh(&session->back_lock); + /* get the scsi command */ + sc_cmd = cmd->scsi_cmd; + + if (!sc_cmd) { + QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n"); + goto error; + } + + if (!sc_cmd->SCp.ptr) { + QEDI_WARN(&qedi->dbg_ctx, + "SCp.ptr is NULL, returned in another context.\n"); + goto error; + } + + if (!sc_cmd->request) { + QEDI_WARN(&qedi->dbg_ctx, + "sc_cmd->request is NULL, sc_cmd=%p.\n", + sc_cmd); + goto error; + } + + if (!sc_cmd->request->special) { + QEDI_WARN(&qedi->dbg_ctx, + "request->special is NULL so request not valid, sc_cmd=%p.\n", + sc_cmd); + goto error; + } + + if (!sc_cmd->request->q) { + QEDI_WARN(&qedi->dbg_ctx, + "request->q is NULL so request is not valid, sc_cmd=%p.\n", + sc_cmd); + goto error; + } + + qedi_iscsi_unmap_sg_list(cmd); + + hdr = (struct iscsi_scsi_rsp *)task->hdr; + hdr->opcode = cqe_data_in->opcode; + hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn); + hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); + hdr->response = cqe_data_in->reserved1; + hdr->cmd_status = cqe_data_in->status_rsvd; + hdr->flags = cqe_data_in->flags; + hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count); + + if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) { + datalen = cqe_data_in->reserved2 & + ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK; + memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen); + } + + /* If f/w reports data underrun err then set residual to IO transfer + * length, set Underrun flag and clear Overrun flag explicitly + */ + if (unlikely(cqe_err_bits && + GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n", + hdr->itt, cqe_data_in->flags, cmd->task_id, + qedi_conn->iscsi_conn_id, hdr->residual_count, + scsi_bufflen(sc_cmd)); + hdr->residual_count = 
cpu_to_be32(scsi_bufflen(sc_cmd)); + hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; + hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW); + } + + spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + mark_cmd_node_deleted = true; + } + spin_unlock(&qedi_conn->list_lock); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + cmd->state = RESPONSE_RECEIVED; + if (qedi_io_tracing) + qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP); + + qedi_clear_task_idx(qedi, cmd->task_id); + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, + conn->data, datalen); +error: + spin_unlock_bh(&session->back_lock); +} + +static void qedi_mtask_completion(struct qedi_ctx *qedi, + union iscsi_cqe *cqe, + struct iscsi_task *task, + struct qedi_conn *conn, uint16_t que_idx) +{ + struct iscsi_conn *iscsi_conn; + u32 hdr_opcode; + + hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte; + iscsi_conn = conn->cls_conn->dd_data; + + switch (hdr_opcode) { + case ISCSI_OPCODE_SCSI_RESPONSE: + case ISCSI_OPCODE_DATA_IN: + qedi_scsi_completion(qedi, cqe, task, iscsi_conn); + break; + case ISCSI_OPCODE_LOGIN_RESPONSE: + qedi_process_login_resp(qedi, cqe, task, conn); + break; + case ISCSI_OPCODE_TMF_RESPONSE: + qedi_process_tmf_resp(qedi, cqe, task, conn); + break; + case ISCSI_OPCODE_TEXT_RESPONSE: + qedi_process_text_resp(qedi, cqe, task, conn); + break; + case ISCSI_OPCODE_LOGOUT_RESPONSE: + qedi_process_logout_resp(qedi, cqe, task, conn); + break; + case ISCSI_OPCODE_NOP_IN: + qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx); + break; + default: + QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n"); + } +} + +static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi, + struct iscsi_cqe_solicited *cqe, + struct iscsi_task *task, + struct qedi_conn *qedi_conn) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct qedi_cmd *cmd = task->dd_data; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL, + "itid=0x%x, cmd task id=0x%x\n", + cqe->itid, cmd->task_id); + + cmd->state = RESPONSE_RECEIVED; + qedi_clear_task_idx(qedi, cmd->task_id); + + spin_lock_bh(&session->back_lock); + __iscsi_put_task(task); + spin_unlock_bh(&session->back_lock); +} + +static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, + struct iscsi_cqe_solicited *cqe, + struct iscsi_task *task, + struct iscsi_conn *conn) +{ + struct qedi_work_map *work, *work_tmp; + u32 proto_itt = cqe->itid; + u32 ptmp_itt = 0; + itt_t protoitt = 0; + int found = 0; + struct qedi_cmd *qedi_cmd = NULL; + u32 rtid = 0; + u32 iscsi_cid; + struct qedi_conn *qedi_conn; + struct qedi_cmd *cmd_new, *dbg_cmd; + struct iscsi_task *mtask; + struct iscsi_tm *tmf_hdr = NULL; + + iscsi_cid = cqe->conn_id; + qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + + /* Based on this itt get the corresponding qedi_cmd */ + spin_lock_bh(&qedi_conn->tmf_work_lock); + list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list, + list) { + if (work->rtid == proto_itt) { + /* We found the command */ + qedi_cmd = work->qedi_cmd; + if (!qedi_cmd->list_tmf_work) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "TMF work not found, cqe->tid=0x%x, cid=0x%x\n", + proto_itt, qedi_conn->iscsi_conn_id); + WARN_ON(1); + } + found = 1; + mtask = qedi_cmd->task; + tmf_hdr = (struct iscsi_tm *)mtask->hdr; + rtid = work->rtid; + + 
list_del_init(&work->list); + kfree(work); + qedi_cmd->list_tmf_work = NULL; + } + } + spin_unlock_bh(&qedi_conn->tmf_work_lock); + + if (found) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n", + proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id); + + if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_ABORT_TASK) { + spin_lock_bh(&conn->session->back_lock); + + protoitt = build_itt(get_itt(tmf_hdr->rtt), + conn->session->age); + task = iscsi_itt_to_task(conn, protoitt); + + spin_unlock_bh(&conn->session->back_lock); + + if (!task) { + QEDI_NOTICE(&qedi->dbg_ctx, + "IO task completed, tmf rtt=0x%x, cid=0x%x\n", + get_itt(tmf_hdr->rtt), + qedi_conn->iscsi_conn_id); + return; + } + + dbg_cmd = task->dd_data; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n", + get_itt(tmf_hdr->rtt), get_itt(task->itt), + dbg_cmd->task_id, qedi_conn->iscsi_conn_id); + + if (qedi_cmd->state == CLEANUP_WAIT_FAILED) + qedi_cmd->state = CLEANUP_RECV; + + qedi_clear_task_idx(qedi_conn->qedi, rtid); + + spin_lock(&qedi_conn->list_lock); + list_del_init(&dbg_cmd->io_cmd); + qedi_conn->active_cmd_count--; + spin_unlock(&qedi_conn->list_lock); + qedi_cmd->state = CLEANUP_RECV; + wake_up_interruptible(&qedi_conn->wait_queue); + } + } else if (qedi_conn->cmd_cleanup_req > 0) { + spin_lock_bh(&conn->session->back_lock); + qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt); + protoitt = build_itt(ptmp_itt, conn->session->age); + task = iscsi_itt_to_task(conn, protoitt); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n", + cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl, + qedi_conn->iscsi_conn_id); + + spin_unlock_bh(&conn->session->back_lock); + if (!task) { + QEDI_NOTICE(&qedi->dbg_ctx, + "task is null, itid=0x%x, cid=0x%x\n", + cqe->itid, qedi_conn->iscsi_conn_id); + return; + } + qedi_conn->cmd_cleanup_cmpl++; + wake_up(&qedi_conn->wait_queue); + cmd_new = task->dd_data; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + "Freeing tid=0x%x for cid=0x%x\n", + cqe->itid, qedi_conn->iscsi_conn_id); + qedi_clear_task_idx(qedi_conn->qedi, cqe->itid); + + } else { + qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt); + protoitt = build_itt(ptmp_itt, conn->session->age); + task = iscsi_itt_to_task(conn, protoitt); + QEDI_ERR(&qedi->dbg_ctx, + "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", + protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); + WARN_ON(1); + } +} + +void qedi_fp_process_cqes(struct qedi_work *work) +{ + struct qedi_ctx *qedi = work->qedi; + union iscsi_cqe *cqe = &work->cqe; + struct iscsi_task *task = NULL; + struct iscsi_nopout *nopout_hdr; + struct qedi_conn *q_conn; + struct iscsi_conn *conn; + struct qedi_cmd *qedi_cmd; + u32 comp_type; + u32 iscsi_cid; + u32 hdr_opcode; + u16 que_idx = work->que_idx; + u8 cqe_err_bits = 0; + + comp_type = cqe->cqe_common.cqe_type; + hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte; + cqe_err_bits = + cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n", + cqe->cqe_common.conn_id, comp_type, hdr_opcode); + + if (comp_type >= MAX_ISCSI_CQES_TYPE) { + QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n"); + return; + } + + iscsi_cid = cqe->cqe_common.conn_id; + q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + if (!q_conn) { + 
QEDI_WARN(&qedi->dbg_ctx, + "Session no longer exists for cid=0x%x!!\n", + iscsi_cid); + return; + } + + conn = q_conn->cls_conn->dd_data; + + if (unlikely(cqe_err_bits && + GET_FIELD(cqe_err_bits, + CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) { + iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); + return; + } + + switch (comp_type) { + case ISCSI_CQE_TYPE_SOLICITED: + case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE: + qedi_cmd = container_of(work, struct qedi_cmd, cqe_work); + task = qedi_cmd->task; + if (!task) { + QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n"); + return; + } + + /* Process NOPIN local completion */ + nopout_hdr = (struct iscsi_nopout *)task->hdr; + if ((nopout_hdr->itt == RESERVED_ITT) && + (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) { + qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited, + task, q_conn); + } else { + cqe->cqe_solicited.itid = + qedi_get_itt(cqe->cqe_solicited); + /* Process other solicited responses */ + qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx); + } + break; + case ISCSI_CQE_TYPE_UNSOLICITED: + switch (hdr_opcode) { + case ISCSI_OPCODE_NOP_IN: + qedi_process_nopin_mesg(qedi, cqe, task, q_conn, + que_idx); + break; + case ISCSI_OPCODE_ASYNC_MSG: + qedi_process_async_mesg(qedi, cqe, task, q_conn, + que_idx); + break; + case ISCSI_OPCODE_REJECT: + qedi_process_reject_mesg(qedi, cqe, task, q_conn, + que_idx); + break; + } + goto exit_fp_process; + case ISCSI_CQE_TYPE_DUMMY: + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n"); + goto exit_fp_process; + case ISCSI_CQE_TYPE_TASK_CLEANUP: + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n"); + qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task, + conn); + goto exit_fp_process; + default: + QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n"); + break; + } + +exit_fp_process: + return; +} + +static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task, + u16 tid, uint16_t ptu_invalidate, int is_cleanup) +{ + struct iscsi_wqe *wqe; + struct iscsi_wqe_field *cont_field; + struct qedi_endpoint *ep; + struct scsi_cmnd *sc = task->sc; + struct iscsi_login_req *login_hdr; + struct qedi_cmd *cmd = task->dd_data; + + login_hdr = (struct iscsi_login_req *)task->hdr; + ep = qedi_conn->ep; + wqe = &ep->sq[ep->sq_prod_idx]; + + memset(wqe, 0, sizeof(*wqe)); + + ep->sq_prod_idx++; + ep->fw_sq_prod_idx++; + if (ep->sq_prod_idx == QEDI_SQ_SIZE) + ep->sq_prod_idx = 0; + + if (is_cleanup) { + SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_TASK_CLEANUP); + wqe->task_id = tid; + return; + } + + if (ptu_invalidate) { + SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE, + ISCSI_WQE_SET_PTU_INVALIDATE); + } + + cont_field = &wqe->cont_prevtid_union.cont_field; + + switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { + case ISCSI_OP_LOGIN: + case ISCSI_OP_TEXT: + SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_MIDDLE_PATH); + SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, + 1); + cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength); + break; + case ISCSI_OP_LOGOUT: + case ISCSI_OP_NOOP_OUT: + case ISCSI_OP_SCSI_TMFUNC: + SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_NORMAL); + break; + default: + if (!sc) + break; + + SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_NORMAL); + cont_field->contlen_cdbsize_field = + (sc->sc_data_direction == DMA_TO_DEVICE) ? 
+ scsi_bufflen(sc) : 0; + if (cmd->use_slowpath) + SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0); + else + SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, + (sc->sc_data_direction == + DMA_TO_DEVICE) ? + min((u16)QEDI_FAST_SGE_COUNT, + (u16)cmd->io_tbl.sge_valid) : 0); + break; + } + + wqe->task_id = tid; + /* Make sure SQ data is coherent */ + wmb(); +} + +static void qedi_ring_doorbell(struct qedi_conn *qedi_conn) +{ + struct iscsi_db_data dbell = { 0 }; + + dbell.agg_flags = 0; + + dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT; + dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT; + dbell.params |= + DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT; + + dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx; + writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell); + + /* Make sure fw write idx is coherent, and include both memory barriers + * as a failsafe as for some architectures the call is the same but on + * others they are two different assembly operations. + */ + wmb(); + mmiowb(); + QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ, + "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n", + qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx, + qedi_conn->iscsi_conn_id); +} + +int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, + struct iscsi_task *task) +{ + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_login_req *login_hdr; + struct iscsi_login_req_hdr *fw_login_req = NULL; + struct iscsi_cached_sge_ctx *cached_sge = NULL; + struct iscsi_sge *single_sge = NULL; + struct iscsi_sge *req_sge = NULL; + struct iscsi_sge *resp_sge = NULL; + struct qedi_cmd *qedi_cmd; + s16 ptu_invalidate = 0; + s16 tid = 0; + + req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + qedi_cmd = (struct qedi_cmd *)task->dd_data; + login_hdr = (struct iscsi_login_req *)task->hdr; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + qedi_cmd->task_id = tid; + + /* Ystorm context */ + fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req; + fw_login_req->opcode = login_hdr->opcode; + fw_login_req->version_min = login_hdr->min_version; + fw_login_req->version_max = login_hdr->max_version; + fw_login_req->flags_attr = login_hdr->flags; + fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2); + fw_login_req->isid_d = *((u32 *)login_hdr->isid); + fw_login_req->tsih = login_hdr->tsih; + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt)); + fw_login_req->cid = qedi_conn->iscsi_conn_id; + fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn); + fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); + fw_login_req->exp_stat_sn = 0; + + if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { + ptu_invalidate = 1; + qedi->tid_reuse_count[tid] = 0; + } + + fw_task_ctx->ystorm_st_context.state.reuse_count = + qedi->tid_reuse_count[tid]; + fw_task_ctx->mstorm_st_context.reuse_count = + qedi->tid_reuse_count[tid]++; + cached_sge = + &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge; + cached_sge->sge.sge_len = req_sge->sge_len; + cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); + cached_sge->sge.sge_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + + /* Mstorm context */ + single_sge = 
&fw_task_ctx->mstorm_st_context.sgl_union.single_sge; + fw_task_ctx->mstorm_st_context.task_type = 0x2; + fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; + single_sge->sge_addr.lo = resp_sge->sge_addr.lo; + single_sge->sge_addr.hi = resp_sge->sge_addr.hi; + single_sge->sge_len = resp_sge->sge_len; + + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SINGLE_SGE, 1); + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SLOW_IO, 0); + fw_task_ctx->mstorm_st_context.sgl_size = 1; + fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; + + /* Ustorm context */ + fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len; + fw_task_ctx->ustorm_st_context.exp_data_transfer_len = + ntoh24(login_hdr->dlength); + fw_task_ctx->ustorm_st_context.exp_data_sn = 0; + fw_task_ctx->ustorm_st_context.cq_rss_number = 0; + fw_task_ctx->ustorm_st_context.task_type = 0x2; + fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; + fw_task_ctx->ustorm_ag_context.exp_data_acked = + ntoh24(login_hdr->dlength); + SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, + USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + SET_FIELD(fw_task_ctx->ustorm_st_context.flags, + USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); + qedi_ring_doorbell(qedi_conn); + return 0; +} + +int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, + struct iscsi_task *task) +{ + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_logout_req_hdr *fw_logout_req = NULL; + struct iscsi_task_context *fw_task_ctx = NULL; + struct iscsi_logout *logout_hdr = NULL; + struct qedi_cmd *qedi_cmd = NULL; + s16 tid = 0; + s16 ptu_invalidate = 0; + + qedi_cmd = (struct qedi_cmd *)task->dd_data; + logout_hdr = (struct iscsi_logout *)task->hdr; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + qedi_cmd->task_id = tid; + + /* Ystorm context */ + fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req; + fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST; + fw_logout_req->reason_code = 0x80 | logout_hdr->flags; + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt)); + fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn); + fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn); + + if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { + ptu_invalidate = 1; + qedi->tid_reuse_count[tid] = 0; + } + fw_task_ctx->ystorm_st_context.state.reuse_count = + qedi->tid_reuse_count[tid]; + fw_task_ctx->mstorm_st_context.reuse_count = + qedi->tid_reuse_count[tid]++; + fw_logout_req->cid = qedi_conn->iscsi_conn_id; + fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0; + + /* Mstorm context */ + fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; + fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; + + /* Ustorm context */ + fw_task_ctx->ustorm_st_context.rem_rcv_len = 0; + fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0; + fw_task_ctx->ustorm_st_context.exp_data_sn = 0; + fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; + 
fw_task_ctx->ustorm_st_context.cq_rss_number = 0; + + SET_FIELD(fw_task_ctx->ustorm_st_context.flags, + USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); + SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, + ISCSI_REG1_NUM_FAST_SGES, 0); + + fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; + SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, + USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); + qedi_ring_doorbell(qedi_conn); + + return 0; +} + +int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, + struct iscsi_task *task, bool in_recovery) +{ + int rval; + struct iscsi_task *ctask; + struct qedi_cmd *cmd, *cmd_tmp; + struct iscsi_tm *tmf_hdr; + unsigned int lun = 0; + bool lun_reset = false; + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + + /* From recovery, task is NULL or from tmf resp valid task */ + if (task) { + tmf_hdr = (struct iscsi_tm *)task->hdr; + + if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) { + lun_reset = true; + lun = scsilun_to_int(&tmf_hdr->lun); + } + } + + qedi_conn->cmd_cleanup_req = 0; + qedi_conn->cmd_cleanup_cmpl = 0; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n", + qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id, + in_recovery, lun_reset); + + if (lun_reset) + spin_lock_bh(&session->back_lock); + + spin_lock(&qedi_conn->list_lock); + + list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list, + io_cmd) { + ctask = cmd->task; + if (ctask == task) + continue; + + if (lun_reset) { + if (cmd->scsi_cmd && cmd->scsi_cmd->device) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n", + cmd->task_id, get_itt(ctask->itt), + cmd->scsi_cmd, cmd->scsi_cmd->device, + ctask->state, cmd->state, + qedi_conn->iscsi_conn_id); + if (cmd->scsi_cmd->device->lun != lun) + continue; + } + } + qedi_conn->cmd_cleanup_req++; + qedi_iscsi_cleanup_task(ctask, true); + + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + QEDI_WARN(&qedi->dbg_ctx, + "Deleted active cmd list node io_cmd=%p, cid=0x%x\n", + &cmd->io_cmd, qedi_conn->iscsi_conn_id); + } + + spin_unlock(&qedi_conn->list_lock); + + if (lun_reset) + spin_unlock_bh(&session->back_lock); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "cmd_cleanup_req=%d, cid=0x%x\n", + qedi_conn->cmd_cleanup_req, + qedi_conn->iscsi_conn_id); + + rval = wait_event_interruptible_timeout(qedi_conn->wait_queue, + ((qedi_conn->cmd_cleanup_req == + qedi_conn->cmd_cleanup_cmpl) || + qedi_conn->ep), + 5 * HZ); + if (rval) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", + qedi_conn->cmd_cleanup_req, + qedi_conn->cmd_cleanup_cmpl, + qedi_conn->iscsi_conn_id); + + return 0; + } + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", + qedi_conn->cmd_cleanup_req, + qedi_conn->cmd_cleanup_cmpl, + qedi_conn->iscsi_conn_id); + + iscsi_host_for_each_session(qedi->shost, + qedi_mark_device_missing); + qedi_ops->common->drain(qedi->cdev); + + /* Enable IOs for all 
other sessions except current. */ + if (!wait_event_interruptible_timeout(qedi_conn->wait_queue, + (qedi_conn->cmd_cleanup_req == + qedi_conn->cmd_cleanup_cmpl), + 5 * HZ)) { + iscsi_host_for_each_session(qedi->shost, + qedi_mark_device_available); + return -1; + } + + iscsi_host_for_each_session(qedi->shost, + qedi_mark_device_available); + + return 0; +} + +void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, + struct iscsi_task *task) +{ + struct qedi_endpoint *qedi_ep; + int rval; + + qedi_ep = qedi_conn->ep; + qedi_conn->cmd_cleanup_req = 0; + qedi_conn->cmd_cleanup_cmpl = 0; + + if (!qedi_ep) { + QEDI_WARN(&qedi->dbg_ctx, + "Cannot proceed, ep already disconnected, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return; + } + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n", + qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep); + + qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle); + + rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true); + if (rval) { + QEDI_ERR(&qedi->dbg_ctx, + "fatal error, need hard reset, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + WARN_ON(1); + } +} + +static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn, + struct iscsi_task *task, + struct qedi_cmd *qedi_cmd, + struct qedi_work_map *list_work) +{ + struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data; + int wait; + + wait = wait_event_interruptible_timeout(qedi_conn->wait_queue, + ((qedi_cmd->state == + CLEANUP_RECV) || + ((qedi_cmd->type == TYPEIO) && + (cmd->state == + RESPONSE_RECEIVED))), + 5 * HZ); + if (!wait) { + qedi_cmd->state = CLEANUP_WAIT_FAILED; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "Cleanup timed out, tid=0x%x, issue connection recovery, cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + + return -1; + } + return 0; +} + +static void qedi_tmf_work(struct work_struct *work) +{ + struct qedi_cmd *qedi_cmd = + container_of(work, struct qedi_cmd, tmf_work); + struct qedi_conn *qedi_conn = qedi_cmd->conn; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_cls_session *cls_sess; + struct qedi_work_map *list_work = NULL; + struct iscsi_task *mtask; + struct qedi_cmd *cmd; + struct iscsi_task *ctask; + struct iscsi_tm *tmf_hdr; + s16 rval = 0; + s16 tid = 0; + + mtask = qedi_cmd->task; + tmf_hdr = (struct iscsi_tm *)mtask->hdr; + cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn); + set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); + + ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt); + if (!ctask || !ctask->sc) { + QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n"); + goto abort_ret; + } + + cmd = (struct qedi_cmd *)ctask->dd_data; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n", + get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id, + qedi_conn->iscsi_conn_id); + + if (do_not_recover) { + QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n", + do_not_recover); + goto abort_ret; + } + + list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC); + if (!list_work) { + QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n"); + goto abort_ret; + } + + qedi_cmd->type = TYPEIO; + list_work->qedi_cmd = qedi_cmd; + list_work->rtid = cmd->task_id; + list_work->state = QEDI_WORK_SCHEDULED; + qedi_cmd->list_tmf_work = list_work; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n", + list_work->ptr_tmf_work, list_work, 
qedi_conn->iscsi_conn_id, + tmf_hdr->flags); + + spin_lock_bh(&qedi_conn->tmf_work_lock); + list_add_tail(&list_work->list, &qedi_conn->tmf_work_list); + spin_unlock_bh(&qedi_conn->tmf_work_lock); + + qedi_iscsi_cleanup_task(ctask, false); + + rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd, + list_work); + if (rval == -1) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "FW cleanup got escalated, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + goto ldel_exit; + } + + tid = qedi_get_task_idx(qedi); + if (tid == -1) { + QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + goto ldel_exit; + } + + qedi_cmd->task_id = tid; + qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task); + +abort_ret: + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); + return; + +ldel_exit: + spin_lock_bh(&qedi_conn->tmf_work_lock); + if (!qedi_cmd->list_tmf_work) { + list_del_init(&list_work->list); + qedi_cmd->list_tmf_work = NULL; + kfree(list_work); + } + spin_unlock_bh(&qedi_conn->tmf_work_lock); + + spin_lock(&qedi_conn->list_lock); + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + spin_unlock(&qedi_conn->list_lock); + + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); +} + +static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, + struct iscsi_task *mtask) +{ + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_tmf_request_hdr *fw_tmf_request; + struct iscsi_sge *single_sge; + struct qedi_cmd *qedi_cmd; + struct qedi_cmd *cmd; + struct iscsi_task *ctask; + struct iscsi_tm *tmf_hdr; + struct iscsi_sge *req_sge; + struct iscsi_sge *resp_sge; + u32 lun[2]; + s16 tid = 0, ptu_invalidate = 0; + + req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + qedi_cmd = (struct qedi_cmd *)mtask->dd_data; + tmf_hdr = (struct iscsi_tm *)mtask->hdr; + + tid = qedi_cmd->task_id; + qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd); + + fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request; + fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt)); + fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn); + + memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun)); + fw_tmf_request->lun.lo = be32_to_cpu(lun[0]); + fw_tmf_request->lun.hi = be32_to_cpu(lun[1]); + + if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { + ptu_invalidate = 1; + qedi->tid_reuse_count[tid] = 0; + } + fw_task_ctx->ystorm_st_context.state.reuse_count = + qedi->tid_reuse_count[tid]; + fw_task_ctx->mstorm_st_context.reuse_count = + qedi->tid_reuse_count[tid]++; + + if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_ABORT_TASK) { + ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt); + if (!ctask || !ctask->sc) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not get reference task\n"); + return 0; + } + cmd = (struct qedi_cmd *)ctask->dd_data; + fw_tmf_request->rtt = + qedi_set_itt(cmd->task_id, + get_itt(tmf_hdr->rtt)); + } else { + fw_tmf_request->rtt = ISCSI_RESERVED_TAG; + } + + fw_tmf_request->opcode = tmf_hdr->opcode; + fw_tmf_request->function = tmf_hdr->flags; + fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength); + fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn); + + single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; + 
fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; + fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; + single_sge->sge_addr.lo = resp_sge->sge_addr.lo; + single_sge->sge_addr.hi = resp_sge->sge_addr.hi; + single_sge->sge_len = resp_sge->sge_len; + + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SINGLE_SGE, 1); + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SLOW_IO, 0); + fw_task_ctx->mstorm_st_context.sgl_size = 1; + fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; + + /* Ustorm context */ + fw_task_ctx->ustorm_st_context.rem_rcv_len = 0; + fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0; + fw_task_ctx->ustorm_st_context.exp_data_sn = 0; + fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; + fw_task_ctx->ustorm_st_context.cq_rss_number = 0; + + SET_FIELD(fw_task_ctx->ustorm_st_context.flags, + USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); + SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, + ISCSI_REG1_NUM_FAST_SGES, 0); + + fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; + SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, + USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]); + fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n", + tid, mtask->itt, qedi_conn->iscsi_conn_id); + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false); + qedi_ring_doorbell(qedi_conn); + return 0; +} + +int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn, + struct iscsi_task *mtask) +{ + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_tm *tmf_hdr; + struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data; + s16 tid = 0; + + tmf_hdr = (struct iscsi_tm *)mtask->hdr; + qedi_cmd->task = mtask; + + /* If abort task then schedule the work and return */ + if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_ABORT_TASK) { + qedi_cmd->state = CLEANUP_WAIT; + INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work); + queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work); + + } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) || + ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_TARGET_WARM_RESET) || + ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_TARGET_COLD_RESET)) { + tid = qedi_get_task_idx(qedi); + if (tid == -1) { + QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return -1; + } + qedi_cmd->task_id = tid; + + qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task); + + } else { + QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return -1; + } + + return 0; +} + +int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, + struct iscsi_task *task) +{ + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_text_request_hdr *fw_text_request; + struct iscsi_cached_sge_ctx *cached_sge; + struct iscsi_sge *single_sge; + struct qedi_cmd *qedi_cmd; + /* For 6.5 hdr iscsi_hdr */ + struct iscsi_text *text_hdr; + struct iscsi_sge *req_sge; + struct iscsi_sge *resp_sge; + s16 ptu_invalidate = 0; + s16 tid = 0; + + req_sge 
= (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + qedi_cmd = (struct qedi_cmd *)task->dd_data; + text_hdr = (struct iscsi_text *)task->hdr; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + + qedi_cmd->task_id = tid; + + /* Ystorm context */ + fw_text_request = + &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request; + fw_text_request->opcode = text_hdr->opcode; + fw_text_request->flags_attr = text_hdr->flags; + + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt)); + fw_text_request->ttt = text_hdr->ttt; + fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn); + fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn); + fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength); + + if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { + ptu_invalidate = 1; + qedi->tid_reuse_count[tid] = 0; + } + fw_task_ctx->ystorm_st_context.state.reuse_count = + qedi->tid_reuse_count[tid]; + fw_task_ctx->mstorm_st_context.reuse_count = + qedi->tid_reuse_count[tid]++; + + cached_sge = + &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge; + cached_sge->sge.sge_len = req_sge->sge_len; + cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); + cached_sge->sge.sge_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + + /* Mstorm context */ + single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; + fw_task_ctx->mstorm_st_context.task_type = 0x2; + fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; + single_sge->sge_addr.lo = resp_sge->sge_addr.lo; + single_sge->sge_addr.hi = resp_sge->sge_addr.hi; + single_sge->sge_len = resp_sge->sge_len; + + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SINGLE_SGE, 1); + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SLOW_IO, 0); + fw_task_ctx->mstorm_st_context.sgl_size = 1; + fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; + + /* Ustorm context */ + fw_task_ctx->ustorm_ag_context.exp_data_acked = + ntoh24(text_hdr->dlength); + fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len; + fw_task_ctx->ustorm_st_context.exp_data_transfer_len = + ntoh24(text_hdr->dlength); + fw_task_ctx->ustorm_st_context.exp_data_sn = + be32_to_cpu(text_hdr->exp_statsn); + fw_task_ctx->ustorm_st_context.cq_rss_number = 0; + fw_task_ctx->ustorm_st_context.task_type = 0x2; + fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; + SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, + USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + + /* Add command in active command list */ + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); + qedi_ring_doorbell(qedi_conn); + + return 0; +} + +int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, + struct iscsi_task *task, + char *datap, int data_len, int unsol) +{ + struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_nop_out_hdr *fw_nop_out; + struct qedi_cmd *qedi_cmd; + /* For 6.5 hdr iscsi_hdr */ + struct iscsi_nopout *nopout_hdr; + struct iscsi_cached_sge_ctx 
*cached_sge; + struct iscsi_sge *single_sge; + struct iscsi_sge *req_sge; + struct iscsi_sge *resp_sge; + u32 lun[2]; + s16 ptu_invalidate = 0; + s16 tid = 0; + + req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + qedi_cmd = (struct qedi_cmd *)task->dd_data; + nopout_hdr = (struct iscsi_nopout *)task->hdr; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) { + QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n"); + return -ENOMEM; + } + + fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + qedi_cmd->task_id = tid; + + /* Ystorm context */ + fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out; + SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1); + SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0); + + memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun)); + fw_nop_out->lun.lo = be32_to_cpu(lun[0]); + fw_nop_out->lun.hi = be32_to_cpu(lun[1]); + + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + + if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) { + fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt); + fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt); + fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0; + fw_task_ctx->ystorm_st_context.state.local_comp = 1; + SET_FIELD(fw_task_ctx->ustorm_st_context.flags, + USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1); + } else { + fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt)); + fw_nop_out->ttt = ISCSI_TTT_ALL_ONES; + fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0; + + spin_lock(&qedi_conn->list_lock); + list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); + qedi_cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + } + + fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT; + fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); + fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn); + + cached_sge = + &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge; + cached_sge->sge.sge_len = req_sge->sge_len; + cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); + cached_sge->sge.sge_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + + /* Mstorm context */ + fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; + fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; + + single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; + single_sge->sge_addr.lo = resp_sge->sge_addr.lo; + single_sge->sge_addr.hi = resp_sge->sge_addr.hi; + single_sge->sge_len = resp_sge->sge_len; + fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; + + if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { + ptu_invalidate = 1; + qedi->tid_reuse_count[tid] = 0; + } + fw_task_ctx->ystorm_st_context.state.reuse_count = + qedi->tid_reuse_count[tid]; + fw_task_ctx->mstorm_st_context.reuse_count = + qedi->tid_reuse_count[tid]++; + /* Ustorm context */ + fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len; + fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len; + fw_task_ctx->ustorm_st_context.exp_data_sn = 0; + fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; + fw_task_ctx->ustorm_st_context.cq_rss_number = 0; + + SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, + ISCSI_REG1_NUM_FAST_SGES, 0); + + fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; + SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, + 
USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + + fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]); + fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]); + + qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); + qedi_ring_doorbell(qedi_conn); + return 0; +} + +static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len, + int bd_index) +{ + struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; + int frag_size, sg_frags; + + sg_frags = 0; + + while (sg_len) { + if (addr % QEDI_PAGE_SIZE) + frag_size = + (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE)); + else + frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 : + (sg_len % QEDI_BD_SPLIT_SZ); + + if (frag_size == 0) + frag_size = QEDI_BD_SPLIT_SZ; + + bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff); + bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32); + bd[bd_index + sg_frags].sge_len = (u16)frag_size; + QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO, + "split sge %d: addr=%llx, len=%x", + (bd_index + sg_frags), addr, frag_size); + + addr += (u64)frag_size; + sg_frags++; + sg_len -= frag_size; + } + return sg_frags; +} + +static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; + struct scatterlist *sg; + int byte_count = 0; + int bd_count = 0; + int sg_count; + int sg_len; + int sg_frags; + u64 addr, end_addr; + int i; + + WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD); + + sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + + /* + * New condition to send single SGE as cached-SGL. + * Single SGE with length less than 64K. + */ + sg = scsi_sglist(sc); + if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) { + sg_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + + bd[bd_count].sge_addr.lo = (addr & 0xffffffff); + bd[bd_count].sge_addr.hi = (addr >> 32); + bd[bd_count].sge_len = (u16)sg_len; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, + "single-cached-sgl: bd_count:%d addr=%llx, len=%x", + sg_count, addr, sg_len); + + return ++bd_count; + } + + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + end_addr = (addr + sg_len); + + /* + * first sg elem in the 'list', + * check if end addr is page-aligned. + */ + if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE)) + cmd->use_slowpath = true; + + /* + * last sg elem in the 'list', + * check if start addr is page-aligned. 
+ */ + else if ((i == (sg_count - 1)) && + (sg_count > 1) && (addr % QEDI_PAGE_SIZE)) + cmd->use_slowpath = true; + + /* + * middle sg elements in list, + * check if start and end addr is page-aligned + */ + else if ((i != 0) && (i != (sg_count - 1)) && + ((addr % QEDI_PAGE_SIZE) || + (end_addr % QEDI_PAGE_SIZE))) + cmd->use_slowpath = true; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x", + i, sg_len); + + if (sg_len > QEDI_BD_SPLIT_SZ) { + sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count); + } else { + sg_frags = 1; + bd[bd_count].sge_addr.lo = addr & 0xffffffff; + bd[bd_count].sge_addr.hi = addr >> 32; + bd[bd_count].sge_len = sg_len; + } + byte_count += sg_len; + bd_count += sg_frags; + } + + if (byte_count != scsi_bufflen(sc)) + QEDI_ERR(&qedi->dbg_ctx, + "byte_count = %d != scsi_bufflen = %d\n", byte_count, + scsi_bufflen(sc)); + else + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n", + byte_count); + + WARN_ON(byte_count != scsi_bufflen(sc)); + + return bd_count; +} + +static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd) +{ + int bd_count; + struct scsi_cmnd *sc = cmd->scsi_cmd; + + if (scsi_sg_count(sc)) { + bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd); + if (bd_count == 0) + return; + } else { + struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; + + bd[0].sge_addr.lo = 0; + bd[0].sge_addr.hi = 0; + bd[0].sge_len = 0; + bd_count = 0; + } + cmd->io_tbl.sge_valid = bd_count; +} + +static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp) +{ + u32 dword; + int lpcnt; + u8 *srcp; + + lpcnt = sc->cmd_len / sizeof(dword); + srcp = (u8 *)sc->cmnd; + while (lpcnt--) { + memcpy(&dword, (const void *)srcp, 4); + *dstp = cpu_to_be32(dword); + srcp += 4; + dstp++; + } + if (sc->cmd_len & 0x3) { + dword = (u32)srcp[0] | ((u32)srcp[1] << 8); + *dstp = cpu_to_be32(dword); + } +} + +void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task, + u16 tid, int8_t direction) +{ + struct qedi_io_log *io_log; + struct iscsi_conn *conn = task->conn; + struct qedi_conn *qedi_conn = conn->dd_data; + struct scsi_cmnd *sc_cmd = task->sc; + unsigned long flags; + u8 op; + + spin_lock_irqsave(&qedi->io_trace_lock, flags); + + io_log = &qedi->io_trace_buf[qedi->io_trace_idx]; + io_log->direction = direction; + io_log->task_id = tid; + io_log->cid = qedi_conn->iscsi_conn_id; + io_log->lun = sc_cmd->device->lun; + io_log->op = sc_cmd->cmnd[0]; + op = sc_cmd->cmnd[0]; + io_log->lba[0] = sc_cmd->cmnd[2]; + io_log->lba[1] = sc_cmd->cmnd[3]; + io_log->lba[2] = sc_cmd->cmnd[4]; + io_log->lba[3] = sc_cmd->cmnd[5]; + io_log->bufflen = scsi_bufflen(sc_cmd); + io_log->sg_count = scsi_sg_count(sc_cmd); + io_log->fast_sgs = qedi->fast_sgls; + io_log->cached_sgs = qedi->cached_sgls; + io_log->slow_sgs = qedi->slow_sgls; + io_log->cached_sge = qedi->use_cached_sge; + io_log->slow_sge = qedi->use_slow_sge; + io_log->fast_sge = qedi->use_fast_sge; + io_log->result = sc_cmd->result; + io_log->jiffies = jiffies; + io_log->blk_req_cpu = smp_processor_id(); + + if (direction == QEDI_IO_TRACE_REQ) { + /* For requests we only care about the submission CPU */ + io_log->req_cpu = smp_processor_id() % qedi->num_queues; + io_log->intr_cpu = 0; + io_log->blk_rsp_cpu = 0; + } else if (direction == QEDI_IO_TRACE_RSP) { + io_log->req_cpu = smp_processor_id() % qedi->num_queues; + io_log->intr_cpu = qedi->intr_cpu; + io_log->blk_rsp_cpu = smp_processor_id(); + } + + qedi->io_trace_idx++; + if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE) + qedi->io_trace_idx = 0; + + qedi->use_cached_sge = false; + 
qedi->use_slow_sge = false; + qedi->use_fast_sge = false; + + spin_unlock_irqrestore(&qedi->io_trace_lock, flags); +} + +int qedi_iscsi_send_ioreq(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_session *session = conn->session; + struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); + struct qedi_ctx *qedi = iscsi_host_priv(shost); + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_cmd *cmd = task->dd_data; + struct scsi_cmnd *sc = task->sc; + struct iscsi_task_context *fw_task_ctx; + struct iscsi_cached_sge_ctx *cached_sge; + struct iscsi_phys_sgl_ctx *phys_sgl; + struct iscsi_virt_sgl_ctx *virt_sgl; + struct ystorm_iscsi_task_st_ctx *yst_cxt; + struct mstorm_iscsi_task_st_ctx *mst_cxt; + struct iscsi_sgl *sgl_struct; + struct iscsi_sge *single_sge; + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; + struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; + enum iscsi_task_type task_type; + struct iscsi_cmd_hdr *fw_cmd; + u32 lun[2]; + u32 exp_data; + u16 cq_idx = smp_processor_id() % qedi->num_queues; + s16 ptu_invalidate = 0; + s16 tid = 0; + u8 num_fast_sgs; + + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + + qedi_iscsi_map_sg_list(cmd); + + int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun); + fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + cmd->task_id = tid; + + /* Ystorm context */ + fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd; + SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE); + + if (sc->sc_data_direction == DMA_TO_DEVICE) { + if (conn->session->initial_r2t_en) { + exp_data = min((conn->session->imm_data_en * + conn->max_xmit_dlength), + conn->session->first_burst); + exp_data = min(exp_data, scsi_bufflen(sc)); + fw_task_ctx->ustorm_ag_context.exp_data_acked = + cpu_to_le32(exp_data); + } else { + fw_task_ctx->ustorm_ag_context.exp_data_acked = + min(conn->session->first_burst, scsi_bufflen(sc)); + } + + SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1); + task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE; + } else { + if (scsi_bufflen(sc)) + SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1); + task_type = ISCSI_TASK_TYPE_INITIATOR_READ; + } + + fw_cmd->lun.lo = be32_to_cpu(lun[0]); + fw_cmd->lun.hi = be32_to_cpu(lun[1]); + + qedi_update_itt_map(qedi, tid, task->itt, cmd); + fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt)); + fw_cmd->expected_transfer_length = scsi_bufflen(sc); + fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); + fw_cmd->opcode = hdr->opcode; + qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb); + + /* Mstorm context */ + fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma; + fw_task_ctx->mstorm_st_context.sense_db.hi = + (u32)((u64)cmd->sense_buffer_dma >> 32); + fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id; + fw_task_ctx->mstorm_st_context.task_type = task_type; + + if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { + ptu_invalidate = 1; + qedi->tid_reuse_count[tid] = 0; + } + fw_task_ctx->ystorm_st_context.state.reuse_count = + qedi->tid_reuse_count[tid]; + fw_task_ctx->mstorm_st_context.reuse_count = + qedi->tid_reuse_count[tid]++; + + /* Ustorm context */ + fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc); + fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc); + fw_task_ctx->ustorm_st_context.exp_data_sn = + be32_to_cpu(hdr->exp_statsn); + fw_task_ctx->ustorm_st_context.task_type = task_type; + 
fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx; + fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; + + SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, + USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + SET_FIELD(fw_task_ctx->ustorm_st_context.flags, + USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); + + num_fast_sgs = (cmd->io_tbl.sge_valid ? + min((u16)QEDI_FAST_SGE_COUNT, + (u16)cmd->io_tbl.sge_valid) : 0); + SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, + ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs); + + fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]); + fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n", + cmd->io_tbl.sge_valid); + + yst_cxt = &fw_task_ctx->ystorm_st_context; + mst_cxt = &fw_task_ctx->mstorm_st_context; + /* Tx path */ + if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { + /* not considering superIO or FastIO */ + if (cmd->io_tbl.sge_valid == 1) { + cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge; + cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo; + cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi; + cached_sge->sge.sge_len = bd[0].sge_len; + qedi->cached_sgls++; + } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) { + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SLOW_IO, 1); + SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, + ISCSI_REG1_NUM_FAST_SGES, 0); + phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl; + phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma); + phys_sgl->sgl_base.hi = + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); + phys_sgl->sgl_size = cmd->io_tbl.sge_valid; + qedi->slow_sgls++; + } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) { + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SLOW_IO, 0); + SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, + ISCSI_REG1_NUM_FAST_SGES, + min((u16)QEDI_FAST_SGE_COUNT, + (u16)cmd->io_tbl.sge_valid)); + virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl; + virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma); + virt_sgl->sgl_base.hi = + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); + virt_sgl->sgl_initial_offset = + (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1); + qedi->fast_sgls++; + } + fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid; + fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc); + } else { + /* Rx path */ + if (cmd->io_tbl.sge_valid == 1) { + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SLOW_IO, 0); + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SINGLE_SGE, 1); + single_sge = &mst_cxt->sgl_union.single_sge; + single_sge->sge_addr.lo = bd[0].sge_addr.lo; + single_sge->sge_addr.hi = bd[0].sge_addr.hi; + single_sge->sge_len = bd[0].sge_len; + qedi->cached_sgls++; + } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) { + sgl_struct = &mst_cxt->sgl_union.sgl_struct; + sgl_struct->sgl_addr.lo = + (u32)(cmd->io_tbl.sge_tbl_dma); + sgl_struct->sgl_addr.hi = + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SLOW_IO, 1); + SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, + ISCSI_REG1_NUM_FAST_SGES, 0); + sgl_struct->updated_sge_size = 0; + sgl_struct->updated_sge_offset = 0; + qedi->slow_sgls++; + } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) { + sgl_struct = &mst_cxt->sgl_union.sgl_struct; + sgl_struct->sgl_addr.lo = + 
(u32)(cmd->io_tbl.sge_tbl_dma); + sgl_struct->sgl_addr.hi = + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); + sgl_struct->byte_offset = + (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1); + SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, + ISCSI_MFLAGS_SLOW_IO, 0); + SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, + ISCSI_REG1_NUM_FAST_SGES, 0); + sgl_struct->updated_sge_size = 0; + sgl_struct->updated_sge_offset = 0; + qedi->fast_sgls++; + } + fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid; + fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc); + } + + if (cmd->io_tbl.sge_valid == 1) + /* Single-SGL */ + qedi->use_cached_sge = true; + else { + if (cmd->use_slowpath) + qedi->use_slow_sge = true; + else + qedi->use_fast_sge = true; + } + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, + "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x", + (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ? + "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ? + "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"), + (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma), + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32)); + + /* Add command to the active command list */ + spin_lock(&qedi_conn->list_lock); + list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list); + cmd->io_cmd_in_list = true; + qedi_conn->active_cmd_count++; + spin_unlock(&qedi_conn->list_lock); + + qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); + qedi_ring_doorbell(qedi_conn); + if (qedi_io_tracing) + qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ); + + return 0; +} + +int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted) +{ + struct iscsi_conn *conn = task->conn; + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_cmd *cmd = task->dd_data; + s16 ptu_invalidate = 0; + + QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM, + "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0x%x cid=0x%x\n", + cmd->task_id, get_itt(task->itt), task->state, + cmd->state, qedi_conn->iscsi_conn_id); + + qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true); + qedi_ring_doorbell(qedi_conn); + + return 0; +} diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h new file mode 100644 index 0000000000000000000000000000000000000000..8e488de88ece9fb8fc49b991935cf5c24a87b5b4 --- /dev/null +++ b/drivers/scsi/qedi/qedi_gbl.h @@ -0,0 +1,73 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree.
+ */ + +#ifndef _QEDI_GBL_H_ +#define _QEDI_GBL_H_ + +#include "qedi_iscsi.h" + +extern uint qedi_io_tracing; +extern int do_not_recover; +extern struct scsi_host_template qedi_host_template; +extern struct iscsi_transport qedi_iscsi_transport; +extern const struct qed_iscsi_ops *qedi_ops; +extern struct qedi_debugfs_ops qedi_debugfs_ops; +extern const struct file_operations qedi_dbg_fops; +extern struct device_attribute *qedi_shost_attrs[]; + +int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep); +void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep); + +int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, + struct iscsi_task *task); +int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, + struct iscsi_task *task); +int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn, + struct iscsi_task *mtask); +int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, + struct iscsi_task *task); +int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, + struct iscsi_task *task, + char *datap, int data_len, int unsol); +int qedi_iscsi_send_ioreq(struct iscsi_task *task); +int qedi_get_task_idx(struct qedi_ctx *qedi); +void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx); +int qedi_iscsi_cleanup_task(struct iscsi_task *task, + bool mark_cmd_node_deleted); +void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd); +void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, + struct qedi_cmd *qedi_cmd); +void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt); +void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid); +void qedi_process_iscsi_error(struct qedi_endpoint *ep, + struct async_data *data); +void qedi_start_conn_recovery(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn); +struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid); +void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data); +void qedi_mark_device_missing(struct iscsi_cls_session *cls_session); +void qedi_mark_device_available(struct iscsi_cls_session *cls_session); +void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu); +int qedi_recover_all_conns(struct qedi_ctx *qedi); +void qedi_fp_process_cqes(struct qedi_work *work); +int qedi_cleanup_all_io(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn, + struct iscsi_task *task, bool in_recovery); +void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task, + u16 tid, int8_t direction); +int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id); +u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl); +void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id); +int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi); +void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi); +void qedi_clearsq(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn, + struct iscsi_task *task); + +#endif diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h new file mode 100644 index 0000000000000000000000000000000000000000..8ca44c78f0936301b97bd332d0ec7afb5c1d9ce1 --- /dev/null +++ b/drivers/scsi/qedi/qedi_hsi.h @@ -0,0 +1,52 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ +#ifndef __QEDI_HSI__ +#define __QEDI_HSI__ +/* + * Add include to common target + */ +#include + +/* + * Add include to common storage target + */ +#include + +/* + * Add include to common TCP target + */ +#include + +/* + * Add include to common iSCSI target for both eCore and protocol driver + */ +#include + +/* + * iSCSI CMDQ element + */ +struct iscsi_cmdqe { + __le16 conn_id; + u8 invalid_command; + u8 cmd_hdr_type; + __le32 reserved1[2]; + __le32 cmd_payload[13]; +}; + +/* + * iSCSI CMD header type + */ +enum iscsi_cmd_hdr_type { + ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */, + ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */, + ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */, + MAX_ISCSI_CMD_HDR_TYPE +}; + +#endif /* __QEDI_HSI__ */ diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c new file mode 100644 index 0000000000000000000000000000000000000000..d6a205433b66b6d2da39cbdbe99691d89afa2705 --- /dev/null +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -0,0 +1,1624 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include +#include +#include +#include +#include + +#include "qedi.h" +#include "qedi_iscsi.h" +#include "qedi_gbl.h" + +int qedi_recover_all_conns(struct qedi_ctx *qedi) +{ + struct qedi_conn *qedi_conn; + int i; + + for (i = 0; i < qedi->max_active_conns; i++) { + qedi_conn = qedi_get_conn_from_id(qedi, i); + if (!qedi_conn) + continue; + + qedi_start_conn_recovery(qedi, qedi_conn); + } + + return SUCCESS; +} + +static int qedi_eh_host_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *shost = cmd->device->host; + struct qedi_ctx *qedi; + + qedi = iscsi_host_priv(shost); + + return qedi_recover_all_conns(qedi); +} + +struct scsi_host_template qedi_host_template = { + .module = THIS_MODULE, + .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver", + .proc_name = QEDI_MODULE_NAME, + .queuecommand = iscsi_queuecommand, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .eh_host_reset_handler = qedi_eh_host_reset, + .target_alloc = iscsi_target_alloc, + .change_queue_depth = scsi_change_queue_depth, + .can_queue = QEDI_MAX_ISCSI_TASK, + .this_id = -1, + .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, + .max_sectors = 0xffff, + .cmd_per_lun = 128, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = qedi_shost_attrs, +}; + +static void qedi_conn_free_login_resources(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + if (qedi_conn->gen_pdu.resp_bd_tbl) { + dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + qedi_conn->gen_pdu.resp_bd_tbl, + qedi_conn->gen_pdu.resp_bd_dma); + qedi_conn->gen_pdu.resp_bd_tbl = NULL; + } + + if (qedi_conn->gen_pdu.req_bd_tbl) { + dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + qedi_conn->gen_pdu.req_bd_tbl, + qedi_conn->gen_pdu.req_bd_dma); + qedi_conn->gen_pdu.req_bd_tbl = NULL; + } + + if (qedi_conn->gen_pdu.resp_buf) { + dma_free_coherent(&qedi->pdev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + qedi_conn->gen_pdu.resp_buf, + qedi_conn->gen_pdu.resp_dma_addr); + qedi_conn->gen_pdu.resp_buf = NULL; + } + + if (qedi_conn->gen_pdu.req_buf) { + dma_free_coherent(&qedi->pdev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + qedi_conn->gen_pdu.req_buf, + qedi_conn->gen_pdu.req_dma_addr); + 
qedi_conn->gen_pdu.req_buf = NULL; + } +} + +static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + qedi_conn->gen_pdu.req_buf = + dma_alloc_coherent(&qedi->pdev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + &qedi_conn->gen_pdu.req_dma_addr, + GFP_KERNEL); + if (!qedi_conn->gen_pdu.req_buf) + goto login_req_buf_failure; + + qedi_conn->gen_pdu.req_buf_size = 0; + qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf; + + qedi_conn->gen_pdu.resp_buf = + dma_alloc_coherent(&qedi->pdev->dev, + ISCSI_DEF_MAX_RECV_SEG_LEN, + &qedi_conn->gen_pdu.resp_dma_addr, + GFP_KERNEL); + if (!qedi_conn->gen_pdu.resp_buf) + goto login_resp_buf_failure; + + qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf; + + qedi_conn->gen_pdu.req_bd_tbl = + dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL); + if (!qedi_conn->gen_pdu.req_bd_tbl) + goto login_req_bd_tbl_failure; + + qedi_conn->gen_pdu.resp_bd_tbl = + dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + &qedi_conn->gen_pdu.resp_bd_dma, + GFP_KERNEL); + if (!qedi_conn->gen_pdu.resp_bd_tbl) + goto login_resp_bd_tbl_failure; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS, + "Allocation successful, cid=0x%x\n", + qedi_conn->iscsi_conn_id); + return 0; + +login_resp_bd_tbl_failure: + dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, + qedi_conn->gen_pdu.req_bd_tbl, + qedi_conn->gen_pdu.req_bd_dma); + qedi_conn->gen_pdu.req_bd_tbl = NULL; + +login_req_bd_tbl_failure: + dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, + qedi_conn->gen_pdu.resp_buf, + qedi_conn->gen_pdu.resp_dma_addr); + qedi_conn->gen_pdu.resp_buf = NULL; +login_resp_buf_failure: + dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, + qedi_conn->gen_pdu.req_buf, + qedi_conn->gen_pdu.req_dma_addr); + qedi_conn->gen_pdu.req_buf = NULL; +login_req_buf_failure: + iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data, + "login resource alloc failed!!\n"); + return -ENOMEM; +} + +static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct qedi_cmd *cmd = task->dd_data; + + if (cmd->io_tbl.sge_tbl) + dma_free_coherent(&qedi->pdev->dev, + QEDI_ISCSI_MAX_BDS_PER_CMD * + sizeof(struct iscsi_sge), + cmd->io_tbl.sge_tbl, + cmd->io_tbl.sge_tbl_dma); + + if (cmd->sense_buffer) + dma_free_coherent(&qedi->pdev->dev, + SCSI_SENSE_BUFFERSIZE, + cmd->sense_buffer, + cmd->sense_buffer_dma); + } +} + +static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session, + struct qedi_cmd *cmd) +{ + struct qedi_io_bdt *io = &cmd->io_tbl; + struct iscsi_sge *sge; + + io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev, + QEDI_ISCSI_MAX_BDS_PER_CMD * + sizeof(*sge), + &io->sge_tbl_dma, GFP_KERNEL); + if (!io->sge_tbl) { + iscsi_session_printk(KERN_ERR, session, + "Could not allocate BD table.\n"); + return -ENOMEM; + } + + io->sge_valid = 0; + return 0; +} + +static int qedi_setup_cmd_pool(struct qedi_ctx *qedi, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct qedi_cmd *cmd = task->dd_data; + + task->hdr = &cmd->hdr; + task->hdr_max = sizeof(struct iscsi_hdr); + + if (qedi_alloc_sget(qedi, session, cmd)) + goto free_sgets; + + cmd->sense_buffer = 
dma_alloc_coherent(&qedi->pdev->dev, + SCSI_SENSE_BUFFERSIZE, + &cmd->sense_buffer_dma, + GFP_KERNEL); + if (!cmd->sense_buffer) + goto free_sgets; + } + + return 0; + +free_sgets: + qedi_destroy_cmd_pool(qedi, session); + return -ENOMEM; +} + +static struct iscsi_cls_session * +qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max, + u16 qdepth, uint32_t initial_cmdsn) +{ + struct Scsi_Host *shost; + struct iscsi_cls_session *cls_session; + struct qedi_ctx *qedi; + struct qedi_endpoint *qedi_ep; + + if (!ep) + return NULL; + + qedi_ep = ep->dd_data; + shost = qedi_ep->qedi->shost; + qedi = iscsi_host_priv(shost); + + if (cmds_max > qedi->max_sqes) + cmds_max = qedi->max_sqes; + else if (cmds_max < QEDI_SQ_WQES_MIN) + cmds_max = QEDI_SQ_WQES_MIN; + + cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost, + cmds_max, 0, sizeof(struct qedi_cmd), + initial_cmdsn, ISCSI_MAX_TARGET); + if (!cls_session) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to setup session for ep=%p\n", qedi_ep); + return NULL; + } + + if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to setup cmd pool for ep=%p\n", qedi_ep); + goto session_teardown; + } + + return cls_session; + +session_teardown: + iscsi_session_teardown(cls_session); + return NULL; +} + +static void qedi_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct qedi_ctx *qedi = iscsi_host_priv(shost); + + qedi_destroy_cmd_pool(qedi, session); + iscsi_session_teardown(cls_session); +} + +static struct iscsi_cls_conn * +qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct qedi_ctx *qedi = iscsi_host_priv(shost); + struct iscsi_cls_conn *cls_conn; + struct qedi_conn *qedi_conn; + struct iscsi_conn *conn; + + cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn), + cid); + if (!cls_conn) { + QEDI_ERR(&qedi->dbg_ctx, + "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n", + cid, cls_session); + return NULL; + } + + conn = cls_conn->dd_data; + qedi_conn = conn->dd_data; + qedi_conn->cls_conn = cls_conn; + qedi_conn->qedi = qedi; + qedi_conn->ep = NULL; + qedi_conn->active_cmd_count = 0; + INIT_LIST_HEAD(&qedi_conn->active_cmd_list); + spin_lock_init(&qedi_conn->list_lock); + + if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) { + iscsi_conn_printk(KERN_ALERT, conn, + "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n", + cid, cls_session); + goto free_conn; + } + + return cls_conn; + +free_conn: + iscsi_conn_teardown(cls_conn); + return NULL; +} + +void qedi_mark_device_missing(struct iscsi_cls_session *cls_session) +{ + iscsi_block_session(cls_session); +} + +void qedi_mark_device_available(struct iscsi_cls_session *cls_session) +{ + iscsi_unblock_session(cls_session); +} + +static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + u32 iscsi_cid = qedi_conn->iscsi_conn_id; + + if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) { + iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data, + "conn bind - entry #%d not free\n", + iscsi_cid); + return -EBUSY; + } + + qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn; + return 0; +} + +struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid) +{ + if (!qedi->cid_que.conn_cid_tbl) { + QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n"); + return NULL; + + 
} else if (iscsi_cid >= qedi->max_active_conns) { + QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid); + return NULL; + } + return qedi->cid_que.conn_cid_tbl[iscsi_cid]; +} + +static int qedi_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + u64 transport_fd, int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct qedi_conn *qedi_conn = conn->dd_data; + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct qedi_ctx *qedi = iscsi_host_priv(shost); + struct qedi_endpoint *qedi_ep; + struct iscsi_endpoint *ep; + + ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) + return -EINVAL; + + qedi_ep = ep->dd_data; + if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) || + (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) + return -EINVAL; + + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) + return -EINVAL; + + qedi_ep->conn = qedi_conn; + qedi_conn->ep = qedi_ep; + qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid; + qedi_conn->fw_cid = qedi_ep->fw_cid; + qedi_conn->cmd_cleanup_req = 0; + qedi_conn->cmd_cleanup_cmpl = 0; + + if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) + return -EINVAL; + + spin_lock_init(&qedi_conn->tmf_work_lock); + INIT_LIST_HEAD(&qedi_conn->tmf_work_list); + init_waitqueue_head(&qedi_conn->wait_queue); + return 0; +} + +static int qedi_iscsi_update_conn(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + struct qed_iscsi_params_update *conn_info; + struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn; + struct iscsi_conn *conn = cls_conn->dd_data; + struct qedi_endpoint *qedi_ep; + int rval; + + qedi_ep = qedi_conn->ep; + + conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL); + if (!conn_info) { + QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n"); + return -ENOMEM; + } + + conn_info->update_flag = 0; + + if (conn->hdrdgst_en) + SET_FIELD(conn_info->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true); + if (conn->datadgst_en) + SET_FIELD(conn_info->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true); + if (conn->session->initial_r2t_en) + SET_FIELD(conn_info->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, + true); + if (conn->session->imm_data_en) + SET_FIELD(conn_info->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, + true); + + conn_info->max_seq_size = conn->session->max_burst; + conn_info->max_recv_pdu_length = conn->max_recv_dlength; + conn_info->max_send_pdu_length = conn->max_xmit_dlength; + conn_info->first_seq_length = conn->session->first_burst; + conn_info->exp_stat_sn = conn->exp_statsn; + + rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle, + conn_info); + if (rval) { + rval = -ENXIO; + QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n"); + goto update_conn_err; + } + + kfree(conn_info); + rval = 0; + +update_conn_err: + return rval; +} + +static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en) +{ + u16 mss = 0; + u16 hdrs = TCP_HDR_LEN; + + if (is_ipv6) + hdrs += IPV6_HDR_LEN; + else + hdrs += IPV4_HDR_LEN; + + if (vlan_en) + hdrs += VLAN_LEN; + + mss = pmtu - hdrs; + + if (tcp_ts_en) + mss -= TCP_OPTION_LEN; + + if (!mss) + mss = DEF_MSS; + + return mss; +} + +static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep) +{ + struct qedi_ctx *qedi = qedi_ep->qedi; + struct qed_iscsi_params_offload *conn_info; + int rval; + int i; + + conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL); + if (!conn_info) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to allocate memory ep=%p\n", qedi_ep); + return -ENOMEM; + } + 
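+	/* Fill in MAC/IP/port addressing and TCP tuning parameters for the connection offload request */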
+ ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac); + ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac); + + conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]); + conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]); + + if (qedi_ep->ip_type == TCP_IPV4) { + conn_info->ip_version = 0; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "After ntohl: src_addr=%pI4, dst_addr=%pI4\n", + qedi_ep->src_addr, qedi_ep->dst_addr); + } else { + for (i = 1; i < 4; i++) { + conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]); + conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]); + } + + conn_info->ip_version = 1; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "After ntohl: src_addr=%pI6, dst_addr=%pI6\n", + qedi_ep->src_addr, qedi_ep->dst_addr); + } + + conn_info->src.port = qedi_ep->src_port; + conn_info->dst.port = qedi_ep->dst_port; + + conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE; + conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma; + conn_info->vlan_id = qedi_ep->vlan_id; + + SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1); + SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1); + SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1); + SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1); + + conn_info->default_cq = (qedi_ep->fw_cid % 8); + + conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT; + conn_info->dup_ack_theshold = 3; + conn_info->rcv_wnd = 65535; + conn_info->cwnd = DEF_MAX_CWND; + + conn_info->ss_thresh = 65535; + conn_info->srtt = 300; + conn_info->rtt_var = 150; + conn_info->flow_label = 0; + conn_info->ka_timeout = DEF_KA_TIMEOUT; + conn_info->ka_interval = DEF_KA_INTERVAL; + conn_info->max_rt_time = DEF_MAX_RT_TIME; + conn_info->ttl = DEF_TTL; + conn_info->tos_or_tc = DEF_TOS; + conn_info->remote_port = qedi_ep->dst_port; + conn_info->local_port = qedi_ep->src_port; + + conn_info->mss = qedi_calc_mss(qedi_ep->pmtu, + (qedi_ep->ip_type == TCP_IPV6), + 1, (qedi_ep->vlan_id != 0)); + + conn_info->rcv_wnd_scale = 4; + conn_info->ts_ticks_per_second = 1000; + conn_info->da_timeout_value = 200; + conn_info->ack_frequency = 2; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Default cq index [%d], mss [%d]\n", + conn_info->default_cq, conn_info->mss); + + rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info); + if (rval) + QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n", + rval, qedi_ep); + + kfree(conn_info); + return rval; +} + +static int qedi_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_ctx *qedi; + int rval; + + qedi = qedi_conn->qedi; + + rval = qedi_iscsi_update_conn(qedi, qedi_conn); + if (rval) { + iscsi_conn_printk(KERN_ALERT, conn, + "conn_start: FW offload conn failed.\n"); + rval = -EINVAL; + goto start_err; + } + + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); + qedi_conn->abrt_conn = 0; + + rval = iscsi_conn_start(cls_conn); + if (rval) { + iscsi_conn_printk(KERN_ALERT, conn, + "iscsi_conn_start: FW offload conn failed!!\n"); + } + +start_err: + return rval; +} + +static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct qedi_conn *qedi_conn = conn->dd_data; + struct Scsi_Host *shost; + struct qedi_ctx *qedi; + + shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); + qedi = iscsi_host_priv(shost); + + qedi_conn_free_login_resources(qedi, qedi_conn); + iscsi_conn_teardown(cls_conn); +} + +static int
qedi_ep_get_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) +{ + struct qedi_endpoint *qedi_ep = ep->dd_data; + int len; + + if (!qedi_ep) + return -ENOTCONN; + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + len = sprintf(buf, "%hu\n", qedi_ep->dst_port); + break; + case ISCSI_PARAM_CONN_ADDRESS: + if (qedi_ep->ip_type == TCP_IPV4) + len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr); + else + len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr); + break; + default: + return -ENOTCONN; + } + + return len; +} + +static int qedi_host_get_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf) +{ + struct qedi_ctx *qedi; + int len; + + qedi = iscsi_host_priv(shost); + + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + len = sysfs_format_mac(buf, qedi->mac, 6); + break; + case ISCSI_HOST_PARAM_NETDEV_NAME: + len = sprintf(buf, "host%d\n", shost->host_no); + break; + case ISCSI_HOST_PARAM_IPADDRESS: + if (qedi->ip_type == TCP_IPV4) + len = sprintf(buf, "%pI4\n", qedi->src_ip); + else + len = sprintf(buf, "%pI6\n", qedi->src_ip); + break; + default: + return iscsi_host_get_param(shost, param, buf); + } + + return len; +} + +static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct qed_iscsi_stats iscsi_stats; + struct Scsi_Host *shost; + struct qedi_ctx *qedi; + + shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); + qedi = iscsi_host_priv(shost); + qedi_ops->get_stats(qedi->cdev, &iscsi_stats); + + conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt; + conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt; + conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt; + conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt; + conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt; + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; + stats->r2t_pdus = conn->r2t_pdus_cnt; + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->digest_err = 0; + stats->timeout_err = 0; + strcpy(stats->custom[0].desc, "eh_abort_cnt"); + stats->custom[0].value = conn->eh_abort_cnt; + stats->custom_length = 1; +} + +static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn) +{ + struct iscsi_sge *bd_tbl; + + bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + + bd_tbl->sge_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr; + bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr - + qedi_conn->gen_pdu.req_buf; + bd_tbl->reserved0 = 0; + bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + bd_tbl->sge_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); + bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr; + bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN; + bd_tbl->reserved0 = 0; +} + +static int qedi_iscsi_send_generic_request(struct iscsi_task *task) +{ + struct qedi_cmd *cmd = task->dd_data; + struct qedi_conn *qedi_conn = cmd->conn; + char *buf; + int data_len; + int rc = 0; + + qedi_iscsi_prep_generic_pdu_bd(qedi_conn); + switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { + case ISCSI_OP_LOGIN: + qedi_send_iscsi_login(qedi_conn, task); + break; + 
case ISCSI_OP_NOOP_OUT: + data_len = qedi_conn->gen_pdu.req_buf_size; + buf = qedi_conn->gen_pdu.req_buf; + if (data_len) + rc = qedi_send_iscsi_nopout(qedi_conn, task, + buf, data_len, 1); + else + rc = qedi_send_iscsi_nopout(qedi_conn, task, + NULL, 0, 1); + break; + case ISCSI_OP_LOGOUT: + rc = qedi_send_iscsi_logout(qedi_conn, task); + break; + case ISCSI_OP_SCSI_TMFUNC: + rc = qedi_iscsi_abort_work(qedi_conn, task); + break; + case ISCSI_OP_TEXT: + rc = qedi_send_iscsi_text(qedi_conn, task); + break; + default: + iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data, + "unsupported op 0x%x\n", task->hdr->opcode); + } + + return rc; +} + +static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) +{ + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_cmd *cmd = task->dd_data; + + memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); + + qedi_conn->gen_pdu.req_buf_size = task->data_count; + + if (task->data_count) { + memcpy(qedi_conn->gen_pdu.req_buf, task->data, + task->data_count); + qedi_conn->gen_pdu.req_wr_ptr = + qedi_conn->gen_pdu.req_buf + task->data_count; + } + + cmd->conn = conn->dd_data; + cmd->scsi_cmd = NULL; + return qedi_iscsi_send_generic_request(task); +} + +static int qedi_task_xmit(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct qedi_conn *qedi_conn = conn->dd_data; + struct qedi_cmd *cmd = task->dd_data; + struct scsi_cmnd *sc = task->sc; + + cmd->state = 0; + cmd->task = NULL; + cmd->use_slowpath = false; + cmd->conn = qedi_conn; + cmd->task = task; + cmd->io_cmd_in_list = false; + INIT_LIST_HEAD(&cmd->io_cmd); + + if (!sc) + return qedi_mtask_xmit(conn, task); + + cmd->scsi_cmd = sc; + return qedi_iscsi_send_ioreq(task); +} + +static struct iscsi_endpoint * +qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, + int non_blocking) +{ + struct qedi_ctx *qedi; + struct iscsi_endpoint *ep; + struct qedi_endpoint *qedi_ep; + struct sockaddr_in *addr; + struct sockaddr_in6 *addr6; + struct qed_dev *cdev = NULL; + struct qedi_uio_dev *udev = NULL; + struct iscsi_path path_req; + u32 msg_type = ISCSI_KEVENT_IF_DOWN; + u32 iscsi_cid = QEDI_CID_RESERVED; + u16 len = 0; + char *buf = NULL; + int ret; + + if (!shost) { + ret = -ENXIO; + QEDI_ERR(NULL, "shost is NULL\n"); + return ERR_PTR(ret); + } + + if (do_not_recover) { + ret = -ENOMEM; + return ERR_PTR(ret); + } + + qedi = iscsi_host_priv(shost); + cdev = qedi->cdev; + udev = qedi->udev; + + if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) || + test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { + ret = -ENOMEM; + return ERR_PTR(ret); + } + + ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint)); + if (!ep) { + QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n"); + ret = -ENOMEM; + return ERR_PTR(ret); + } + qedi_ep = ep->dd_data; + memset(qedi_ep, 0, sizeof(struct qedi_endpoint)); + qedi_ep->state = EP_STATE_IDLE; + qedi_ep->iscsi_cid = (u32)-1; + qedi_ep->qedi = qedi; + + if (dst_addr->sa_family == AF_INET) { + addr = (struct sockaddr_in *)dst_addr; + memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr, + sizeof(struct in_addr)); + qedi_ep->dst_port = ntohs(addr->sin_port); + qedi_ep->ip_type = TCP_IPV4; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "dst_addr=%pI4, dst_port=%u\n", + qedi_ep->dst_addr, qedi_ep->dst_port); + } else if (dst_addr->sa_family == AF_INET6) { + addr6 = (struct sockaddr_in6 *)dst_addr; + memcpy(qedi_ep->dst_addr, &addr6->sin6_addr, + sizeof(struct in6_addr)); + qedi_ep->dst_port = ntohs(addr6->sin6_port); + 
qedi_ep->ip_type = TCP_IPV6; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "dst_addr=%pI6, dst_port=%u\n", + qedi_ep->dst_addr, qedi_ep->dst_port); + } else { + QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n"); + } + + if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) { + QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n"); + ret = -ENXIO; + goto ep_conn_exit; + } + + ret = qedi_alloc_sq(qedi, qedi_ep); + if (ret) + goto ep_conn_exit; + + ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle, + &qedi_ep->fw_cid, &qedi_ep->p_doorbell); + + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n"); + ret = -ENXIO; + goto ep_free_sq; + } + + iscsi_cid = qedi_ep->handle; + qedi_ep->iscsi_cid = iscsi_cid; + + init_waitqueue_head(&qedi_ep->ofld_wait); + init_waitqueue_head(&qedi_ep->tcp_ofld_wait); + qedi_ep->state = EP_STATE_OFLDCONN_START; + qedi->ep_tbl[iscsi_cid] = qedi_ep; + + buf = (char *)&path_req; + len = sizeof(path_req); + memset(&path_req, 0, len); + + msg_type = ISCSI_KEVENT_PATH_REQ; + path_req.handle = (u64)qedi_ep->iscsi_cid; + path_req.pmtu = qedi->ll2_mtu; + qedi_ep->pmtu = qedi->ll2_mtu; + if (qedi_ep->ip_type == TCP_IPV4) { + memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr, + sizeof(struct in_addr)); + path_req.ip_addr_len = 4; + } else { + memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr, + sizeof(struct in6_addr)); + path_req.ip_addr_len = 16; + } + + ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf, + len); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n", + iscsi_cid, ret); + goto ep_rel_conn; + } + + atomic_inc(&qedi->num_offloads); + return ep; + +ep_rel_conn: + qedi->ep_tbl[iscsi_cid] = NULL; + ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); + if (ret) + QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n", + ret); +ep_free_sq: + qedi_free_sq(qedi, qedi_ep); +ep_conn_exit: + iscsi_destroy_endpoint(ep); + return ERR_PTR(ret); +} + +static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct qedi_endpoint *qedi_ep; + int ret = 0; + + if (do_not_recover) + return 1; + + qedi_ep = ep->dd_data; + if (qedi_ep->state == EP_STATE_IDLE || + qedi_ep->state == EP_STATE_OFLDCONN_FAILED) + return -1; + + if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL) + ret = 1; + + ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait, + QEDI_OFLD_WAIT_STATE(qedi_ep), + msecs_to_jiffies(timeout_ms)); + + if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED) + ret = -1; + + if (ret > 0) + return 1; + else if (!ret) + return 0; + else + return ret; +} + +static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn) +{ + struct qedi_cmd *cmd, *cmd_tmp; + + list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list, + io_cmd) { + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } +} + +static void qedi_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct qedi_endpoint *qedi_ep; + struct qedi_conn *qedi_conn = NULL; + struct iscsi_conn *conn = NULL; + struct qedi_ctx *qedi; + int ret = 0; + int wait_delay = 20 * HZ; + int abrt_conn = 0; + int count = 10; + + qedi_ep = ep->dd_data; + qedi = qedi_ep->qedi; + + flush_work(&qedi_ep->offload_work); + + if (qedi_ep->conn) { + qedi_conn = qedi_ep->conn; + conn = qedi_conn->cls_conn->dd_data; + iscsi_suspend_queue(conn); + abrt_conn = qedi_conn->abrt_conn; + + while (count--) { + if (!test_bit(QEDI_CONN_FW_CLEANUP, + &qedi_conn->flags)) { + break; + } + msleep(1000); + } + + if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { + 
if (do_not_recover) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Do not recover cid=0x%x\n", + qedi_ep->iscsi_cid); + goto ep_exit_recover; + } + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n", + qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state); + qedi_cleanup_active_cmd_list(qedi_conn); + goto ep_release_conn; + } + } + + if (do_not_recover) + goto ep_exit_recover; + + switch (qedi_ep->state) { + case EP_STATE_OFLDCONN_START: + goto ep_release_conn; + case EP_STATE_OFLDCONN_FAILED: + break; + case EP_STATE_OFLDCONN_COMPL: + if (unlikely(!qedi_conn)) + break; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n", + qedi_conn->active_cmd_count, abrt_conn, + qedi_ep->state, + qedi_ep->iscsi_cid, + qedi_ep->conn + ); + + if (!qedi_conn->active_cmd_count) + abrt_conn = 0; + else + abrt_conn = 1; + + if (abrt_conn) + qedi_clearsq(qedi, qedi_conn, NULL); + break; + default: + break; + } + + qedi_ep->state = EP_STATE_DISCONN_START; + ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); + if (ret) { + QEDI_WARN(&qedi->dbg_ctx, + "destroy_conn failed returned %d\n", ret); + } else { + ret = wait_event_interruptible_timeout( + qedi_ep->tcp_ofld_wait, + (qedi_ep->state != + EP_STATE_DISCONN_START), + wait_delay); + if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) { + QEDI_WARN(&qedi->dbg_ctx, + "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n", + ret, wait_delay, qedi_ep->iscsi_cid); + } + } + +ep_release_conn: + ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); + if (ret) + QEDI_WARN(&qedi->dbg_ctx, + "release_conn returned %d, cid=0x%x\n", + ret, qedi_ep->iscsi_cid); +ep_exit_recover: + qedi_ep->state = EP_STATE_IDLE; + qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL; + qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL; + qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port); + qedi_free_sq(qedi, qedi_ep); + + if (qedi_conn) + qedi_conn->ep = NULL; + + qedi_ep->conn = NULL; + qedi_ep->qedi = NULL; + atomic_dec(&qedi->num_offloads); + + iscsi_destroy_endpoint(ep); +} + +static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) +{ + struct qed_dev *cdev = qedi->cdev; + struct qedi_uio_dev *udev; + struct qedi_uio_ctrl *uctrl; + struct sk_buff *skb; + u32 len; + int rc = 0; + + udev = qedi->udev; + if (!udev) { + QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n"); + return -EINVAL; + } + + uctrl = (struct qedi_uio_ctrl *)udev->uctrl; + if (!uctrl) { + QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n"); + return -EINVAL; + } + + len = uctrl->host_tx_pkt_len; + if (!len) { + QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len); + return -EINVAL; + } + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n"); + return -EINVAL; + } + + skb_put(skb, len); + memcpy(skb->data, udev->tx_pkt, len); + skb->ip_summed = CHECKSUM_NONE; + + if (vlanid) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); + + rc = qedi_ops->ll2->start_xmit(cdev, skb); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", + rc); + kfree_skb(skb); + } + + uctrl->host_tx_pkt_len = 0; + uctrl->hw_tx_cons++; + + return rc; +} + +static void qedi_offload_work(struct work_struct *work) +{ + struct qedi_endpoint *qedi_ep = + container_of(work, struct qedi_endpoint, offload_work); + struct qedi_ctx *qedi; + int wait_delay = 20 * HZ; + int ret; + + qedi = qedi_ep->qedi; + + ret = qedi_iscsi_offload_conn(qedi_ep); + if (ret) 
{ + QEDI_ERR(&qedi->dbg_ctx, + "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", + qedi_ep->iscsi_cid, qedi_ep, ret); + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + return; + } + + ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, + (qedi_ep->state == + EP_STATE_OFLDCONN_COMPL), + wait_delay); + if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) { + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + QEDI_ERR(&qedi->dbg_ctx, + "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", + qedi_ep->iscsi_cid, qedi_ep); + } +} + +static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) +{ + struct qedi_ctx *qedi; + struct qedi_endpoint *qedi_ep; + int ret = 0; + u32 iscsi_cid; + u16 port_id = 0; + + if (!shost) { + ret = -ENXIO; + QEDI_ERR(NULL, "shost is NULL\n"); + return ret; + } + + if (strcmp(shost->hostt->proc_name, "qedi")) { + ret = -ENXIO; + QEDI_ERR(NULL, "shost %s is invalid\n", + shost->hostt->proc_name); + return ret; + } + + qedi = iscsi_host_priv(shost); + if (path_data->handle == QEDI_PATH_HANDLE) { + ret = qedi_data_avail(qedi, path_data->vlan_id); + goto set_path_exit; + } + + iscsi_cid = (u32)path_data->handle; + qedi_ep = qedi->ep_tbl[iscsi_cid]; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); + + if (!is_valid_ether_addr(&path_data->mac_addr[0])) { + QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); + ret = -EIO; + goto set_path_exit; + } + + ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]); + ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]); + + qedi_ep->vlan_id = path_data->vlan_id; + if (path_data->pmtu < DEF_PATH_MTU) { + qedi_ep->pmtu = qedi->ll2_mtu; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "MTU cannot be %u, using default MTU %u\n", + path_data->pmtu, qedi_ep->pmtu); + } + + if (path_data->pmtu != qedi->ll2_mtu) { + if (path_data->pmtu > JUMBO_MTU) { + ret = -EINVAL; + QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu); + goto set_path_exit; + } + + qedi_reset_host_mtu(qedi, path_data->pmtu); + qedi_ep->pmtu = qedi->ll2_mtu; + } + + port_id = qedi_ep->src_port; + if (port_id >= QEDI_LOCAL_PORT_MIN && + port_id < QEDI_LOCAL_PORT_MAX) { + if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id)) + port_id = 0; + } else { + port_id = 0; + } + + if (!port_id) { + port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl); + if (port_id == QEDI_LOCAL_PORT_INVALID) { + QEDI_ERR(&qedi->dbg_ctx, + "Failed to allocate port id for iscsi_cid=0x%x\n", + iscsi_cid); + ret = -ENOMEM; + goto set_path_exit; + } + } + + qedi_ep->src_port = port_id; + + if (qedi_ep->ip_type == TCP_IPV4) { + memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr, + sizeof(struct in_addr)); + memcpy(&qedi->src_ip[0], &path_data->src.v4_addr, + sizeof(struct in_addr)); + qedi->ip_type = TCP_IPV4; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n", + qedi_ep->src_addr, qedi_ep->src_port, + qedi_ep->dst_addr, qedi_ep->dst_port); + } else { + memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr, + sizeof(struct in6_addr)); + memcpy(&qedi->src_ip[0], &path_data->src.v6_addr, + sizeof(struct in6_addr)); + qedi->ip_type = TCP_IPV6; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n", + qedi_ep->src_addr, qedi_ep->src_port, + qedi_ep->dst_addr, qedi_ep->dst_port); + } + + INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); + queue_work(qedi->offload_thread, &qedi_ep->offload_work); + + ret = 0; + +set_path_exit: + return ret; +} + 
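+/* sysfs visibility filter: every host and session parameter listed below is exposed read-only (0444); anything else stays hidden */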
+static umode_t qedi_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + return 0444; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + case ISCSI_PARAM_BOOT_ROOT: + case ISCSI_PARAM_BOOT_NIC: + case ISCSI_PARAM_BOOT_TARGET: + return 0444; + default: + return 0; + } + } + + return 0; +} + +static void qedi_cleanup_task(struct iscsi_task *task) +{ + if (!task->sc || task->state == ISCSI_TASK_PENDING) { + QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n", + atomic_read(&task->refcount)); + return; + } + + qedi_iscsi_unmap_sg_list(task->dd_data); +} + +struct iscsi_transport qedi_iscsi_transport = { + .owner = THIS_MODULE, + .name = QEDI_MODULE_NAME, + .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | + CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO, + .create_session = qedi_session_create, + .destroy_session = qedi_session_destroy, + .create_conn = qedi_conn_create, + .bind_conn = qedi_conn_bind, + .start_conn = qedi_conn_start, + .stop_conn = iscsi_conn_stop, + .destroy_conn = qedi_conn_destroy, + .set_param = iscsi_set_param, + .get_ep_param = qedi_ep_get_param, + .get_conn_param = iscsi_conn_get_param, + .get_session_param = iscsi_session_get_param, + .get_host_param = qedi_host_get_param, + .send_pdu = iscsi_conn_send_pdu, + .get_stats = qedi_conn_get_stats, + .xmit_task = qedi_task_xmit, + .cleanup_task = qedi_cleanup_task, + .session_recovery_timedout = iscsi_session_recovery_timedout, + .ep_connect = qedi_ep_connect, + .ep_poll = qedi_ep_poll, + .ep_disconnect = qedi_ep_disconnect, + .set_path = qedi_set_path, + .attr_is_visible = qedi_attr_is_visible, +}; + +void qedi_start_conn_recovery(struct qedi_ctx *qedi, + struct qedi_conn *qedi_conn) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_cls_conn *cls_conn; + struct iscsi_conn *conn; + + cls_conn = qedi_conn->cls_conn; + conn = cls_conn->dd_data; + cls_sess = iscsi_conn_to_session(cls_conn); + + if (iscsi_is_session_online(cls_sess)) { + qedi_conn->abrt_conn = 1; + QEDI_ERR(&qedi->dbg_ctx, + "Failing connection, state=0x%x, cid=0x%x\n", + conn->session->state, qedi_conn->iscsi_conn_id); + iscsi_conn_failure(qedi_conn->cls_conn->dd_data, + ISCSI_ERR_CONN_FAILED); + } +} + +static const struct { + enum iscsi_error_types error_code; + char *err_string; +} qedi_iscsi_error[] = { + { ISCSI_STATUS_NONE, + "tcp_error none" + }, + { 
ISCSI_CONN_ERROR_TASK_CID_MISMATCH, + "task cid mismatch" + }, + { ISCSI_CONN_ERROR_TASK_NOT_VALID, + "invalid task" + }, + { ISCSI_CONN_ERROR_RQ_RING_IS_FULL, + "rq ring full" + }, + { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL, + "cmdq ring full" + }, + { ISCSI_CONN_ERROR_HQE_CACHING_FAILED, + "sge caching failed" + }, + { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR, + "hdr digest error" + }, + { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, + "local cmpl error" + }, + { ISCSI_CONN_ERROR_DATA_OVERRUN, + "invalid task" + }, + { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, + "out of sge error" + }, + { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, + "tcp seg ip options error" + }, + { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, + "tcp ip fragment error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, + "AHS len protocol error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE, + "itt out of range error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE, + "data seg more than pdu size" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE, + "invalid opcode" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE, + "invalid opcode before update" + }, + { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL, + "unexpected opcode" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA, + "r2t carries no data" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN, + "data sn error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT, + "data TTT error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT, + "r2t TTT error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET, + "buffer offset error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO, + "buffer offset ooo" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN, + "data seg len 0" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0, + "data xer len error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1, + "data xer len1 error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2, + "data xer len2 error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN, + "protocol lun error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO, + "f bit zero error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN, + "exp stat sn error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO, + "dsl not zero error" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL, + "invalid dsl" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG, + "data seg len too big" + }, + { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT, + "outstanding r2t count error" + }, + { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, + "sense datalen error" + }, +}; + +char *qedi_get_iscsi_error(enum iscsi_error_types err_code) +{ + int i; + char *msg = NULL; + + for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) { + if (qedi_iscsi_error[i].error_code == err_code) { + msg = qedi_iscsi_error[i].err_string; + break; + } + } + return msg; +} + +void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data) +{ + struct qedi_conn *qedi_conn; + struct qedi_ctx *qedi; + char warn_notice[] = "iscsi_warning"; + char error_notice[] = "iscsi_error"; + char unknown_msg[] = "Unknown error"; + char *message; + int need_recovery = 0; + u32 err_mask = 0; + char *msg; + + if (!ep) + return; + + qedi_conn = ep->conn; + if (!qedi_conn) + return; + + qedi = ep->qedi; + + QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n", + data->error_code); + + if (err_mask) { + need_recovery = 0; + message = warn_notice; + } else { + need_recovery = 1; + message = error_notice; + } 
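+ /* err_mask is never set above, so every firmware iSCSI error takes the error path; recovery is skipped only when the error code is unknown */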
+ + msg = qedi_get_iscsi_error(data->error_code); + if (!msg) { + need_recovery = 0; + msg = unknown_msg; + } + + iscsi_conn_printk(KERN_ALERT, + qedi_conn->cls_conn->dd_data, + "qedi: %s - %s\n", message, msg); + + if (need_recovery) + qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); +} + +void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data) +{ + struct qedi_conn *qedi_conn; + + if (!ep) + return; + + qedi_conn = ep->conn; + if (!qedi_conn) + return; + + QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n", + data->error_code); + + qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); +} diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h new file mode 100644 index 0000000000000000000000000000000000000000..d3c06bbddb4e8195d0ba1809a1ed6ad671d402c1 --- /dev/null +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -0,0 +1,232 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QEDI_ISCSI_H_ +#define _QEDI_ISCSI_H_ + +#include +#include +#include "qedi.h" + +#define ISCSI_MAX_SESS_PER_HBA 4096 + +#define DEF_KA_TIMEOUT 7200000 +#define DEF_KA_INTERVAL 10000 +#define DEF_KA_MAX_PROBE_COUNT 10 +#define DEF_TOS 0 +#define DEF_TTL 0xfe +#define DEF_SND_SEQ_SCALE 0 +#define DEF_RCV_BUF 0xffff +#define DEF_SND_BUF 0xffff +#define DEF_SEED 0 +#define DEF_MAX_RT_TIME 8000 +#define DEF_MAX_DA_COUNT 2 +#define DEF_SWS_TIMER 1000 +#define DEF_MAX_CWND 2 +#define DEF_PATH_MTU 1500 +#define DEF_MSS 1460 +#define DEF_LL2_MTU 1560 +#define JUMBO_MTU 9000 + +#define MIN_MTU 576 /* rfc 793 */ +#define IPV4_HDR_LEN 20 +#define IPV6_HDR_LEN 40 +#define TCP_HDR_LEN 20 +#define TCP_OPTION_LEN 12 +#define VLAN_LEN 4 + +enum { + EP_STATE_IDLE = 0x0, + EP_STATE_ACQRCONN_START = 0x1, + EP_STATE_ACQRCONN_COMPL = 0x2, + EP_STATE_OFLDCONN_START = 0x4, + EP_STATE_OFLDCONN_COMPL = 0x8, + EP_STATE_DISCONN_START = 0x10, + EP_STATE_DISCONN_COMPL = 0x20, + EP_STATE_CLEANUP_START = 0x40, + EP_STATE_CLEANUP_CMPL = 0x80, + EP_STATE_TCP_FIN_RCVD = 0x100, + EP_STATE_TCP_RST_RCVD = 0x200, + EP_STATE_LOGOUT_SENT = 0x400, + EP_STATE_LOGOUT_RESP_RCVD = 0x800, + EP_STATE_CLEANUP_FAILED = 0x1000, + EP_STATE_OFLDCONN_FAILED = 0x2000, + EP_STATE_CONNECT_FAILED = 0x4000, + EP_STATE_DISCONN_TIMEDOUT = 0x8000, +}; + +struct qedi_conn; + +struct qedi_endpoint { + struct qedi_ctx *qedi; + u32 dst_addr[4]; + u32 src_addr[4]; + u16 src_port; + u16 dst_port; + u16 vlan_id; + u16 pmtu; + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + u8 ip_type; + int state; + wait_queue_head_t ofld_wait; + wait_queue_head_t tcp_ofld_wait; + u32 iscsi_cid; + /* identifier of the connection from qed */ + u32 handle; + u32 fw_cid; + void __iomem *p_doorbell; + + /* Send queue management */ + struct iscsi_wqe *sq; + dma_addr_t sq_dma; + + u16 sq_prod_idx; + u16 fw_sq_prod_idx; + u16 sq_con_idx; + u32 sq_mem_size; + + void *sq_pbl; + dma_addr_t sq_pbl_dma; + u32 sq_pbl_size; + struct qedi_conn *conn; + struct work_struct offload_work; +}; + +#define QEDI_SQ_WQES_MIN 16 + +struct qedi_io_bdt { + struct iscsi_sge *sge_tbl; + dma_addr_t sge_tbl_dma; + u16 sge_valid; +}; + +/** + * struct generic_pdu_resc - login pdu resource structure + * + * @req_buf: driver buffer used to stage payload associated with + * the login request + * @req_dma_addr: dma address for iscsi login request payload buffer + * 
@req_buf_size: actual login request payload length + * @req_wr_ptr: pointer into login request buffer when next data is + * to be written + * @resp_hdr: iscsi header where iscsi login response header is to + * be recreated + * @resp_buf: buffer to stage login response payload + * @resp_dma_addr: login response payload buffer dma address + * @resp_buf_size: login response payload length + * @resp_wr_ptr: pointer into login response buffer when next data is + * to be written + * @req_bd_tbl: iscsi login request payload BD table + * @req_bd_dma: login request BD table dma address + * @resp_bd_tbl: iscsi login response payload BD table + * @resp_bd_dma: login response BD table dma address + * + * The following structure defines buffer info for generic PDUs such as iSCSI Login, + * Logout and NOP + */ +struct generic_pdu_resc { + char *req_buf; + dma_addr_t req_dma_addr; + u32 req_buf_size; + char *req_wr_ptr; + struct iscsi_hdr resp_hdr; + char *resp_buf; + dma_addr_t resp_dma_addr; + u32 resp_buf_size; + char *resp_wr_ptr; + char *req_bd_tbl; + dma_addr_t req_bd_dma; + char *resp_bd_tbl; + dma_addr_t resp_bd_dma; +}; + +struct qedi_conn { + struct iscsi_cls_conn *cls_conn; + struct qedi_ctx *qedi; + struct qedi_endpoint *ep; + struct list_head active_cmd_list; + spinlock_t list_lock; /* internal conn lock */ + u32 active_cmd_count; + u32 cmd_cleanup_req; + u32 cmd_cleanup_cmpl; + + u32 iscsi_conn_id; + int itt; + int abrt_conn; +#define QEDI_CID_RESERVED 0x5AFF + u32 fw_cid; + /* + * Buffer for login negotiation process + */ + struct generic_pdu_resc gen_pdu; + + struct list_head tmf_work_list; + wait_queue_head_t wait_queue; + spinlock_t tmf_work_lock; /* tmf work lock */ + unsigned long flags; +#define QEDI_CONN_FW_CLEANUP 1 +}; + +struct qedi_cmd { + struct list_head io_cmd; + bool io_cmd_in_list; + struct iscsi_hdr hdr; + struct qedi_conn *conn; + struct scsi_cmnd *scsi_cmd; + struct scatterlist *sg; + struct qedi_io_bdt io_tbl; + struct iscsi_task_context request; + unsigned char *sense_buffer; + dma_addr_t sense_buffer_dma; + u16 task_id; + + /* field populated for tmf work queue */ + struct iscsi_task *task; + struct work_struct tmf_work; + int state; +#define CLEANUP_WAIT 1 +#define CLEANUP_RECV 2 +#define CLEANUP_WAIT_FAILED 3 +#define CLEANUP_NOT_REQUIRED 4 +#define LUN_RESET_RESPONSE_RECEIVED 5 +#define RESPONSE_RECEIVED 6 + + int type; +#define TYPEIO 1 +#define TYPERESET 2 + + struct qedi_work_map *list_tmf_work; + /* slowpath management */ + bool use_slowpath; + + struct iscsi_tm_rsp *tmf_resp_buf; + struct qedi_work cqe_work; +}; + +struct qedi_work_map { + struct list_head list; + struct qedi_cmd *qedi_cmd; + int rtid; + + int state; +#define QEDI_WORK_QUEUED 1 +#define QEDI_WORK_SCHEDULED 2 +#define QEDI_WORK_EXIT 3 + + struct work_struct *ptr_tmf_work; +}; + +#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16))) +#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16) + +#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \ + (q)->state == EP_STATE_OFLDCONN_COMPL) + +#endif /* _QEDI_ISCSI_H_ */ diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c new file mode 100644 index 0000000000000000000000000000000000000000..5eda21d903e93dfc96702552d2de9deb7f01c734 --- /dev/null +++ b/drivers/scsi/qedi/qedi_main.c @@ -0,0 +1,2095 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc.
+ * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "qedi.h" +#include "qedi_gbl.h" +#include "qedi_iscsi.h" + +static uint qedi_fw_debug; +module_param(qedi_fw_debug, uint, 0644); +MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3"); + +uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM; +module_param(qedi_dbg_log, uint, 0644); +MODULE_PARM_DESC(qedi_dbg_log, " Default debug level"); + +uint qedi_io_tracing; +module_param(qedi_io_tracing, uint, 0644); +MODULE_PARM_DESC(qedi_io_tracing, + " Enable logging of SCSI requests/completions into trace buffer. (default off)."); + +const struct qed_iscsi_ops *qedi_ops; +static struct scsi_transport_template *qedi_scsi_transport; +static struct pci_driver qedi_pci_driver; +static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu); +static LIST_HEAD(qedi_udev_list); +/* Static function declaration */ +static int qedi_alloc_global_queues(struct qedi_ctx *qedi); +static void qedi_free_global_queues(struct qedi_ctx *qedi); +static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid); +static void qedi_reset_uio_rings(struct qedi_uio_dev *udev); +static void qedi_ll2_free_skbs(struct qedi_ctx *qedi); + +static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) +{ + struct qedi_ctx *qedi; + struct qedi_endpoint *qedi_ep; + struct async_data *data; + int rval = 0; + + if (!context || !fw_handle) { + QEDI_ERR(NULL, "Recv event with ctx NULL\n"); + return -EINVAL; + } + + qedi = (struct qedi_ctx *)context; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle); + + data = (struct async_data *)fw_handle; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n", + data->cid, data->itid, data->error_code, + data->fw_debug_param); + + qedi_ep = qedi->ep_tbl[data->cid]; + + if (!qedi_ep) { + QEDI_WARN(&qedi->dbg_ctx, + "Cannot process event, ep already disconnected, cid=0x%x\n", + data->cid); + WARN_ON(1); + return -ENODEV; + } + + switch (fw_event_code) { + case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE: + if (qedi_ep->state == EP_STATE_OFLDCONN_START) + qedi_ep->state = EP_STATE_OFLDCONN_COMPL; + + wake_up_interruptible(&qedi_ep->tcp_ofld_wait); + break; + case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE: + qedi_ep->state = EP_STATE_DISCONN_COMPL; + wake_up_interruptible(&qedi_ep->tcp_ofld_wait); + break; + case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR: + qedi_process_iscsi_error(qedi_ep, data); + break; + case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD: + case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD: + case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME: + case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT: + case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT: + case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2: + case ISCSI_EVENT_TYPE_TCP_CONN_ERROR: + qedi_process_tcp_error(qedi_ep, data); + break; + default: + QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n", + fw_event_code); + } + + return rval; +} + +static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode) +{ + struct qedi_uio_dev *udev = uinfo->priv; + struct qedi_ctx *qedi = udev->qedi; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (udev->uio_dev != -1) + return -EBUSY; + + rtnl_lock(); + 
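+ /* Remember which UIO minor opened the device and start with clean
+  * LL2 rings before marking the device as opened.
+  */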
udev->uio_dev = iminor(inode); + qedi_reset_uio_rings(udev); + set_bit(UIO_DEV_OPENED, &qedi->flags); + rtnl_unlock(); + + return 0; +} + +static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode) +{ + struct qedi_uio_dev *udev = uinfo->priv; + struct qedi_ctx *qedi = udev->qedi; + + udev->uio_dev = -1; + clear_bit(UIO_DEV_OPENED, &qedi->flags); + qedi_ll2_free_skbs(qedi); + return 0; +} + +static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) +{ + if (udev->ll2_ring) { + free_page((unsigned long)udev->ll2_ring); + udev->ll2_ring = NULL; + } + + if (udev->ll2_buf) { + free_pages((unsigned long)udev->ll2_buf, 2); + udev->ll2_buf = NULL; + } +} + +static void __qedi_free_uio(struct qedi_uio_dev *udev) +{ + uio_unregister_device(&udev->qedi_uinfo); + + __qedi_free_uio_rings(udev); + + pci_dev_put(udev->pdev); + kfree(udev->uctrl); + kfree(udev); +} + +static void qedi_free_uio(struct qedi_uio_dev *udev) +{ + if (!udev) + return; + + list_del_init(&udev->list); + __qedi_free_uio(udev); +} + +static void qedi_reset_uio_rings(struct qedi_uio_dev *udev) +{ + struct qedi_ctx *qedi = NULL; + struct qedi_uio_ctrl *uctrl = NULL; + + qedi = udev->qedi; + uctrl = udev->uctrl; + + spin_lock_bh(&qedi->ll2_lock); + uctrl->host_rx_cons = 0; + uctrl->hw_rx_prod = 0; + uctrl->hw_rx_bd_prod = 0; + uctrl->host_rx_bd_cons = 0; + + memset(udev->ll2_ring, 0, udev->ll2_ring_size); + memset(udev->ll2_buf, 0, udev->ll2_buf_size); + spin_unlock_bh(&qedi->ll2_lock); +} + +static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev) +{ + int rc = 0; + + if (udev->ll2_ring || udev->ll2_buf) + return rc; + + /* Allocating memory for LL2 ring */ + udev->ll2_ring_size = QEDI_PAGE_SIZE; + udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); + if (!udev->ll2_ring) { + rc = -ENOMEM; + goto exit_alloc_ring; + } + + /* Allocating memory for Tx/Rx pkt buffer */ + udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE; + udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size); + udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP | + __GFP_ZERO, 2); + if (!udev->ll2_buf) { + rc = -ENOMEM; + goto exit_alloc_buf; + } + return rc; + +exit_alloc_buf: + free_page((unsigned long)udev->ll2_ring); + udev->ll2_ring = NULL; +exit_alloc_ring: + return rc; +} + +static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) +{ + struct qedi_uio_dev *udev = NULL; + struct qedi_uio_ctrl *uctrl = NULL; + int rc = 0; + + list_for_each_entry(udev, &qedi_udev_list, list) { + if (udev->pdev == qedi->pdev) { + udev->qedi = qedi; + if (__qedi_alloc_uio_rings(udev)) { + udev->qedi = NULL; + return -ENOMEM; + } + qedi->udev = udev; + return 0; + } + } + + udev = kzalloc(sizeof(*udev), GFP_KERNEL); + if (!udev) { + rc = -ENOMEM; + goto err_udev; + } + + uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL); + if (!uctrl) { + rc = -ENOMEM; + goto err_uctrl; + } + + udev->uio_dev = -1; + + udev->qedi = qedi; + udev->pdev = qedi->pdev; + udev->uctrl = uctrl; + + rc = __qedi_alloc_uio_rings(udev); + if (rc) + goto err_uio_rings; + + list_add(&udev->list, &qedi_udev_list); + + pci_dev_get(udev->pdev); + qedi->udev = udev; + + udev->tx_pkt = udev->ll2_buf; + udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; + return 0; + + err_uio_rings: + kfree(uctrl); + err_uctrl: + kfree(udev); + err_udev: + return -ENOMEM; +} + +static int qedi_init_uio(struct qedi_ctx *qedi) +{ + struct qedi_uio_dev *udev = qedi->udev; + struct uio_info *uinfo; + int ret = 0; + + if (!udev) + return -ENOMEM; + + uinfo = &udev->qedi_uinfo; + + 
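+ /* Expose the control structure, the LL2 ring and the packet buffers
+  * to user space (iscsiuio) as three UIO memory regions.
+  */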
uinfo->mem[0].addr = (unsigned long)udev->uctrl; + uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl); + uinfo->mem[0].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[1].addr = (unsigned long)udev->ll2_ring; + uinfo->mem[1].size = udev->ll2_ring_size; + uinfo->mem[1].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[2].addr = (unsigned long)udev->ll2_buf; + uinfo->mem[2].size = udev->ll2_buf_size; + uinfo->mem[2].memtype = UIO_MEM_LOGICAL; + + uinfo->name = "qedi_uio"; + uinfo->version = QEDI_MODULE_VERSION; + uinfo->irq = UIO_IRQ_CUSTOM; + + uinfo->open = qedi_uio_open; + uinfo->release = qedi_uio_close; + + if (udev->uio_dev == -1) { + if (!uinfo->priv) { + uinfo->priv = udev; + + ret = uio_register_device(&udev->pdev->dev, uinfo); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "UIO registration failed\n"); + } + } + } + + return ret; +} + +static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, + struct qed_sb_info *sb_info, u16 sb_id) +{ + struct status_block *sb_virt; + dma_addr_t sb_phys; + int ret; + + sb_virt = dma_alloc_coherent(&qedi->pdev->dev, + sizeof(struct status_block), &sb_phys, + GFP_KERNEL); + if (!sb_virt) { + QEDI_ERR(&qedi->dbg_ctx, + "Status block allocation failed for id = %d.\n", + sb_id); + return -ENOMEM; + } + + ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, + sb_id, QED_SB_TYPE_STORAGE); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "Status block initialization failed for id = %d.\n", + sb_id); + return ret; + } + + return 0; +} + +static void qedi_free_sb(struct qedi_ctx *qedi) +{ + struct qed_sb_info *sb_info; + int id; + + for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { + sb_info = &qedi->sb_array[id]; + if (sb_info->sb_virt) + dma_free_coherent(&qedi->pdev->dev, + sizeof(*sb_info->sb_virt), + (void *)sb_info->sb_virt, + sb_info->sb_phys); + } +} + +static void qedi_free_fp(struct qedi_ctx *qedi) +{ + kfree(qedi->fp_array); + kfree(qedi->sb_array); +} + +static void qedi_destroy_fp(struct qedi_ctx *qedi) +{ + qedi_free_sb(qedi); + qedi_free_fp(qedi); +} + +static int qedi_alloc_fp(struct qedi_ctx *qedi) +{ + int ret = 0; + + qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi), + sizeof(struct qedi_fastpath), GFP_KERNEL); + if (!qedi->fp_array) { + QEDI_ERR(&qedi->dbg_ctx, + "fastpath fp array allocation failed.\n"); + return -ENOMEM; + } + + qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi), + sizeof(struct qed_sb_info), GFP_KERNEL); + if (!qedi->sb_array) { + QEDI_ERR(&qedi->dbg_ctx, + "fastpath sb array allocation failed.\n"); + ret = -ENOMEM; + goto free_fp; + } + + return ret; + +free_fp: + qedi_free_fp(qedi); + return ret; +} + +static void qedi_int_fp(struct qedi_ctx *qedi) +{ + struct qedi_fastpath *fp; + int id; + + memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) * + sizeof(*qedi->fp_array)); + memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) * + sizeof(*qedi->sb_array)); + + for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { + fp = &qedi->fp_array[id]; + fp->sb_info = &qedi->sb_array[id]; + fp->sb_id = id; + fp->qedi = qedi; + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", + "qedi", id); + + /* fp_array[i] ---- irq cookie + * So init data which is needed in int ctx + */ + } +} + +static int qedi_prepare_fp(struct qedi_ctx *qedi) +{ + struct qedi_fastpath *fp; + int id, ret = 0; + + ret = qedi_alloc_fp(qedi); + if (ret) + goto err; + + qedi_int_fp(qedi); + + for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { + fp = &qedi->fp_array[id]; + ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "SB 
allocation and initialization failed.\n"); + ret = -EIO; + goto err_init; + } + } + + return 0; + +err_init: + qedi_free_sb(qedi); + qedi_free_fp(qedi); +err: + return ret; +} + +static int qedi_setup_cid_que(struct qedi_ctx *qedi) +{ + int i; + + qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns, + sizeof(u32), GFP_KERNEL); + if (!qedi->cid_que.cid_que_base) + return -ENOMEM; + + qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns, + sizeof(struct qedi_conn *), + GFP_KERNEL); + if (!qedi->cid_que.conn_cid_tbl) { + kfree(qedi->cid_que.cid_que_base); + qedi->cid_que.cid_que_base = NULL; + return -ENOMEM; + } + + qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base; + qedi->cid_que.cid_q_prod_idx = 0; + qedi->cid_que.cid_q_cons_idx = 0; + qedi->cid_que.cid_q_max_idx = qedi->max_active_conns; + qedi->cid_que.cid_free_cnt = qedi->max_active_conns; + + for (i = 0; i < qedi->max_active_conns; i++) { + qedi->cid_que.cid_que[i] = i; + qedi->cid_que.conn_cid_tbl[i] = NULL; + } + + return 0; +} + +static void qedi_release_cid_que(struct qedi_ctx *qedi) +{ + kfree(qedi->cid_que.cid_que_base); + qedi->cid_que.cid_que_base = NULL; + + kfree(qedi->cid_que.conn_cid_tbl); + qedi->cid_que.conn_cid_tbl = NULL; +} + +static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size, + u16 start_id, u16 next) +{ + id_tbl->start = start_id; + id_tbl->max = size; + id_tbl->next = next; + spin_lock_init(&id_tbl->lock); + id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); + if (!id_tbl->table) + return -ENOMEM; + + return 0; +} + +static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl) +{ + kfree(id_tbl->table); + id_tbl->table = NULL; +} + +int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id) +{ + int ret = -1; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return ret; + + spin_lock(&id_tbl->lock); + if (!test_bit(id, id_tbl->table)) { + set_bit(id, id_tbl->table); + ret = 0; + } + spin_unlock(&id_tbl->lock); + return ret; +} + +u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl) +{ + u16 id; + + spin_lock(&id_tbl->lock); + id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); + if (id >= id_tbl->max) { + id = QEDI_LOCAL_PORT_INVALID; + if (id_tbl->next != 0) { + id = find_first_zero_bit(id_tbl->table, id_tbl->next); + if (id >= id_tbl->next) + id = QEDI_LOCAL_PORT_INVALID; + } + } + + if (id < id_tbl->max) { + set_bit(id, id_tbl->table); + id_tbl->next = (id + 1) & (id_tbl->max - 1); + id += id_tbl->start; + } + + spin_unlock(&id_tbl->lock); + + return id; +} + +void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id) +{ + if (id == QEDI_LOCAL_PORT_INVALID) + return; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return; + + clear_bit(id, id_tbl->table); +} + +static void qedi_cm_free_mem(struct qedi_ctx *qedi) +{ + kfree(qedi->ep_tbl); + qedi->ep_tbl = NULL; + qedi_free_id_tbl(&qedi->lcl_port_tbl); +} + +static int qedi_cm_alloc_mem(struct qedi_ctx *qedi) +{ + u16 port_id; + + qedi->ep_tbl = kzalloc((qedi->max_active_conns * + sizeof(struct qedi_endpoint *)), GFP_KERNEL); + if (!qedi->ep_tbl) + return -ENOMEM; + port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE; + if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE, + QEDI_LOCAL_PORT_MIN, port_id)) { + qedi_cm_free_mem(qedi); + return -ENOMEM; + } + + return 0; +} + +static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct qedi_ctx *qedi = NULL; + + shost = iscsi_host_alloc(&qedi_host_template, + sizeof(struct qedi_ctx), 
0); + if (!shost) { + QEDI_ERR(NULL, "Could not allocate shost\n"); + goto exit_setup_shost; + } + + shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA; + shost->max_channel = 0; + shost->max_lun = ~0; + shost->max_cmd_len = 16; + shost->transportt = qedi_scsi_transport; + + qedi = iscsi_host_priv(shost); + memset(qedi, 0, sizeof(*qedi)); + qedi->shost = shost; + qedi->dbg_ctx.host_no = shost->host_no; + qedi->pdev = pdev; + qedi->dbg_ctx.pdev = pdev; + qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA; + qedi->max_sqes = QEDI_SQ_SIZE; + + if (shost_use_blk_mq(shost)) + shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi); + + pci_set_drvdata(pdev, qedi); + +exit_setup_shost: + return qedi; +} + +static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2) +{ + struct qedi_ctx *qedi = (struct qedi_ctx *)cookie; + struct qedi_uio_dev *udev; + struct qedi_uio_ctrl *uctrl; + struct skb_work_list *work; + u32 prod; + + if (!qedi) { + QEDI_ERR(NULL, "qedi is NULL\n"); + return -1; + } + + if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO, + "UIO DEV is not opened\n"); + kfree_skb(skb); + return 0; + } + + udev = qedi->udev; + uctrl = udev->uctrl; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate work so dropping frame.\n"); + kfree_skb(skb); + return 0; + } + + INIT_LIST_HEAD(&work->list); + work->skb = skb; + + if (skb_vlan_tag_present(skb)) + work->vlan_id = skb_vlan_tag_get(skb); + + if (work->vlan_id) + __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id); + + spin_lock_bh(&qedi->ll2_lock); + list_add_tail(&work->list, &qedi->ll2_skb_list); + + ++uctrl->hw_rx_prod_cnt; + prod = (uctrl->hw_rx_prod + 1) % RX_RING; + if (prod != uctrl->host_rx_cons) { + uctrl->hw_rx_prod = prod; + spin_unlock_bh(&qedi->ll2_lock); + wake_up_process(qedi->ll2_recv_thread); + return 0; + } + + spin_unlock_bh(&qedi->ll2_lock); + return 0; +} + +/* map this skb to iscsiuio mmaped region */ +static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb, + u16 vlan_id) +{ + struct qedi_uio_dev *udev = NULL; + struct qedi_uio_ctrl *uctrl = NULL; + struct qedi_rx_bd rxbd; + struct qedi_rx_bd *p_rxbd; + u32 rx_bd_prod; + void *pkt; + int len = 0; + + if (!qedi) { + QEDI_ERR(NULL, "qedi is NULL\n"); + return -1; + } + + udev = qedi->udev; + uctrl = udev->uctrl; + pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE); + len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE); + memcpy(pkt, skb->data, len); + + memset(&rxbd, 0, sizeof(rxbd)); + rxbd.rx_pkt_index = uctrl->hw_rx_prod; + rxbd.rx_pkt_len = len; + rxbd.vlan_id = vlan_id; + + uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD; + rx_bd_prod = uctrl->hw_rx_bd_prod; + p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring; + p_rxbd += rx_bd_prod; + + memcpy(p_rxbd, &rxbd, sizeof(rxbd)); + + /* notify the iscsiuio about new packet */ + uio_event_notify(&udev->qedi_uinfo); + + return 0; +} + +static void qedi_ll2_free_skbs(struct qedi_ctx *qedi) +{ + struct skb_work_list *work, *work_tmp; + + spin_lock_bh(&qedi->ll2_lock); + list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) { + list_del(&work->list); + if (work->skb) + kfree_skb(work->skb); + kfree(work); + } + spin_unlock_bh(&qedi->ll2_lock); +} + +static int qedi_ll2_recv_thread(void *arg) +{ + struct qedi_ctx *qedi = (struct qedi_ctx *)arg; + struct skb_work_list *work, *work_tmp; + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + 
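+ /* Drain the frames queued by qedi_ll2_rx() and copy each one into
+  * the iscsiuio mmapped ring before going back to sleep.
+  */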
spin_lock_bh(&qedi->ll2_lock); + list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, + list) { + list_del(&work->list); + qedi_ll2_process_skb(qedi, work->skb, work->vlan_id); + kfree_skb(work->skb); + kfree(work); + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&qedi->ll2_lock); + schedule(); + } + + __set_current_state(TASK_RUNNING); + return 0; +} + +static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi) +{ + u8 num_sq_pages; + u32 log_page_size; + int rval = 0; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n", + MIN_NUM_CPUS_MSIX(qedi)); + + num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE; + + qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi); + + memset(&qedi->pf_params.iscsi_pf_params, 0, + sizeof(qedi->pf_params.iscsi_pf_params)); + + qedi->p_cpuq = pci_alloc_consistent(qedi->pdev, + qedi->num_queues * sizeof(struct qedi_glbl_q_params), + &qedi->hw_p_cpuq); + if (!qedi->p_cpuq) { + QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n"); + rval = -1; + goto err_alloc_mem; + } + + rval = qedi_alloc_global_queues(qedi); + if (rval) { + QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n"); + rval = -1; + goto err_alloc_mem; + } + + qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA; + qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK; + qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10; + qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages; + qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages; + qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; + qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; + qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; + + for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { + if ((1 << log_page_size) == PAGE_SIZE) + break; + } + qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size; + + qedi->pf_params.iscsi_pf_params.glbl_q_params_addr = + (u64)qedi->hw_p_cpuq; + + /* RQ BDQ initializations. 
+ * rq_num_entries: suggested value for Initiator is 16 (4KB RQ) + * rqe_log_size: 8 for 256B RQE + */ + qedi->pf_params.iscsi_pf_params.rqe_log_size = 8; + /* BDQ address and size */ + qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] = + qedi->bdq_pbl_list_dma; + qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] = + qedi->bdq_pbl_list_num_entries; + qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE; + + /* cq_num_entries: num_tasks + rq_num_entries */ + qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048; + + qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX; + qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1; + qedi->pf_params.iscsi_pf_params.ooo_enable = 1; + +err_alloc_mem: + return rval; +} + +/* Free DMA coherent memory for array of queue pointers we pass to qed */ +static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi) +{ + size_t size = 0; + + if (qedi->p_cpuq) { + size = qedi->num_queues * sizeof(struct qedi_glbl_q_params); + pci_free_consistent(qedi->pdev, size, qedi->p_cpuq, + qedi->hw_p_cpuq); + } + + qedi_free_global_queues(qedi); + + kfree(qedi->global_queues); +} + +static void qedi_link_update(void *dev, struct qed_link_output *link) +{ + struct qedi_ctx *qedi = (struct qedi_ctx *)dev; + + if (link->link_up) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n"); + atomic_set(&qedi->link_state, QEDI_LINK_UP); + } else { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Link Down event.\n"); + atomic_set(&qedi->link_state, QEDI_LINK_DOWN); + } +} + +static struct qed_iscsi_cb_ops qedi_cb_ops = { + { + .link_update = qedi_link_update, + } +}; + +static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe, + u16 que_idx, struct qedi_percpu_s *p) +{ + struct qedi_work *qedi_work; + struct qedi_conn *q_conn; + struct iscsi_conn *conn; + struct qedi_cmd *qedi_cmd; + u32 iscsi_cid; + int rc = 0; + + iscsi_cid = cqe->cqe_common.conn_id; + q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + if (!q_conn) { + QEDI_WARN(&qedi->dbg_ctx, + "Session no longer exists for cid=0x%x!!\n", + iscsi_cid); + return -1; + } + conn = q_conn->cls_conn->dd_data; + + switch (cqe->cqe_common.cqe_type) { + case ISCSI_CQE_TYPE_SOLICITED: + case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE: + qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid); + if (!qedi_cmd) { + rc = -1; + break; + } + INIT_LIST_HEAD(&qedi_cmd->cqe_work.list); + qedi_cmd->cqe_work.qedi = qedi; + memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe)); + qedi_cmd->cqe_work.que_idx = que_idx; + qedi_cmd->cqe_work.is_solicited = true; + list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list); + break; + case ISCSI_CQE_TYPE_UNSOLICITED: + case ISCSI_CQE_TYPE_DUMMY: + case ISCSI_CQE_TYPE_TASK_CLEANUP: + qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC); + if (!qedi_work) { + rc = -1; + break; + } + INIT_LIST_HEAD(&qedi_work->list); + qedi_work->qedi = qedi; + memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe)); + qedi_work->que_idx = que_idx; + qedi_work->is_solicited = false; + list_add_tail(&qedi_work->list, &p->work_list); + break; + default: + rc = -1; + QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n"); + } + return rc; +} + +static bool qedi_process_completions(struct qedi_fastpath *fp) +{ + struct qedi_ctx *qedi = fp->qedi; + struct qed_sb_info *sb_info = fp->sb_info; + struct status_block *sb = sb_info->sb_virt; + struct qedi_percpu_s *p = NULL; + struct global_queue *que; + u16 prod_idx; + unsigned long flags; + union iscsi_cqe *cqe; + int cpu; + int 
ret; + + /* Get the current firmware producer index */ + prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX]; + + if (prod_idx >= QEDI_CQ_SIZE) + prod_idx = prod_idx % QEDI_CQ_SIZE; + + que = qedi->global_queues[fp->sb_id]; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, + "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n", + que, prod_idx, que->cq_cons_idx, fp->sb_id); + + qedi->intr_cpu = fp->sb_id; + cpu = smp_processor_id(); + p = &per_cpu(qedi_percpu, cpu); + + if (unlikely(!p->iothread)) + WARN_ON(1); + + spin_lock_irqsave(&p->p_work_lock, flags); + while (que->cq_cons_idx != prod_idx) { + cqe = &que->cq[que->cq_cons_idx]; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, + "cqe=%p prod_idx=%d cons_idx=%d.\n", + cqe, prod_idx, que->cq_cons_idx); + + ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p); + if (ret) + continue; + + que->cq_cons_idx++; + if (que->cq_cons_idx == QEDI_CQ_SIZE) + que->cq_cons_idx = 0; + } + wake_up_process(p->iothread); + spin_unlock_irqrestore(&p->p_work_lock, flags); + + return true; +} + +static bool qedi_fp_has_work(struct qedi_fastpath *fp) +{ + struct qedi_ctx *qedi = fp->qedi; + struct global_queue *que; + struct qed_sb_info *sb_info = fp->sb_info; + struct status_block *sb = sb_info->sb_virt; + u16 prod_idx; + + barrier(); + + /* Get the current firmware producer index */ + prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX]; + + /* Get the pointer to the global CQ this completion is on */ + que = qedi->global_queues[fp->sb_id]; + + /* prod idx wrap around uint16 */ + if (prod_idx >= QEDI_CQ_SIZE) + prod_idx = prod_idx % QEDI_CQ_SIZE; + + return (que->cq_cons_idx != prod_idx); +} + +/* MSI-X fastpath handler code */ +static irqreturn_t qedi_msix_handler(int irq, void *dev_id) +{ + struct qedi_fastpath *fp = dev_id; + struct qedi_ctx *qedi = fp->qedi; + bool wake_io_thread = true; + + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); + +process_again: + wake_io_thread = qedi_process_completions(fp); + if (wake_io_thread) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "process already running\n"); + } + + if (qedi_fp_has_work(fp) == 0) + qed_sb_update_sb_idx(fp->sb_info); + + /* Check for more work */ + rmb(); + + if (qedi_fp_has_work(fp) == 0) + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); + else + goto process_again; + + return IRQ_HANDLED; +} + +/* simd handler for MSI/INTa */ +static void qedi_simd_int_handler(void *cookie) +{ + /* Cookie is qedi_ctx struct */ + struct qedi_ctx *qedi = (struct qedi_ctx *)cookie; + + QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi); +} + +#define QEDI_SIMD_HANDLER_NUM 0 +static void qedi_sync_free_irqs(struct qedi_ctx *qedi) +{ + int i; + + if (qedi->int_info.msix_cnt) { + for (i = 0; i < qedi->int_info.used_cnt; i++) { + synchronize_irq(qedi->int_info.msix[i].vector); + irq_set_affinity_hint(qedi->int_info.msix[i].vector, + NULL); + free_irq(qedi->int_info.msix[i].vector, + &qedi->fp_array[i]); + } + } else { + qedi_ops->common->simd_handler_clean(qedi->cdev, + QEDI_SIMD_HANDLER_NUM); + } + + qedi->int_info.used_cnt = 0; + qedi_ops->common->set_fp_int(qedi->cdev, 0); +} + +static int qedi_request_msix_irq(struct qedi_ctx *qedi) +{ + int i, rc, cpu; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) { + rc = request_irq(qedi->int_info.msix[i].vector, + qedi_msix_handler, 0, "qedi", + &qedi->fp_array[i]); + + if (rc) { + QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n"); + qedi_sync_free_irqs(qedi); + return rc; + } + qedi->int_info.used_cnt++; + rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector, 
+ get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } + + return 0; +} + +static int qedi_setup_int(struct qedi_ctx *qedi) +{ + int rc = 0; + + rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus()); + rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info); + if (rc) + goto exit_setup_int; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "Number of msix_cnt = 0x%x num of cpus = 0x%x\n", + qedi->int_info.msix_cnt, num_online_cpus()); + + if (qedi->int_info.msix_cnt) { + rc = qedi_request_msix_irq(qedi); + goto exit_setup_int; + } else { + qedi_ops->common->simd_handler_config(qedi->cdev, &qedi, + QEDI_SIMD_HANDLER_NUM, + qedi_simd_int_handler); + qedi->int_info.used_cnt = 1; + } + +exit_setup_int: + return rc; +} + +static void qedi_free_bdq(struct qedi_ctx *qedi) +{ + int i; + + if (qedi->bdq_pbl_list) + dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE, + qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma); + + if (qedi->bdq_pbl) + dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size, + qedi->bdq_pbl, qedi->bdq_pbl_dma); + + for (i = 0; i < QEDI_BDQ_NUM; i++) { + if (qedi->bdq[i].buf_addr) { + dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE, + qedi->bdq[i].buf_addr, + qedi->bdq[i].buf_dma); + } + } +} + +static void qedi_free_global_queues(struct qedi_ctx *qedi) +{ + int i; + struct global_queue **gl = qedi->global_queues; + + for (i = 0; i < qedi->num_queues; i++) { + if (!gl[i]) + continue; + + if (gl[i]->cq) + dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size, + gl[i]->cq, gl[i]->cq_dma); + if (gl[i]->cq_pbl) + dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size, + gl[i]->cq_pbl, gl[i]->cq_pbl_dma); + + kfree(gl[i]); + } + qedi_free_bdq(qedi); +} + +static int qedi_alloc_bdq(struct qedi_ctx *qedi) +{ + int i; + struct scsi_bd *pbl; + u64 *list; + dma_addr_t page; + + /* Alloc dma memory for BDQ buffers */ + for (i = 0; i < QEDI_BDQ_NUM; i++) { + qedi->bdq[i].buf_addr = + dma_alloc_coherent(&qedi->pdev->dev, + QEDI_BDQ_BUF_SIZE, + &qedi->bdq[i].buf_dma, + GFP_KERNEL); + if (!qedi->bdq[i].buf_addr) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not allocate BDQ buffer %d.\n", i); + return -ENOMEM; + } + } + + /* Alloc dma memory for BDQ page buffer list */ + qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd); + qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE); + qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n", + qedi->rq_num_entries); + + qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev, + qedi->bdq_pbl_mem_size, + &qedi->bdq_pbl_dma, GFP_KERNEL); + if (!qedi->bdq_pbl) { + QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n"); + return -ENOMEM; + } + + /* + * Populate BDQ PBL with physical and virtual address of individual + * BDQ buffers + */ + pbl = (struct scsi_bd *)qedi->bdq_pbl; + for (i = 0; i < QEDI_BDQ_NUM; i++) { + pbl->address.hi = + cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma)); + pbl->address.lo = + cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma)); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n", + pbl, pbl->address.hi, pbl->address.lo, i); + pbl->opaque.hi = 0; + pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i)); + pbl++; + } + + /* Allocate list of PBL pages */ + qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev, + PAGE_SIZE, + &qedi->bdq_pbl_list_dma, + GFP_KERNEL); + if (!qedi->bdq_pbl_list) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not allocate list of PBL 
pages.\n"); + return -ENOMEM; + } + memset(qedi->bdq_pbl_list, 0, PAGE_SIZE); + + /* + * Now populate PBL list with pages that contain pointers to the + * individual buffers. + */ + qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE; + list = (u64 *)qedi->bdq_pbl_list; + page = qedi->bdq_pbl_list_dma; + for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) { + *list = qedi->bdq_pbl_dma; + list++; + page += PAGE_SIZE; + } + + return 0; +} + +static int qedi_alloc_global_queues(struct qedi_ctx *qedi) +{ + u32 *list; + int i; + int status = 0, rc; + u32 *pbl; + dma_addr_t page; + int num_pages; + + /* + * Number of global queues (CQ / RQ). This should + * be <= number of available MSIX vectors for the PF + */ + if (!qedi->num_queues) { + QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n"); + return 1; + } + + /* Make sure we allocated the PBL that will contain the physical + * addresses of our queues + */ + if (!qedi->p_cpuq) { + status = 1; + goto mem_alloc_failure; + } + + qedi->global_queues = kzalloc((sizeof(struct global_queue *) * + qedi->num_queues), GFP_KERNEL); + if (!qedi->global_queues) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to allocate global queues array ptr memory\n"); + return -ENOMEM; + } + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "qedi->global_queues=%p.\n", qedi->global_queues); + + /* Allocate DMA coherent buffers for BDQ */ + rc = qedi_alloc_bdq(qedi); + if (rc) + goto mem_alloc_failure; + + /* Allocate a CQ and an associated PBL for each MSI-X + * vector. + */ + for (i = 0; i < qedi->num_queues; i++) { + qedi->global_queues[i] = + kzalloc(sizeof(*qedi->global_queues[0]), + GFP_KERNEL); + if (!qedi->global_queues[i]) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to allocation global queue %d.\n", i); + goto mem_alloc_failure; + } + + qedi->global_queues[i]->cq_mem_size = + (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe); + qedi->global_queues[i]->cq_mem_size = + (qedi->global_queues[i]->cq_mem_size + + (QEDI_PAGE_SIZE - 1)); + + qedi->global_queues[i]->cq_pbl_size = + (qedi->global_queues[i]->cq_mem_size / + QEDI_PAGE_SIZE) * sizeof(void *); + qedi->global_queues[i]->cq_pbl_size = + (qedi->global_queues[i]->cq_pbl_size + + (QEDI_PAGE_SIZE - 1)); + + qedi->global_queues[i]->cq = + dma_alloc_coherent(&qedi->pdev->dev, + qedi->global_queues[i]->cq_mem_size, + &qedi->global_queues[i]->cq_dma, + GFP_KERNEL); + + if (!qedi->global_queues[i]->cq) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate cq.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + memset(qedi->global_queues[i]->cq, 0, + qedi->global_queues[i]->cq_mem_size); + + qedi->global_queues[i]->cq_pbl = + dma_alloc_coherent(&qedi->pdev->dev, + qedi->global_queues[i]->cq_pbl_size, + &qedi->global_queues[i]->cq_pbl_dma, + GFP_KERNEL); + + if (!qedi->global_queues[i]->cq_pbl) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate cq PBL.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + memset(qedi->global_queues[i]->cq_pbl, 0, + qedi->global_queues[i]->cq_pbl_size); + + /* Create PBL */ + num_pages = qedi->global_queues[i]->cq_mem_size / + QEDI_PAGE_SIZE; + page = qedi->global_queues[i]->cq_dma; + pbl = (u32 *)qedi->global_queues[i]->cq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += QEDI_PAGE_SIZE; + } + } + + list = (u32 *)qedi->p_cpuq; + + /* + * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer, + * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. 
Each PBL pointer points + * to the physical address which contains an array of pointers to the + * physical addresses of the specific queue pages. + */ + for (i = 0; i < qedi->num_queues; i++) { + *list = (u32)qedi->global_queues[i]->cq_pbl_dma; + list++; + *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32); + list++; + + *list = (u32)0; + list++; + *list = (u32)((u64)0 >> 32); + list++; + } + + return 0; + +mem_alloc_failure: + qedi_free_global_queues(qedi); + return status; +} + +int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) +{ + int rval = 0; + u32 *pbl; + dma_addr_t page; + int num_pages; + + if (!ep) + return -EIO; + + /* Calculate appropriate queue and PBL sizes */ + ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe); + ep->sq_mem_size += QEDI_PAGE_SIZE - 1; + + ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); + ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; + + ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, + &ep->sq_dma, GFP_KERNEL); + if (!ep->sq) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate send queue.\n"); + rval = -ENOMEM; + goto out; + } + memset(ep->sq, 0, ep->sq_mem_size); + + ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, + &ep->sq_pbl_dma, GFP_KERNEL); + if (!ep->sq_pbl) { + QEDI_WARN(&qedi->dbg_ctx, + "Could not allocate send queue PBL.\n"); + rval = -ENOMEM; + goto out_free_sq; + } + memset(ep->sq_pbl, 0, ep->sq_pbl_size); + + /* Create PBL */ + num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE; + page = ep->sq_dma; + pbl = (u32 *)ep->sq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += QEDI_PAGE_SIZE; + } + + return rval; + +out_free_sq: + dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq, + ep->sq_dma); +out: + return rval; +} + +void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) +{ + if (ep->sq_pbl) + dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl, + ep->sq_pbl_dma); + if (ep->sq) + dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq, + ep->sq_dma); +} + +int qedi_get_task_idx(struct qedi_ctx *qedi) +{ + s16 tmp_idx; + +again: + tmp_idx = find_first_zero_bit(qedi->task_idx_map, + MAX_ISCSI_TASK_ENTRIES); + + if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) { + QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n"); + tmp_idx = -1; + goto err_idx; + } + + if (test_and_set_bit(tmp_idx, qedi->task_idx_map)) + goto again; + +err_idx: + return tmp_idx; +} + +void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) +{ + if (!test_and_clear_bit(idx, qedi->task_idx_map)) { + QEDI_ERR(&qedi->dbg_ctx, + "FW task context, already cleared, tid=0x%x\n", idx); + WARN_ON(1); + } +} + +void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, + struct qedi_cmd *cmd) +{ + qedi->itt_map[tid].itt = proto_itt; + qedi->itt_map[tid].p_cmd = cmd; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "update itt map tid=0x%x, with proto itt=0x%x\n", tid, + qedi->itt_map[tid].itt); +} + +void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid) +{ + u16 i; + + for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) { + if (qedi->itt_map[i].itt == itt) { + *tid = i; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "Ref itt=0x%x, found at tid=0x%x\n", + itt, *tid); + return; + } + } + + WARN_ON(1); +} + +void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt) +{ + *proto_itt = qedi->itt_map[tid].itt; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, + "Get itt map tid [0x%x with 
proto itt[0x%x]", + tid, *proto_itt); +} + +struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid) +{ + struct qedi_cmd *cmd = NULL; + + if (tid > MAX_ISCSI_TASK_ENTRIES) + return NULL; + + cmd = qedi->itt_map[tid].p_cmd; + if (cmd->task_id != tid) + return NULL; + + qedi->itt_map[tid].p_cmd = NULL; + + return cmd; +} + +static int qedi_alloc_itt(struct qedi_ctx *qedi) +{ + qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES, + sizeof(struct qedi_itt_map), GFP_KERNEL); + if (!qedi->itt_map) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to allocate itt map array memory\n"); + return -ENOMEM; + } + return 0; +} + +static void qedi_free_itt(struct qedi_ctx *qedi) +{ + kfree(qedi->itt_map); +} + +static struct qed_ll2_cb_ops qedi_ll2_cb_ops = { + .rx_cb = qedi_ll2_rx, + .tx_cb = NULL, +}; + +static int qedi_percpu_io_thread(void *arg) +{ + struct qedi_percpu_s *p = arg; + struct qedi_work *work, *tmp; + unsigned long flags; + LIST_HEAD(work_list); + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + spin_lock_irqsave(&p->p_work_lock, flags); + while (!list_empty(&p->work_list)) { + list_splice_init(&p->work_list, &work_list); + spin_unlock_irqrestore(&p->p_work_lock, flags); + + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + qedi_fp_process_cqes(work); + if (!work->is_solicited) + kfree(work); + } + cond_resched(); + spin_lock_irqsave(&p->p_work_lock, flags); + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&p->p_work_lock, flags); + schedule(); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + +static int qedi_cpu_online(unsigned int cpu) +{ + struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); + struct task_struct *thread; + + thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p, + cpu_to_node(cpu), + "qedi_thread/%d", cpu); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + return 0; +} + +static int qedi_cpu_offline(unsigned int cpu) +{ + struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); + struct qedi_work *work, *tmp; + struct task_struct *thread; + + spin_lock_bh(&p->p_work_lock); + thread = p->iothread; + p->iothread = NULL; + + list_for_each_entry_safe(work, tmp, &p->work_list, list) { + list_del_init(&work->list); + qedi_fp_process_cqes(work); + if (!work->is_solicited) + kfree(work); + } + + spin_unlock_bh(&p->p_work_lock); + if (thread) + kthread_stop(thread); + return 0; +} + +void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu) +{ + struct qed_ll2_params params; + + qedi_recover_all_conns(qedi); + + qedi_ops->ll2->stop(qedi->cdev); + qedi_ll2_free_skbs(qedi); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n", + qedi->ll2_mtu, mtu); + memset(¶ms, 0, sizeof(params)); + qedi->ll2_mtu = mtu; + params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN; + params.drop_ttl0_packets = 0; + params.rx_vlan_stripping = 1; + ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac); + qedi_ops->ll2->start(qedi->cdev, ¶ms); +} + +static void __qedi_remove(struct pci_dev *pdev, int mode) +{ + struct qedi_ctx *qedi = pci_get_drvdata(pdev); + + if (qedi->tmf_thread) { + flush_workqueue(qedi->tmf_thread); + destroy_workqueue(qedi->tmf_thread); + qedi->tmf_thread = NULL; + } + + if (qedi->offload_thread) { + flush_workqueue(qedi->offload_thread); + destroy_workqueue(qedi->offload_thread); + qedi->offload_thread = NULL; + } + +#ifdef CONFIG_DEBUG_FS + 
qedi_dbg_host_exit(&qedi->dbg_ctx); +#endif + if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) + qedi_ops->common->set_power_state(qedi->cdev, PCI_D0); + + qedi_sync_free_irqs(qedi); + + if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { + qedi_ops->stop(qedi->cdev); + qedi_ops->ll2->stop(qedi->cdev); + } + + if (mode == QEDI_MODE_NORMAL) + qedi_free_iscsi_pf_param(qedi); + + if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { + qedi_ops->common->slowpath_stop(qedi->cdev); + qedi_ops->common->remove(qedi->cdev); + } + + qedi_destroy_fp(qedi); + + if (mode == QEDI_MODE_NORMAL) { + qedi_release_cid_que(qedi); + qedi_cm_free_mem(qedi); + qedi_free_uio(qedi->udev); + qedi_free_itt(qedi); + + iscsi_host_remove(qedi->shost); + iscsi_host_free(qedi->shost); + + if (qedi->ll2_recv_thread) { + kthread_stop(qedi->ll2_recv_thread); + qedi->ll2_recv_thread = NULL; + } + qedi_ll2_free_skbs(qedi); + } +} + +static int __qedi_probe(struct pci_dev *pdev, int mode) +{ + struct qedi_ctx *qedi; + struct qed_ll2_params params; + u32 dp_module = 0; + u8 dp_level = 0; + bool is_vf = false; + char host_buf[16]; + struct qed_link_params link_params; + struct qed_slowpath_params sp_params; + struct qed_probe_params qed_params; + void *task_start, *task_end; + int rc; + u16 tmp; + + if (mode != QEDI_MODE_RECOVERY) { + qedi = qedi_host_alloc(pdev); + if (!qedi) { + rc = -ENOMEM; + goto exit_probe; + } + } else { + qedi = pci_get_drvdata(pdev); + } + + memset(&qed_params, 0, sizeof(qed_params)); + qed_params.protocol = QED_PROTOCOL_ISCSI; + qed_params.dp_module = dp_module; + qed_params.dp_level = dp_level; + qed_params.is_vf = is_vf; + qedi->cdev = qedi_ops->common->probe(pdev, &qed_params); + if (!qedi->cdev) { + rc = -ENODEV; + QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n"); + goto free_host; + } + + qedi->msix_count = MAX_NUM_MSIX_PF; + atomic_set(&qedi->link_state, QEDI_LINK_DOWN); + + if (mode != QEDI_MODE_RECOVERY) { + rc = qedi_set_iscsi_pf_param(qedi); + if (rc) { + rc = -ENOMEM; + QEDI_ERR(&qedi->dbg_ctx, + "Set iSCSI pf param fail\n"); + goto free_host; + } + } + + qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); + + rc = qedi_prepare_fp(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n"); + goto free_pf_params; + } + + /* Start the Slowpath-process */ + memset(&sp_params, 0, sizeof(struct qed_slowpath_params)); + sp_params.int_mode = QED_INT_MODE_MSIX; + sp_params.drv_major = QEDI_DRIVER_MAJOR_VER; + sp_params.drv_minor = QEDI_DRIVER_MINOR_VER; + sp_params.drv_rev = QEDI_DRIVER_REV_VER; + sp_params.drv_eng = QEDI_DRIVER_ENG_VER; + strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE); + rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n"); + goto stop_hw; + } + + /* update_pf_params needs to be called before and after slowpath + * start + */ + qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); + + rc = qedi_setup_int(qedi); + if (rc) + goto stop_iscsi_func; + + qedi_ops->common->set_power_state(qedi->cdev, PCI_D0); + + /* Learn information crucial for qedi to progress */ + rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info); + if (rc) + goto stop_iscsi_func; + + /* Record BDQ producer doorbell addresses */ + qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr; + qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "BDQ primary_prod=%p secondary_prod=%p.\n", + qedi->bdq_primary_prod, +
qedi->bdq_secondary_prod); + + /* + * We need to write the number of BDs in the BDQ we've preallocated so + * the f/w will do a prefetch and we'll get an unsolicited CQE when a + * packet arrives. + */ + qedi->bdq_prod_idx = QEDI_BDQ_NUM; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "Writing %d to primary and secondary BDQ doorbell registers.\n", + qedi->bdq_prod_idx); + writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod); + tmp = readw(qedi->bdq_primary_prod); + writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod); + tmp = readw(qedi->bdq_secondary_prod); + + ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n", + qedi->mac); + + sprintf(host_buf, "host_%d", qedi->shost->host_no); + qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION); + + qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi); + + memset(¶ms, 0, sizeof(params)); + params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN; + qedi->ll2_mtu = DEF_PATH_MTU; + params.drop_ttl0_packets = 0; + params.rx_vlan_stripping = 1; + ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac); + + if (mode != QEDI_MODE_RECOVERY) { + /* set up rx path */ + INIT_LIST_HEAD(&qedi->ll2_skb_list); + spin_lock_init(&qedi->ll2_lock); + /* start qedi context */ + spin_lock_init(&qedi->hba_lock); + spin_lock_init(&qedi->task_idx_lock); + } + qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); + qedi_ops->ll2->start(qedi->cdev, ¶ms); + + if (mode != QEDI_MODE_RECOVERY) { + qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread, + (void *)qedi, + "qedi_ll2_thread"); + } + + rc = qedi_ops->start(qedi->cdev, &qedi->tasks, + qedi, qedi_iscsi_event_cb); + if (rc) { + rc = -ENODEV; + QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n"); + goto stop_slowpath; + } + + task_start = qedi_get_task_mem(&qedi->tasks, 0); + task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1); + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, + "Task context start=%p, end=%p block_size=%u.\n", + task_start, task_end, qedi->tasks.size); + + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = true; + rc = qedi_ops->common->set_link(qedi->cdev, &link_params); + if (rc) { + QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n"); + atomic_set(&qedi->link_state, QEDI_LINK_DOWN); + } + +#ifdef CONFIG_DEBUG_FS + qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops, + &qedi_dbg_fops); +#endif + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n", + QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION, + FW_REVISION_VERSION, FW_ENGINEERING_VERSION); + + if (mode == QEDI_MODE_NORMAL) { + if (iscsi_host_add(qedi->shost, &pdev->dev)) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not add iscsi host\n"); + rc = -ENOMEM; + goto remove_host; + } + + /* Allocate uio buffers */ + rc = qedi_alloc_uio_rings(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "UIO alloc ring failed err=%d\n", rc); + goto remove_host; + } + + rc = qedi_init_uio(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "UIO init failed, err=%d\n", rc); + goto free_uio; + } + + /* host the array on iscsi_conn */ + rc = qedi_setup_cid_que(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not setup cid que\n"); + goto free_uio; + } + + rc = qedi_cm_alloc_mem(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not alloc cm memory\n"); + goto free_cid_que; + } + + rc = qedi_alloc_itt(qedi); + if (rc) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not alloc 
itt memory\n"); + goto free_cid_que; + } + + sprintf(host_buf, "host_%d", qedi->shost->host_no); + qedi->tmf_thread = create_singlethread_workqueue(host_buf); + if (!qedi->tmf_thread) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to start tmf thread!\n"); + rc = -ENODEV; + goto free_cid_que; + } + + sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no); + qedi->offload_thread = create_workqueue(host_buf); + if (!qedi->offload_thread) { + QEDI_ERR(&qedi->dbg_ctx, + "Unable to start offload thread!\n"); + rc = -ENODEV; + goto free_cid_que; + } + + /* F/w needs 1st task context memory entry for performance */ + set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map); + atomic_set(&qedi->num_offloads, 0); + } + + return 0; + +free_cid_que: + qedi_release_cid_que(qedi); +free_uio: + qedi_free_uio(qedi->udev); +remove_host: +#ifdef CONFIG_DEBUG_FS + qedi_dbg_host_exit(&qedi->dbg_ctx); +#endif + iscsi_host_remove(qedi->shost); +stop_iscsi_func: + qedi_ops->stop(qedi->cdev); +stop_slowpath: + qedi_ops->common->slowpath_stop(qedi->cdev); +stop_hw: + qedi_ops->common->remove(qedi->cdev); +free_pf_params: + qedi_free_iscsi_pf_param(qedi); +free_host: + iscsi_host_free(qedi->shost); +exit_probe: + return rc; +} + +static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return __qedi_probe(pdev, QEDI_MODE_NORMAL); +} + +static void qedi_remove(struct pci_dev *pdev) +{ + __qedi_remove(pdev, QEDI_MODE_NORMAL); +} + +static struct pci_device_id qedi_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) }, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, qedi_pci_tbl); + +static enum cpuhp_state qedi_cpuhp_state; + +static struct pci_driver qedi_pci_driver = { + .name = QEDI_MODULE_NAME, + .id_table = qedi_pci_tbl, + .probe = qedi_probe, + .remove = qedi_remove, +}; + +static int __init qedi_init(void) +{ + struct qedi_percpu_s *p; + int cpu, rc = 0; + + qedi_ops = qed_get_iscsi_ops(); + if (!qedi_ops) { + QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n"); + return -EINVAL; + } + +#ifdef CONFIG_DEBUG_FS + qedi_dbg_init("qedi"); +#endif + + qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport); + if (!qedi_scsi_transport) { + QEDI_ERR(NULL, "Could not register qedi transport"); + rc = -ENOMEM; + goto exit_qedi_init_1; + } + + for_each_possible_cpu(cpu) { + p = &per_cpu(qedi_percpu, cpu); + INIT_LIST_HEAD(&p->work_list); + spin_lock_init(&p->p_work_lock); + p->iothread = NULL; + } + + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online", + qedi_cpu_online, qedi_cpu_offline); + if (rc < 0) + goto exit_qedi_init_2; + qedi_cpuhp_state = rc; + + rc = pci_register_driver(&qedi_pci_driver); + if (rc) { + QEDI_ERR(NULL, "Failed to register driver\n"); + goto exit_qedi_hp; + } + + return 0; + +exit_qedi_hp: + cpuhp_remove_state(qedi_cpuhp_state); +exit_qedi_init_2: + iscsi_unregister_transport(&qedi_iscsi_transport); +exit_qedi_init_1: +#ifdef CONFIG_DEBUG_FS + qedi_dbg_exit(); +#endif + qed_put_iscsi_ops(); + return rc; +} + +static void __exit qedi_cleanup(void) +{ + pci_unregister_driver(&qedi_pci_driver); + cpuhp_remove_state(qedi_cpuhp_state); + iscsi_unregister_transport(&qedi_iscsi_transport); + +#ifdef CONFIG_DEBUG_FS + qedi_dbg_exit(); +#endif + qed_put_iscsi_ops(); +} + +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("QLogic Corporation"); +MODULE_VERSION(QEDI_MODULE_VERSION); +module_init(qedi_init); +module_exit(qedi_cleanup); diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c new file mode 
100644 index 0000000000000000000000000000000000000000..b10c48bd1428e6aac6a6448ee1f20dbf85ed7f37 --- /dev/null +++ b/drivers/scsi/qedi/qedi_sysfs.c @@ -0,0 +1,52 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include "qedi.h" +#include "qedi_gbl.h" +#include "qedi_iscsi.h" +#include "qedi_dbg.h" + +static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev) +{ + struct Scsi_Host *shost = class_to_shost(dev); + + return iscsi_host_priv(shost); +} + +static ssize_t qedi_show_port_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct qedi_ctx *qedi = qedi_dev_to_hba(dev); + + if (atomic_read(&qedi->link_state) == QEDI_LINK_UP) + return sprintf(buf, "Online\n"); + else + return sprintf(buf, "Linkdown\n"); +} + +static ssize_t qedi_show_speed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qedi_ctx *qedi = qedi_dev_to_hba(dev); + struct qed_link_output if_link; + + qedi_ops->common->get_link(qedi->cdev, &if_link); + + return sprintf(buf, "%d Gbit\n", if_link.speed / 1000); +} + +static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL); +static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL); + +struct device_attribute *qedi_shost_attrs[] = { + &dev_attr_port_state, + &dev_attr_speed, + NULL +}; diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h new file mode 100644 index 0000000000000000000000000000000000000000..9543a1b139d4e9a00fbac1c9c8474d6bec0e7653 --- /dev/null +++ b/drivers/scsi/qedi/qedi_version.h @@ -0,0 +1,14 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#define QEDI_MODULE_VERSION "8.10.3.0" +#define QEDI_DRIVER_MAJOR_VER 8 +#define QEDI_DRIVER_MINOR_VER 10 +#define QEDI_DRIVER_REV_VER 3 +#define QEDI_DRIVER_ENG_VER 0 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index fe7469c901f76ac2d46006eeb208f3c9398d3ee1..47eb4d545d13c5f9b80149f162b04756108cd654 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1988,9 +1988,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); scsi_qla_host_t *vha = NULL; struct qla_hw_data *ha = base_vha->hw; - uint16_t options = 0; int cnt; struct req_que *req = ha->req_q_map[0]; + struct qla_qpair *qpair; ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret) { @@ -2075,15 +2075,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qlt_vport_create(vha, ha); qla24xx_vport_disable(fc_vport, disable); - if (ha->flags.cpu_affinity_enabled) { - req = ha->req_q_map[1]; - ql_dbg(ql_dbg_multiq, vha, 0xc000, - "Request queue %p attached with " - "VP[%d], cpu affinity =%d\n", - req, vha->vp_idx, ha->flags.cpu_affinity_enabled); - goto vport_queue; - } else if (ql2xmaxqueues == 1 || !ha->npiv_info) + if (!ql2xmqsupport || !ha->npiv_info) goto vport_queue; + /* Create a request queue in QoS mode for the vport */ for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) { if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0 @@ -2095,20 +2089,20 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) } if (qos) { - ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, - qos); - if (!ret) + qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx); + if (!qpair) ql_log(ql_log_warn, vha, 0x7084, - "Can't create request queue for VP[%d]\n", + "Can't create qpair for VP[%d]\n", vha->vp_idx); else { ql_dbg(ql_dbg_multiq, vha, 0xc001, - "Request Que:%d Q0s: %d) created for VP[%d]\n", - ret, qos, vha->vp_idx); + "Queue pair: %d Qos: %d) created for VP[%d]\n", + qpair->id, qos, vha->vp_idx); ql_dbg(ql_dbg_user, vha, 0x7085, - "Request Que:%d Q0s: %d) created for VP[%d]\n", - ret, qos, vha->vp_idx); - req = ha->req_q_map[ret]; + "Queue Pair: %d Qos: %d) created for VP[%d]\n", + qpair->id, qos, vha->vp_idx); + req = qpair->req; + vha->qpair = qpair; } } @@ -2162,10 +2156,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) clear_bit(vha->vp_idx, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); - if (vha->req->id && !ha->flags.cpu_affinity_enabled) { - if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) + if (vha->qpair->vp_idx == vha->vp_idx) { + if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x7087, - "Queue delete failed.\n"); + "Queue Pair delete failed.\n"); } ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 643014f82f7d82d0a314581e4a9ef10d6a56eea6..1bf8061ff8032fae46854f84d85946ea55c6072a 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1,4 +1,4 @@ -/* + /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation * @@ -9,6 +9,7 @@ #include #include #include +#include /* BSG support for ELS/CT pass through */ void @@ -16,10 +17,12 @@ qla2x00_bsg_job_done(void *data, void *ptr, int res) { srb_t *sp = (srb_t *)ptr; struct scsi_qla_host *vha = (scsi_qla_host_t *)data; - struct fc_bsg_job *bsg_job = sp->u.bsg_job; + struct bsg_job *bsg_job = sp->u.bsg_job; + struct 
fc_bsg_reply *bsg_reply = bsg_job->reply; - bsg_job->reply->result = res; - bsg_job->job_done(bsg_job); + bsg_reply->result = res; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); sp->free(vha, sp); } @@ -28,13 +31,15 @@ qla2x00_bsg_sp_free(void *data, void *ptr) { srb_t *sp = (srb_t *)ptr; struct scsi_qla_host *vha = sp->fcport->vha; - struct fc_bsg_job *bsg_job = sp->u.bsg_job; + struct bsg_job *bsg_job = sp->u.bsg_job; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct qla_hw_data *ha = vha->hw; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; if (sp->type == SRB_FXIOCB_BCMD) { piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) - &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) dma_unmap_sg(&ha->pdev->dev, @@ -116,9 +121,11 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, } static int -qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) +qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int ret = 0; @@ -131,7 +138,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) } /* Get the sub command */ - oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; /* Only set config is allowed if config memory is not allocated */ if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) { @@ -145,10 +152,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) ha->fcp_prio_cfg->attributes &= ~FCP_PRIO_ATTR_ENABLE; qla24xx_update_all_fcp_prio(vha); - bsg_job->reply->result = DID_OK; + bsg_reply->result = DID_OK; } else { ret = -EINVAL; - bsg_job->reply->result = (DID_ERROR << 16); + bsg_reply->result = (DID_ERROR << 16); goto exit_fcp_prio_cfg; } break; @@ -160,10 +167,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) ha->fcp_prio_cfg->attributes |= FCP_PRIO_ATTR_ENABLE; qla24xx_update_all_fcp_prio(vha); - bsg_job->reply->result = DID_OK; + bsg_reply->result = DID_OK; } else { ret = -EINVAL; - bsg_job->reply->result = (DID_ERROR << 16); + bsg_reply->result = (DID_ERROR << 16); goto exit_fcp_prio_cfg; } } @@ -173,12 +180,12 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) len = bsg_job->reply_payload.payload_len; if (!len || len > FCP_PRIO_CFG_SIZE) { ret = -EINVAL; - bsg_job->reply->result = (DID_ERROR << 16); + bsg_reply->result = (DID_ERROR << 16); goto exit_fcp_prio_cfg; } - bsg_job->reply->result = DID_OK; - bsg_job->reply->reply_payload_rcv_len = + bsg_reply->result = DID_OK; + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer( bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg, @@ -189,7 +196,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) case QLFC_FCP_PRIO_SET_CONFIG: len = bsg_job->request_payload.payload_len; if (!len || len > FCP_PRIO_CFG_SIZE) { - bsg_job->reply->result = (DID_ERROR << 16); + bsg_reply->result = (DID_ERROR << 16); ret = -EINVAL; goto exit_fcp_prio_cfg; } @@ -200,7 +207,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) ql_log(ql_log_warn, vha, 0x7050, "Unable to allocate memory for fcp prio " "config data (%x).\n", FCP_PRIO_CFG_SIZE); - bsg_job->reply->result = (DID_ERROR << 16); + 
bsg_reply->result = (DID_ERROR << 16); ret = -ENOMEM; goto exit_fcp_prio_cfg; } @@ -215,7 +222,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) if (!qla24xx_fcp_prio_cfg_valid(vha, (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) { - bsg_job->reply->result = (DID_ERROR << 16); + bsg_reply->result = (DID_ERROR << 16); ret = -EINVAL; /* If buffer was invalidatic int * fcp_prio_cfg is of no use @@ -229,7 +236,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE) ha->flags.fcp_prio_enabled = 1; qla24xx_update_all_fcp_prio(vha); - bsg_job->reply->result = DID_OK; + bsg_reply->result = DID_OK; break; default: ret = -EINVAL; @@ -237,13 +244,15 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) } exit_fcp_prio_cfg: if (!ret) - bsg_job->job_done(bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return ret; } static int -qla2x00_process_els(struct fc_bsg_job *bsg_job) +qla2x00_process_els(struct bsg_job *bsg_job) { + struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_rport *rport; fc_port_t *fcport = NULL; struct Scsi_Host *host; @@ -255,15 +264,15 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) int rval = (DRIVER_ERROR << 16); uint16_t nextlid = 0; - if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { - rport = bsg_job->rport; + if (bsg_request->msgcode == FC_BSG_RPT_ELS) { + rport = fc_bsg_to_rport(bsg_job); fcport = *(fc_port_t **) rport->dd_data; host = rport_to_shost(rport); vha = shost_priv(host); ha = vha->hw; type = "FC_BSG_RPT_ELS"; } else { - host = bsg_job->shost; + host = fc_bsg_to_shost(bsg_job); vha = shost_priv(host); ha = vha->hw; type = "FC_BSG_HST_ELS_NOLOGIN"; @@ -296,7 +305,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) } /* ELS request for rport */ - if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { + if (bsg_request->msgcode == FC_BSG_RPT_ELS) { /* make sure the rport is logged in, * if not perform fabric login */ @@ -322,11 +331,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) /* Initialize all required fields of fcport */ fcport->vha = vha; fcport->d_id.b.al_pa = - bsg_job->request->rqst_data.h_els.port_id[0]; + bsg_request->rqst_data.h_els.port_id[0]; fcport->d_id.b.area = - bsg_job->request->rqst_data.h_els.port_id[1]; + bsg_request->rqst_data.h_els.port_id[1]; fcport->d_id.b.domain = - bsg_job->request->rqst_data.h_els.port_id[2]; + bsg_request->rqst_data.h_els.port_id[2]; fcport->loop_id = (fcport->d_id.b.al_pa == 0xFD) ? NPH_FABRIC_CONTROLLER : NPH_F_PORT; @@ -366,11 +375,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) } sp->type = - (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? - SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); + (bsg_request->msgcode == FC_BSG_RPT_ELS ? + SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); sp->name = - (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? - "bsg_els_rpt" : "bsg_els_hst"); + (bsg_request->msgcode == FC_BSG_RPT_ELS ? 
+ "bsg_els_rpt" : "bsg_els_hst"); sp->u.bsg_job = bsg_job; sp->free = qla2x00_bsg_sp_free; sp->done = qla2x00_bsg_job_done; @@ -378,7 +387,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) ql_dbg(ql_dbg_user, vha, 0x700a, "bsg rqst type: %s els type: %x - loop-id=%x " "portid=%-2x%02x%02x.\n", type, - bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id, + bsg_request->rqst_data.h_els.command_code, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); @@ -399,7 +408,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) goto done_free_fcport; done_free_fcport: - if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) + if (bsg_request->msgcode == FC_BSG_RPT_ELS) kfree(fcport); done: return rval; @@ -420,10 +429,11 @@ qla24xx_calc_ct_iocbs(uint16_t dsds) } static int -qla2x00_process_ct(struct fc_bsg_job *bsg_job) +qla2x00_process_ct(struct bsg_job *bsg_job) { srb_t *sp; - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = (DRIVER_ERROR << 16); @@ -469,7 +479,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) } loop_id = - (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000) + (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000) >> 24; switch (loop_id) { case 0xFC: @@ -500,9 +510,9 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) /* Initialize all required fields of fcport */ fcport->vha = vha; - fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; - fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; - fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; + fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0]; + fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1]; + fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2]; fcport->loop_id = loop_id; /* Alloc SRB structure */ @@ -524,7 +534,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) ql_dbg(ql_dbg_user, vha, 0x7016, "bsg rqst type: %s else type: %x - " "loop-id=%x portid=%02x%02x%02x.\n", type, - (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), + (bsg_request->rqst_data.h_ct.preamble_word2 >> 16), fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); @@ -697,9 +707,11 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, } static int -qla2x00_process_loopback(struct fc_bsg_job *bsg_job) +qla2x00_process_loopback(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval; @@ -780,9 +792,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) elreq.rcv_dma = rsp_data_dma; elreq.transfer_size = req_data_len; - elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; elreq.iteration_count = - bsg_job->request->rqst_data.h_vendor.vendor_cmd[2]; + bsg_request->rqst_data.h_vendor.vendor_cmd[2]; if (atomic_read(&vha->loop_state) == LOOP_READY && (ha->current_topology == ISP_CFG_F || @@ -896,12 +908,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) "Vendor request %s failed.\n", type); rval = 0; - bsg_job->reply->result = 
(DID_ERROR << 16); - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_reply->result = (DID_ERROR << 16); + bsg_reply->reply_payload_rcv_len = 0; } else { ql_dbg(ql_dbg_user, vha, 0x702d, "Vendor request %s completed.\n", type); - bsg_job->reply->result = (DID_OK << 16); + bsg_reply->result = (DID_OK << 16); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, rsp_data, rsp_data_len); @@ -930,14 +942,17 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!rval) - bsg_job->job_done(bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rval; } static int -qla84xx_reset(struct fc_bsg_job *bsg_job) +qla84xx_reset(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; @@ -948,7 +963,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job) return -EINVAL; } - flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); @@ -960,17 +975,20 @@ qla84xx_reset(struct fc_bsg_job *bsg_job) } else { ql_dbg(ql_dbg_user, vha, 0x7031, "Vendor request 84xx reset completed.\n"); - bsg_job->reply->result = DID_OK; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); } return rval; } static int -qla84xx_updatefw(struct fc_bsg_job *bsg_job) +qla84xx_updatefw(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct verify_chip_entry_84xx *mn = NULL; @@ -1027,7 +1045,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) goto done_free_fw_buf; } - flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2))); memset(mn, 0, sizeof(struct access_chip_84xx)); @@ -1059,7 +1077,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) "Vendor request 84xx updatefw completed.\n"); bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK; + bsg_reply->result = DID_OK; } dma_pool_free(ha->s_dma_pool, mn, mn_dma); @@ -1072,14 +1090,17 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!rval) - bsg_job->job_done(bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rval; } static int -qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) +qla84xx_mgmt_cmd(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct access_chip_84xx *mn = NULL; @@ -1107,7 +1128,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) memset(mn, 0, sizeof(struct access_chip_84xx)); mn->entry_type = ACCESS_CHIP_IOCB_TYPE; 
mn->entry_count = 1; - ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request); + ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request); switch (ql84_mgmt->mgmt.cmd) { case QLA84_MGMT_READ_MEM: case QLA84_MGMT_GET_INFO: @@ -1239,11 +1260,11 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) "Vendor request 84xx mgmt completed.\n"); bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK; + bsg_reply->result = DID_OK; if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) || (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) { - bsg_job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; sg_copy_from_buffer(bsg_job->reply_payload.sg_list, @@ -1267,14 +1288,17 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) dma_pool_free(ha->s_dma_pool, mn, mn_dma); if (!rval) - bsg_job->job_done(bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rval; } static int -qla24xx_iidma(struct fc_bsg_job *bsg_job) +qla24xx_iidma(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval = 0; struct qla_port_param *port_param = NULL; @@ -1288,7 +1312,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) return -EINVAL; } - port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request); + port_param = (void *)bsg_request + sizeof(struct fc_bsg_request); if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { ql_log(ql_log_warn, vha, 0x7048, "Invalid destination type.\n"); @@ -1343,24 +1367,26 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(struct qla_port_param); - rsp_ptr = ((uint8_t *)bsg_job->reply) + + rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct fc_bsg_reply); memcpy(rsp_ptr, port_param, sizeof(struct qla_port_param)); } - bsg_job->reply->result = DID_OK; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); } return rval; } static int -qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, +qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha, uint8_t is_update) { + struct fc_bsg_request *bsg_request = bsg_job->request; uint32_t start = 0; int valid = 0; struct qla_hw_data *ha = vha->hw; @@ -1368,7 +1394,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, if (unlikely(pci_channel_offline(ha->pdev))) return -EINVAL; - start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + start = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; if (start > ha->optrom_size) { ql_log(ql_log_warn, vha, 0x7055, "start %d > optrom_size %d.\n", start, ha->optrom_size); @@ -1427,9 +1453,10 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, } static int -qla2x00_read_optrom(struct fc_bsg_job *bsg_job) +qla2x00_read_optrom(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; @@ -1451,20 +1478,22 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job) bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, ha->optrom_region_size); - 
bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size; - bsg_job->reply->result = DID_OK; + bsg_reply->reply_payload_rcv_len = ha->optrom_region_size; + bsg_reply->result = DID_OK; vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; ha->optrom_state = QLA_SWAITING; mutex_unlock(&ha->optrom_mutex); - bsg_job->job_done(bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rval; } static int -qla2x00_update_optrom(struct fc_bsg_job *bsg_job) +qla2x00_update_optrom(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; @@ -1486,19 +1515,21 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) ha->isp_ops->write_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); - bsg_job->reply->result = DID_OK; + bsg_reply->result = DID_OK; vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; ha->optrom_state = QLA_SWAITING; mutex_unlock(&ha->optrom_mutex); - bsg_job->job_done(bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rval; } static int -qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job) +qla2x00_update_fru_versions(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; @@ -1509,7 +1540,7 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job) dma_addr_t sfp_dma; void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } @@ -1525,30 +1556,32 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job) image->field_address.device, image->field_address.offset, sizeof(image->field_info), image->field_address.option); if (rval) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } image++; } - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) +qla2x00_read_fru_status(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; @@ -1557,7 +1590,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) dma_addr_t sfp_dma; uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } @@ -1571,7 +1604,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) sr->status_reg = *sfp; if (rval) { - 
bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } @@ -1579,24 +1612,26 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->reply_payload_rcv_len = sizeof(*sr); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->reply_payload_rcv_len = sizeof(*sr); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla2x00_write_fru_status(struct fc_bsg_job *bsg_job) +qla2x00_write_fru_status(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; @@ -1605,7 +1640,7 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job) dma_addr_t sfp_dma; uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } @@ -1619,28 +1654,30 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job) sizeof(sr->status_reg), sr->field_address.option); if (rval) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla2x00_write_i2c(struct fc_bsg_job *bsg_job) +qla2x00_write_i2c(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; @@ -1649,7 +1686,7 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job) dma_addr_t sfp_dma; uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } @@ -1662,28 +1699,30 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job) i2c->device, i2c->offset, i2c->length, i2c->option); if (rval) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, 
bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla2x00_read_i2c(struct fc_bsg_job *bsg_job) +qla2x00_read_i2c(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; @@ -1692,7 +1731,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job) dma_addr_t sfp_dma; uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } @@ -1704,7 +1743,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job) i2c->device, i2c->offset, i2c->length, i2c->option); if (rval) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } @@ -1713,24 +1752,26 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job) sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c)); - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->reply_payload_rcv_len = sizeof(*i2c); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job) +qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; uint32_t rval = EXT_STATUS_OK; @@ -1895,19 +1936,21 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job) /* Return an error vendor specific response * and complete the bsg request */ - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->reply_payload_rcv_len = 0; - bsg_job->reply->result = (DID_OK) << 16; - bsg_job->job_done(bsg_job); + bsg_reply->reply_payload_rcv_len = 0; + bsg_reply->result = (DID_OK) << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); /* Always return success, vendor rsp carries correct status */ return 0; } static int -qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job) +qlafx00_mgmt_cmd(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = (DRIVER_ERROR << 16); @@ -1919,7 +1962,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job) /* Copy the IOCB specific information */ piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) - &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; /* Dump the vendor information */ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, @@ -2027,9 +2070,10 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job) } static int 
-qla26xx_serdes_op(struct fc_bsg_job *bsg_job) +qla26xx_serdes_op(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval = 0; struct qla_serdes_reg sr; @@ -2042,13 +2086,13 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job) switch (sr.cmd) { case INT_SC_SERDES_WRITE_REG: rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; break; case INT_SC_SERDES_READ_REG: rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); - bsg_job->reply->reply_payload_rcv_len = sizeof(sr); + bsg_reply->reply_payload_rcv_len = sizeof(sr); break; default: ql_dbg(ql_dbg_user, vha, 0x708c, @@ -2057,19 +2101,21 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job) break; } - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval ? EXT_STATUS_MAILBOX : 0; bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla8044_serdes_op(struct fc_bsg_job *bsg_job) +qla8044_serdes_op(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval = 0; struct qla_serdes_reg_ex sr; @@ -2082,13 +2128,13 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job) switch (sr.cmd) { case INT_SC_SERDES_WRITE_REG: rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; break; case INT_SC_SERDES_READ_REG: rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); - bsg_job->reply->reply_payload_rcv_len = sizeof(sr); + bsg_reply->reply_payload_rcv_len = sizeof(sr); break; default: ql_dbg(ql_dbg_user, vha, 0x70cf, @@ -2097,19 +2143,21 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job) break; } - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval ? 
EXT_STATUS_MAILBOX : 0; bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job) +qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct qla_flash_update_caps cap; @@ -2125,21 +2173,23 @@ qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job) sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap)); - bsg_job->reply->reply_payload_rcv_len = sizeof(cap); + bsg_reply->reply_payload_rcv_len = sizeof(cap); - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job) +qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; uint64_t online_fw_attr = 0; @@ -2158,32 +2208,34 @@ qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job) (uint64_t)ha->fw_attributes; if (online_fw_attr != cap.capabilities) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_INVALID_PARAM; return -EINVAL; } if (cap.outage_duration < MAX_LOOP_TIMEOUT) { - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_INVALID_PARAM; return -EINVAL; } - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job) +qla27xx_get_bbcr_data(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct qla_bbcr_data bbcr; @@ -2227,27 +2279,30 @@ qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job) done: sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr)); - bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr); + bsg_reply->reply_payload_rcv_len = sizeof(bbcr); - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply); - bsg_job->reply->result = DID_OK << 16; - 
bsg_job->job_done(bsg_job); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return 0; } static int -qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job) +qla2x00_get_priv_stats(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); struct link_statistics *stats = NULL; dma_addr_t stats_dma; int rval; - uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd; + uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd; uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0; if (test_bit(UNLOADING, &vha->dpc_flags)) @@ -2281,13 +2336,14 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job) bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); } - bsg_job->reply->reply_payload_rcv_len = sizeof(*stats); - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_payload_rcv_len = sizeof(*stats); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; - bsg_job->reply_len = sizeof(*bsg_job->reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_job->reply_len = sizeof(*bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, sizeof(*stats), stats, stats_dma); @@ -2296,9 +2352,10 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job) } static int -qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job) +qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) { - struct Scsi_Host *host = bsg_job->shost; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval; struct qla_dport_diag *dd; @@ -2323,13 +2380,14 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job) bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); } - bsg_job->reply->reply_payload_rcv_len = sizeof(*dd); - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + bsg_reply->reply_payload_rcv_len = sizeof(*dd); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval ? 
EXT_STATUS_MAILBOX : EXT_STATUS_OK; - bsg_job->reply_len = sizeof(*bsg_job->reply); - bsg_job->reply->result = DID_OK << 16; - bsg_job->job_done(bsg_job); + bsg_job->reply_len = sizeof(*bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); kfree(dd); @@ -2337,9 +2395,11 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job) } static int -qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) +qla2x00_process_vendor_specific(struct bsg_job *bsg_job) { - switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { + struct fc_bsg_request *bsg_request = bsg_job->request; + + switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) { case QL_VND_LOOPBACK: return qla2x00_process_loopback(bsg_job); @@ -2413,36 +2473,38 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) } int -qla24xx_bsg_request(struct fc_bsg_job *bsg_job) +qla24xx_bsg_request(struct bsg_job *bsg_job) { + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; int ret = -EINVAL; struct fc_rport *rport; struct Scsi_Host *host; scsi_qla_host_t *vha; /* In case no data transferred. */ - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; - if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { - rport = bsg_job->rport; + if (bsg_request->msgcode == FC_BSG_RPT_ELS) { + rport = fc_bsg_to_rport(bsg_job); host = rport_to_shost(rport); vha = shost_priv(host); } else { - host = bsg_job->shost; + host = fc_bsg_to_shost(bsg_job); vha = shost_priv(host); } if (qla2x00_reset_active(vha)) { ql_dbg(ql_dbg_user, vha, 0x709f, "BSG: ISP abort active/needed -- cmd=%d.\n", - bsg_job->request->msgcode); + bsg_request->msgcode); return -EBUSY; } ql_dbg(ql_dbg_user, vha, 0x7000, - "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode); + "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode); - switch (bsg_job->request->msgcode) { + switch (bsg_request->msgcode) { case FC_BSG_RPT_ELS: case FC_BSG_HST_ELS_NOLOGIN: ret = qla2x00_process_els(bsg_job); @@ -2464,9 +2526,10 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job) } int -qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) +qla24xx_bsg_timeout(struct bsg_job *bsg_job) { - scsi_qla_host_t *vha = shost_priv(bsg_job->shost); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct qla_hw_data *ha = vha->hw; srb_t *sp; int cnt, que; @@ -2494,13 +2557,13 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) "mbx abort_command " "failed.\n"); bsg_job->req->errors = - bsg_job->reply->result = -EIO; + bsg_reply->result = -EIO; } else { ql_dbg(ql_dbg_user, vha, 0x708a, "mbx abort_command " "success.\n"); bsg_job->req->errors = - bsg_job->reply->result = 0; + bsg_reply->result = 0; } spin_lock_irqsave(&ha->hardware_lock, flags); goto done; @@ -2510,7 +2573,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) } spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); - bsg_job->req->errors = bsg_job->reply->result = -ENXIO; + bsg_job->req->errors = bsg_reply->result = -ENXIO; return 0; done: diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 45af34ddc43297d06c8c7cf44425a1a7cee4cc7f..21d9fb7fc88796cbaa09fbfa160b9b20c17e2015 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,7 +11,7 @@ * 
---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x0191 | 0x0146 | + * | Module Init and Probe | 0x0193 | 0x0146 | * | | | 0x015b-0x0160 | * | | | 0x016e | * | Mailbox commands | 0x1199 | 0x1193 | @@ -58,7 +58,7 @@ * | | | 0xb13a,0xb142 | * | | | 0xb13c-0xb140 | * | | | 0xb149 | - * | MultiQ | 0xc00c | | + * | MultiQ | 0xc010 | | * | Misc | 0xd301 | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 73b12e41d9928160e3fc75ec8248c1cc70ed20c6..f7df01b76714e09dc919cbb9660b66bed603d6bc 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -401,9 +401,10 @@ typedef struct srb { uint16_t type; char *name; int iocbs; + struct qla_qpair *qpair; union { struct srb_iocb iocb_cmd; - struct fc_bsg_job *bsg_job; + struct bsg_job *bsg_job; struct srb_cmd scmd; } u; void (*done)(void *, void *, int); @@ -2719,6 +2720,7 @@ struct isp_operations { int (*get_flash_version) (struct scsi_qla_host *, void *); int (*start_scsi) (srb_t *); + int (*start_scsi_mq) (srb_t *); int (*abort_isp) (struct scsi_qla_host *); int (*iospace_config)(struct qla_hw_data*); int (*initialize_adapter)(struct scsi_qla_host *); @@ -2730,8 +2732,10 @@ struct isp_operations { #define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) #define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) -#define QLA_MSIX_DEFAULT 0x00 -#define QLA_MSIX_RSP_Q 0x01 +#define QLA_MSIX_DEFAULT 0x00 +#define QLA_MSIX_RSP_Q 0x01 +#define QLA_ATIO_VECTOR 0x02 +#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 #define QLA_MIDX_DEFAULT 0 #define QLA_MIDX_RSP_Q 1 @@ -2745,9 +2749,11 @@ struct scsi_qla_host; struct qla_msix_entry { int have_irq; + int in_use; uint32_t vector; uint16_t entry; - struct rsp_que *rsp; + char name[30]; + void *handle; struct irq_affinity_notify irq_notify; int cpuid; }; @@ -2872,7 +2878,6 @@ struct rsp_que { struct qla_msix_entry *msix; struct req_que *req; srb_t *status_srb; /* status continuation entry */ - struct work_struct q_work; dma_addr_t dma_fx00; response_t *ring_fx00; @@ -2909,6 +2914,37 @@ struct req_que { uint8_t req_pkt[REQUEST_ENTRY_SIZE]; }; +/*Queue pair data structure */ +struct qla_qpair { + spinlock_t qp_lock; + atomic_t ref_count; + /* distill these fields down to 'online=0/1' + * ha->flags.eeh_busy + * ha->flags.pci_channel_io_perm_failure + * base_vha->loop_state + */ + uint32_t online:1; + /* move vha->flags.difdix_supported here */ + uint32_t difdix_supported:1; + uint32_t delete_in_progress:1; + + uint16_t id; /* qp number used with FW */ + uint16_t num_active_cmd; /* cmds down at firmware */ + cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */ + uint16_t vp_idx; /* vport ID */ + + mempool_t *srb_mempool; + + /* to do: New driver: move queues to here instead of pointers */ + struct req_que *req; + struct rsp_que *rsp; + struct atio_que *atio; + struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */ + struct qla_hw_data *hw; + struct work_struct q_work; + struct list_head qp_list_elem; /* vha->qp_list */ +}; + /* Place holder for FW buffer parameters */ struct qlfc_fw { void *fw_buf; @@ -3004,7 +3040,6 @@ struct qla_hw_data { uint32_t chip_reset_done :1; uint32_t running_gold_fw :1; uint32_t eeh_busy :1; - uint32_t cpu_affinity_enabled :1; uint32_t disable_msix_handshake :1; uint32_t fcp_prio_enabled :1; 
uint32_t isp82xx_fw_hung:1; @@ -3061,10 +3096,15 @@ struct qla_hw_data { uint8_t mqenable; struct req_que **req_q_map; struct rsp_que **rsp_q_map; + struct qla_qpair **queue_pair_map; unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; + unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8) + / sizeof(unsigned long)]; uint8_t max_req_queues; uint8_t max_rsp_queues; + uint8_t max_qpairs; + struct qla_qpair *base_qpair; struct qla_npiv_entry *npiv_info; uint16_t nvram_npiv_size; @@ -3328,6 +3368,7 @@ struct qla_hw_data { struct mutex vport_lock; /* Virtual port synchronization */ spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */ + struct mutex mq_lock; /* multi-queue synchronization */ struct completion mbx_cmd_comp; /* Serialize mbx access */ struct completion mbx_intr_comp; /* Used for completion notification */ struct completion dcbx_comp; /* For set port config notification */ @@ -3608,6 +3649,7 @@ typedef struct scsi_qla_host { uint32_t fw_tgt_reported:1; uint32_t bbcr_enable:1; + uint32_t qpairs_available:1; } flags; atomic_t loop_state; @@ -3646,6 +3688,7 @@ typedef struct scsi_qla_host { #define FX00_TARGET_SCAN 24 #define FX00_CRITEMP_RECOVERY 25 #define FX00_HOST_INFO_RESEND 26 +#define QPAIR_ONLINE_CHECK_NEEDED 27 unsigned long pci_flags; #define PFLG_DISCONNECTED 0 /* PCI device removed */ @@ -3704,10 +3747,13 @@ typedef struct scsi_qla_host { /* List of pending PLOGI acks, protected by hw lock */ struct list_head plogi_ack_list; + struct list_head qp_list; + uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ uint16_t vp_idx; /* vport ID */ + struct qla_qpair *qpair; /* base qpair */ unsigned long vp_flags; #define VP_IDX_ACQUIRED 0 /* bit no 0 */ @@ -3763,6 +3809,23 @@ struct qla_tgt_vp_map { scsi_qla_host_t *vha; }; +struct qla2_sgx { + dma_addr_t dma_addr; /* OUT */ + uint32_t dma_len; /* OUT */ + + uint32_t tot_bytes; /* IN */ + struct scatterlist *cur_sg; /* IN */ + + /* for book keeping, bzero on initial invocation */ + uint32_t bytes_consumed; + uint32_t num_bytes; + uint32_t tot_partial; + + /* for debugging */ + uint32_t num_sg; + srb_t *sp; +}; + /* * Macros to help code, maintain, etc. 
*/ @@ -3775,21 +3838,34 @@ struct qla_tgt_vp_map { (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) -#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ - atomic_inc(&__vha->vref_count); \ - mb(); \ - if (__vha->flags.delete_progress) { \ - atomic_dec(&__vha->vref_count); \ - __bail = 1; \ - } else { \ - __bail = 0; \ - } \ +#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ + atomic_inc(&__vha->vref_count); \ + mb(); \ + if (__vha->flags.delete_progress) { \ + atomic_dec(&__vha->vref_count); \ + __bail = 1; \ + } else { \ + __bail = 0; \ + } \ } while (0) -#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ - atomic_dec(&__vha->vref_count); \ +#define QLA_VHA_MARK_NOT_BUSY(__vha) \ + atomic_dec(&__vha->vref_count); \ + +#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \ + atomic_inc(&__qpair->ref_count); \ + mb(); \ + if (__qpair->delete_in_progress) { \ + atomic_dec(&__qpair->ref_count); \ + __bail = 1; \ + } else { \ + __bail = 0; \ + } \ } while (0) +#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \ + atomic_dec(&__qpair->ref_count); \ + /* * qla2x00 local function return status codes */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 6ca00813c71f0702a49965d97c6240c6b3657307..afa0116a163b12b5f8663a52bee5cc2ec846541d 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -91,12 +91,17 @@ extern int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *); extern int qla2x00_init_rings(scsi_qla_host_t *); extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *); +extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, + int, int); +extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); /* * Global Data in qla_os.c source file. */ extern char qla2x00_version_str[]; +extern struct kmem_cache *srb_cachep; + extern int ql2xlogintimeout; extern int qlport_down_retry; extern int ql2xplogiabsentdevice; @@ -105,8 +110,7 @@ extern int ql2xfdmienable; extern int ql2xallocfwdump; extern int ql2xextended_error_logging; extern int ql2xiidmaenable; -extern int ql2xmaxqueues; -extern int ql2xmultique_tag; +extern int ql2xmqsupport; extern int ql2xfwloadbin; extern int ql2xetsenable; extern int ql2xshiftctondsd; @@ -172,6 +176,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern void qla2x00_disable_board_on_pci_error(struct work_struct *); +extern void qla2x00_sp_compl(void *, void *, int); +extern void qla2xxx_qpair_sp_free_dma(void *, void *); +extern void qla2xxx_qpair_sp_compl(void *, void *, int); /* * Global Functions in qla_mid.c source file. 
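For reference, the qla2xxx_create_qpair()/qla2xxx_delete_qpair() interface declared in the qla_gbl.h hunk above is consumed by the vport code changed earlier in this patch (qla_attr.c). A minimal sketch of that usage, with error handling elided; this is illustrative only and not part of the patch:

	struct qla_qpair *qpair;

	/* Bring up a dedicated queue pair for the vport (QoS mode),
	 * mirroring the qla24xx_vport_create() change in this patch. */
	qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
	if (!qpair) {
		ql_log(ql_log_warn, vha, 0x7084,
		    "Can't create qpair for VP[%d]\n", vha->vp_idx);
	} else {
		vha->qpair = qpair;	/* I/O for this vport now uses qpair->req */
	}

	/* Tear it down again when the vport is deleted. */
	if (vha->qpair->vp_idx == vha->vp_idx &&
	    qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x7087,
		    "Queue Pair delete failed.\n");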
@@ -220,6 +227,8 @@ extern uint16_t qla2x00_calc_iocbs_32(uint16_t); extern uint16_t qla2x00_calc_iocbs_64(uint16_t); extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); +extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, + uint16_t, struct req_que *); extern int qla2x00_start_scsi(srb_t *sp); extern int qla24xx_start_scsi(srb_t *sp); int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, @@ -227,6 +236,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, extern int qla2x00_start_sp(srb_t *); extern int qla24xx_dif_start_scsi(srb_t *); extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); +extern int qla2xxx_dif_start_scsi_mq(srb_t *); extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); @@ -237,7 +247,10 @@ extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, uint32_t *, uint16_t, struct qla_tgt_cmd *); extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, uint32_t *, uint16_t, struct qla_tgt_cmd *); - +extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); +extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); +extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *, + struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t); /* * Global Function Prototypes in qla_mbx.c source file. @@ -468,6 +481,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *, extern void qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); +extern irqreturn_t +qla2xxx_msix_rsp_q(int irq, void *dev_id); /* * Global Function Prototypes in qla_sup.c source file. 
@@ -603,15 +618,18 @@ extern int qla2x00_dfs_setup(scsi_qla_host_t *); extern int qla2x00_dfs_remove(scsi_qla_host_t *); /* Globa function prototypes for multi-q */ -extern int qla25xx_request_irq(struct rsp_que *); +extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *, + struct qla_msix_entry *, int); extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, uint16_t, int, uint8_t); extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t, int); + uint16_t, struct qla_qpair *); + extern void qla2x00_init_response_q_entries(struct rsp_que *); extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); +extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_delete_queues(struct scsi_qla_host *); extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); @@ -733,8 +751,8 @@ extern int qla82xx_read_temperature(scsi_qla_host_t *); extern int qla8044_read_temperature(scsi_qla_host_t *); /* BSG related functions */ -extern int qla24xx_bsg_request(struct fc_bsg_job *); -extern int qla24xx_bsg_timeout(struct fc_bsg_job *); +extern int qla24xx_bsg_request(struct bsg_job *); +extern int qla24xx_bsg_timeout(struct bsg_job *); extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t); extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t, uint32_t); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 5b09296b46a3058f9c938990274d158b508af884..632d5f30386ab0ae529036c292f3c1c8e64162ca 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1769,8 +1769,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) if (req->outstanding_cmds) return QLA_SUCCESS; - if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase && - (ql2xmultique_tag || ql2xmaxqueues > 1))) + if (!IS_FWI2_CAPABLE(ha)) req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; else { if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) @@ -4248,10 +4247,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) struct req_que *req; struct rsp_que *rsp; - if (vha->hw->flags.cpu_affinity_enabled) - req = vha->hw->req_q_map[0]; - else - req = vha->req; + req = vha->req; rsp = req->rsp; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); @@ -6040,10 +6036,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) return -EINVAL; rval = qla2x00_fw_ready(base_vha); - if (ha->flags.cpu_affinity_enabled) - req = ha->req_q_map[0]; + if (vha->qpair) + req = vha->qpair->req; else - req = vha->req; + req = ha->req_q_map[0]; rsp = req->rsp; if (rval == QLA_SUCCESS) { @@ -6725,3 +6721,162 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) return ret; } + +struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx) +{ + int rsp_id = 0; + int req_id = 0; + int i; + struct qla_hw_data *ha = vha->hw; + uint16_t qpair_id = 0; + struct qla_qpair *qpair = NULL; + struct qla_msix_entry *msix; + + if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { + ql_log(ql_log_warn, vha, 0x00181, + "FW/Driver is not multi-queue capable.\n"); + return NULL; + } + + if (ql2xmqsupport) { + qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); + if (qpair == NULL) { + ql_log(ql_log_warn, vha, 0x0182, + "Failed to 
allocate memory for queue pair.\n"); + return NULL; + } + memset(qpair, 0, sizeof(struct qla_qpair)); + + qpair->hw = vha->hw; + + /* Assign available que pair id */ + mutex_lock(&ha->mq_lock); + qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); + if (qpair_id >= ha->max_qpairs) { + mutex_unlock(&ha->mq_lock); + ql_log(ql_log_warn, vha, 0x0183, + "No resources to create additional q pair.\n"); + goto fail_qid_map; + } + set_bit(qpair_id, ha->qpair_qid_map); + ha->queue_pair_map[qpair_id] = qpair; + qpair->id = qpair_id; + qpair->vp_idx = vp_idx; + + for (i = 0; i < ha->msix_count; i++) { + msix = &ha->msix_entries[i]; + if (msix->in_use) + continue; + qpair->msix = msix; + ql_log(ql_dbg_multiq, vha, 0xc00f, + "Vector %x selected for qpair\n", msix->vector); + break; + } + if (!qpair->msix) { + ql_log(ql_log_warn, vha, 0x0184, + "Out of MSI-X vectors!.\n"); + goto fail_msix; + } + + qpair->msix->in_use = 1; + list_add_tail(&qpair->qp_list_elem, &vha->qp_list); + + mutex_unlock(&ha->mq_lock); + + /* Create response queue first */ + rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair); + if (!rsp_id) { + ql_log(ql_log_warn, vha, 0x0185, + "Failed to create response queue.\n"); + goto fail_rsp; + } + + qpair->rsp = ha->rsp_q_map[rsp_id]; + + /* Create request queue */ + req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos); + if (!req_id) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to create request queue.\n"); + goto fail_req; + } + + qpair->req = ha->req_q_map[req_id]; + qpair->rsp->req = qpair->req; + + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { + if (ha->fw_attributes & BIT_4) + qpair->difdix_supported = 1; + } + + qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); + if (!qpair->srb_mempool) { + ql_log(ql_log_warn, vha, 0x0191, + "Failed to create srb mempool for qpair %d\n", + qpair->id); + goto fail_mempool; + } + + /* Mark as online */ + qpair->online = 1; + + if (!vha->flags.qpairs_available) + vha->flags.qpairs_available = 1; + + ql_dbg(ql_dbg_multiq, vha, 0xc00d, + "Request/Response queue pair created, id %d\n", + qpair->id); + ql_dbg(ql_dbg_init, vha, 0x0187, + "Request/Response queue pair created, id %d\n", + qpair->id); + } + return qpair; + +fail_mempool: +fail_req: + qla25xx_delete_rsp_que(vha, qpair->rsp); +fail_rsp: + mutex_lock(&ha->mq_lock); + qpair->msix->in_use = 0; + list_del(&qpair->qp_list_elem); + if (list_empty(&vha->qp_list)) + vha->flags.qpairs_available = 0; +fail_msix: + ha->queue_pair_map[qpair_id] = NULL; + clear_bit(qpair_id, ha->qpair_qid_map); + mutex_unlock(&ha->mq_lock); +fail_qid_map: + kfree(qpair); + return NULL; +} + +int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) +{ + int ret; + struct qla_hw_data *ha = qpair->hw; + + qpair->delete_in_progress = 1; + while (atomic_read(&qpair->ref_count)) + msleep(500); + + ret = qla25xx_delete_req_que(vha, qpair->req); + if (ret != QLA_SUCCESS) + goto fail; + ret = qla25xx_delete_rsp_que(vha, qpair->rsp); + if (ret != QLA_SUCCESS) + goto fail; + + mutex_lock(&ha->mq_lock); + ha->queue_pair_map[qpair->id] = NULL; + clear_bit(qpair->id, ha->qpair_qid_map); + list_del(&qpair->qp_list_elem); + if (list_empty(&vha->qp_list)) + vha->flags.qpairs_available = 0; + mempool_destroy(qpair->srb_mempool); + kfree(qpair); + mutex_unlock(&ha->mq_lock); + + return QLA_SUCCESS; +fail: + return ret; +} diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 
edc48f3b8230cd60b9b3df25bbf87394f5ad6676..44e404583c86fca78d50be48c12129c2f89ad695 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -215,6 +215,36 @@ qla2x00_reset_active(scsi_qla_host_t *vha) test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); } +static inline srb_t * +qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) +{ + srb_t *sp = NULL; + uint8_t bail; + + QLA_QPAIR_MARK_BUSY(qpair, bail); + if (unlikely(bail)) + return NULL; + + sp = mempool_alloc(qpair->srb_mempool, flag); + if (!sp) + goto done; + + memset(sp, 0, sizeof(*sp)); + sp->fcport = fcport; + sp->iocbs = 1; +done: + if (!sp) + QLA_QPAIR_MARK_NOT_BUSY(qpair); + return sp; +} + +static inline void +qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp) +{ + mempool_free(sp, qpair->srb_mempool); + QLA_QPAIR_MARK_NOT_BUSY(qpair); +} + static inline srb_t * qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) { diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index b41265a75ed58edc1bac763264399a454dd576b3..58e49a3e1de8bcc30b448a889ab8f4b0144fd982 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -12,7 +12,6 @@ #include -static void qla25xx_set_que(srb_t *, struct rsp_que **); /** * qla2x00_get_cmd_direction() - Determine control_flag data direction. * @cmd: SCSI command @@ -143,7 +142,7 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) return (cont_pkt); } -static inline int +inline int qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) { struct scsi_cmnd *cmd = GET_CMD_SP(sp); @@ -693,10 +692,11 @@ qla24xx_calc_dsd_lists(uint16_t dsds) * @sp: SRB command to process * @cmd_pkt: Command type 3 IOCB * @tot_dsds: Total number of segments to transfer + * @req: pointer to request queue */ -static inline void +inline void qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, - uint16_t tot_dsds) + uint16_t tot_dsds, struct req_que *req) { uint16_t avail_dsds; uint32_t *cur_dsd; @@ -745,7 +745,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, * Five DSDs are available in the Continuation * Type 1 IOCB. */ - cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; avail_dsds = 5; } @@ -845,24 +845,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, } } -struct qla2_sgx { - dma_addr_t dma_addr; /* OUT */ - uint32_t dma_len; /* OUT */ - - uint32_t tot_bytes; /* IN */ - struct scatterlist *cur_sg; /* IN */ - - /* for book keeping, bzero on initial invocation */ - uint32_t bytes_consumed; - uint32_t num_bytes; - uint32_t tot_partial; - - /* for debugging */ - uint32_t num_sg; - srb_t *sp; -}; - -static int +int qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, uint32_t *partial) { @@ -1207,7 +1190,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, * @cmd_pkt: Command type 3 IOCB * @tot_dsds: Total number of segments to transfer */ -static inline int +inline int qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) { @@ -1436,8 +1419,8 @@ qla24xx_start_scsi(srb_t *sp) struct qla_hw_data *ha = vha->hw; /* Setup device pointers. 
*/ - qla25xx_set_que(sp, &rsp); req = vha->req; + rsp = req->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; @@ -1523,12 +1506,10 @@ qla24xx_start_scsi(srb_t *sp) cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Build IOCB segments */ - qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); + qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; - /* Specify response queue number where completion should happen */ - cmd_pkt->entry_status = (uint8_t) rsp->id; wmb(); /* Adjust ring index. */ req->ring_index++; @@ -1597,9 +1578,8 @@ qla24xx_dif_start_scsi(srb_t *sp) } /* Setup device pointers. */ - - qla25xx_set_que(sp, &rsp); req = vha->req; + rsp = req->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; @@ -1764,18 +1744,365 @@ qla24xx_dif_start_scsi(srb_t *sp) return QLA_FUNCTION_FAILED; } - -static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) +/** + * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +static int +qla2xxx_start_scsi_mq(srb_t *sp) { + int nseg; + unsigned long flags; + uint32_t *clr_ptr; + uint32_t index; + uint32_t handle; + struct cmd_type_7 *cmd_pkt; + uint16_t cnt; + uint16_t req_cnt; + uint16_t tot_dsds; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct qla_hw_data *ha = sp->fcport->vha->hw; - int affinity = cmd->request->cpu; + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qpair = sp->qpair; + + /* Setup qpair pointers */ + rsp = qpair->rsp; + req = qpair->req; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + vha->marker_needed = 0; + } + + /* Acquire qpair specific lock */ + spin_lock_irqsave(&qpair->qp_lock, flags); + + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + if (index == req->num_outstanding_cmds) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else + nseg = 0; + + tot_dsds = nseg; + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); + + /* Zero out remaining portion of packet. */ + /* tagged queuing modifier -- default is TSK_SIMPLE (0). 
*/ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* Set NPORT-ID and LUN number*/ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + cmd_pkt->task = TSK_SIMPLE; + + /* Load SCSI command packet. */ + memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); + host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); + + cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + + /* Build IOCB segments */ + qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); + + /* Set total data segment count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + wmb(); + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ + WRT_REG_DWORD(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_SUCCESS; + +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + + return QLA_FUNCTION_FAILED; +} + + +/** + * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +int +qla2xxx_dif_start_scsi_mq(srb_t *sp) +{ + int nseg; + unsigned long flags; + uint32_t *clr_ptr; + uint32_t index; + uint32_t handle; + uint16_t cnt; + uint16_t req_cnt = 0; + uint16_t tot_dsds; + uint16_t tot_prot_dsds; + uint16_t fw_prot_opts = 0; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct cmd_type_crc_2 *cmd_pkt; + uint32_t status = 0; + struct qla_qpair *qpair = sp->qpair; + +#define QDSS_GOT_Q_SPACE BIT_0 + + /* Check for host side state */ + if (!qpair->online) { + cmd->result = DID_NO_CONNECT << 16; + return QLA_INTERFACE_ERROR; + } + + if (!qpair->difdix_supported && + scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + cmd->result = DID_NO_CONNECT << 16; + return QLA_INTERFACE_ERROR; + } + + /* Only process protection or >16 cdb in this routine */ + if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { + if (cmd->cmd_len <= 16) + return qla2xxx_start_scsi_mq(sp); + } + + /* Setup qpair pointers */ + rsp = qpair->rsp; + req = qpair->req; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + vha->marker_needed = 0; + } + + /* Acquire ring specific lock */ + spin_lock_irqsave(&qpair->qp_lock, flags); + + /* Check for room in outstanding command list. 
*/ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + + if (index == req->num_outstanding_cmds) + goto queuing_error; + + /* Compute number of required data segments */ + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + else + sp->flags |= SRB_DMA_VALID; + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { + struct qla2_sgx sgx; + uint32_t partial; + + memset(&sgx, 0, sizeof(struct qla2_sgx)); + sgx.tot_bytes = scsi_bufflen(cmd); + sgx.cur_sg = scsi_sglist(cmd); + sgx.sp = sp; + + nseg = 0; + while (qla24xx_get_one_block_sg( + cmd->device->sector_size, &sgx, &partial)) + nseg++; + } + } else + nseg = 0; + + /* number of required data segments */ + tot_dsds = nseg; + + /* Compute number of required protection segments */ + if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), + scsi_prot_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + else + sp->flags |= SRB_CRC_PROT_DMA_VALID; + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { + nseg = scsi_bufflen(cmd) / cmd->device->sector_size; + } + } else { + nseg = 0; + } + + req_cnt = 1; + /* Total Data and protection sg segment(s) */ + tot_prot_dsds = nseg; + tot_dsds += nseg; + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + status |= QDSS_GOT_Q_SPACE; + + /* Build header part of command packet (excluding the OPCODE). */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + /* Fill-in common area */ + cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); + + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + + /* Set NPORT-ID and LUN number*/ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - if (ha->flags.cpu_affinity_enabled && affinity >= 0 && - affinity < ha->max_rsp_queues - 1) - *rsp = ha->rsp_q_map[affinity + 1]; - else - *rsp = ha->rsp_q_map[0]; + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + /* Total Data and protection segment(s) */ + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* Build IOCB segments and adjust for data protection segments */ + if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) + req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != + QLA_SUCCESS) + goto queuing_error; + + cmd_pkt->entry_count = (uint8_t)req_cnt; + cmd_pkt->timeout = cpu_to_le16(0); + wmb(); + + /* Adjust ring index. 
*/ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + /* Set chip new ring index. */ + WRT_REG_DWORD(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + + return QLA_SUCCESS; + +queuing_error: + if (status & QDSS_GOT_Q_SPACE) { + req->outstanding_cmds[handle] = NULL; + req->cnt += req_cnt; + } + /* Cleanup will be performed by the caller (queuecommand) */ + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_FUNCTION_FAILED; } /* Generic Control-SRB manipulation functions. */ @@ -2197,7 +2524,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) static void qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { - struct fc_bsg_job *bsg_job = sp->u.bsg_job; + struct bsg_job *bsg_job = sp->u.bsg_job; + struct fc_bsg_request *bsg_request = bsg_job->request; els_iocb->entry_type = ELS_IOCB_TYPE; els_iocb->entry_count = 1; @@ -2212,8 +2540,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->opcode = sp->type == SRB_ELS_CMD_RPT ? - bsg_job->request->rqst_data.r_els.els_code : - bsg_job->request->rqst_data.h_els.command_code; + bsg_request->rqst_data.r_els.els_code : + bsg_request->rqst_data.h_els.command_code; els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; els_iocb->port_id[1] = sp->fcport->d_id.b.area; els_iocb->port_id[2] = sp->fcport->d_id.b.domain; @@ -2250,7 +2578,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) uint16_t tot_dsds; scsi_qla_host_t *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; - struct fc_bsg_job *bsg_job = sp->u.bsg_job; + struct bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; int entry_count = 1; @@ -2327,7 +2655,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) uint16_t tot_dsds; scsi_qla_host_t *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; - struct fc_bsg_job *bsg_job = sp->u.bsg_job; + struct bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; int entry_count = 1; @@ -2663,7 +2991,7 @@ qla82xx_start_scsi(srb_t *sp) cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Build IOCB segments */ - qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); + qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); /* Set total data segment count. 
*/ cmd_pkt->entry_count = (uint8_t)req_cnt; @@ -2833,7 +3161,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, struct scatterlist *sg; int index; int entry_count = 1; - struct fc_bsg_job *bsg_job = sp->u.bsg_job; + struct bsg_job *bsg_job = sp->u.bsg_job; /*Update entry type to indicate bidir command */ *((uint32_t *)(&cmd_pkt->entry_type)) = diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 068c4e47fac99c2cdcf480776ceba3dc5b33cf99..5093ca9b02ec52c8e70674f88205941cc0967d9f 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -1356,7 +1357,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, const char func[] = "CT_IOCB"; const char *type; srb_t *sp; - struct fc_bsg_job *bsg_job; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; uint16_t comp_status; int res; @@ -1365,6 +1367,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, return; bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; type = "ct pass-through"; @@ -1373,32 +1376,32 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT * fc payload to the caller */ - bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply); if (comp_status != CS_COMPLETE) { if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; - bsg_job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); ql_log(ql_log_warn, vha, 0x5048, "CT pass-through-%s error " "comp_status-status=0x%x total_byte = 0x%x.\n", type, comp_status, - bsg_job->reply->reply_payload_rcv_len); + bsg_reply->reply_payload_rcv_len); } else { ql_log(ql_log_warn, vha, 0x5049, "CT pass-through-%s error " "comp_status-status=0x%x.\n", type, comp_status); res = DID_ERROR << 16; - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; } ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, (uint8_t *)pkt, sizeof(*pkt)); } else { res = DID_OK << 16; - bsg_job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0; } @@ -1413,7 +1416,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, const char func[] = "ELS_CT_IOCB"; const char *type; srb_t *sp; - struct fc_bsg_job *bsg_job; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; uint16_t comp_status; uint32_t fw_status[3]; uint8_t* fw_sts_ptr; @@ -1423,6 +1427,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, if (!sp) return; bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; type = NULL; switch (sp->type) { @@ -1452,13 +1457,13 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT * fc payload to the caller */ - bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); if (comp_status != CS_COMPLETE) { if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; - bsg_job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); ql_dbg(ql_dbg_user, vha, 
0x503f, @@ -1480,7 +1485,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, le16_to_cpu(((struct els_sts_entry_24xx *) pkt)->error_subcode_2)); res = DID_ERROR << 16; - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } @@ -1489,7 +1494,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, } else { res = DID_OK << 16; - bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; + bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0; } @@ -1904,7 +1909,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, uint16_t scsi_status; uint16_t thread_id; uint32_t rval = EXT_STATUS_OK; - struct fc_bsg_job *bsg_job = NULL; + struct bsg_job *bsg_job = NULL; + struct fc_bsg_request *bsg_request; + struct fc_bsg_reply *bsg_reply; sts_entry_t *sts; struct sts_entry_24xx *sts24; sts = (sts_entry_t *) pkt; @@ -1919,11 +1926,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, } sp = req->outstanding_cmds[index]; - if (sp) { - /* Free outstanding command slot. */ - req->outstanding_cmds[index] = NULL; - bsg_job = sp->u.bsg_job; - } else { + if (!sp) { ql_log(ql_log_warn, vha, 0x70b0, "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", req->id, index); @@ -1932,6 +1935,12 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, return; } + /* Free outstanding command slot. */ + req->outstanding_cmds[index] = NULL; + bsg_job = sp->u.bsg_job; + bsg_request = bsg_job->request; + bsg_reply = bsg_job->reply; + if (IS_FWI2_CAPABLE(ha)) { comp_status = le16_to_cpu(sts24->comp_status); scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; @@ -1940,14 +1949,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; } - thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; switch (comp_status) { case CS_COMPLETE: if (scsi_status == 0) { - bsg_job->reply->reply_payload_rcv_len = + bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; vha->qla_stats.input_bytes += - bsg_job->reply->reply_payload_rcv_len; + bsg_reply->reply_payload_rcv_len; vha->qla_stats.input_requests++; rval = EXT_STATUS_OK; } @@ -2028,11 +2037,11 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, rval = EXT_STATUS_ERR; break; } - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; done: /* Return the vendor specific reply to API */ - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = sizeof(struct fc_bsg_reply); /* Always return DID_OK, bsg will send the vendor specific response * in this case only */ @@ -2862,41 +2871,6 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) return IRQ_HANDLED; } -static irqreturn_t -qla25xx_msix_rsp_q(int irq, void *dev_id) -{ - struct qla_hw_data *ha; - scsi_qla_host_t *vha; - struct rsp_que *rsp; - struct device_reg_24xx __iomem *reg; - unsigned long flags; - uint32_t hccr = 0; - - rsp = (struct rsp_que *) dev_id; - if (!rsp) { - ql_log(ql_log_info, NULL, 0x505b, - "%s: NULL response queue pointer.\n", __func__); - return IRQ_NONE; - } - ha = rsp->hw; - vha = pci_get_drvdata(ha->pdev); - - /* Clear the interrupt, if enabled, for 
this response queue */ - if (!ha->flags.disable_msix_handshake) { - reg = &ha->iobase->isp24; - spin_lock_irqsave(&ha->hardware_lock, flags); - WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); - hccr = RD_REG_DWORD_RELAXED(&reg->hccr); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - } - if (qla2x00_check_reg32_for_disconnect(vha, hccr)) - goto out; - queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); - -out: - return IRQ_HANDLED; -} - static irqreturn_t qla24xx_msix_default(int irq, void *dev_id) { @@ -2993,6 +2967,35 @@ qla24xx_msix_default(int irq, void *dev_id) return IRQ_HANDLED; } +irqreturn_t +qla2xxx_msix_rsp_q(int irq, void *dev_id) +{ + struct qla_hw_data *ha; + struct qla_qpair *qpair; + struct device_reg_24xx __iomem *reg; + unsigned long flags; + + qpair = dev_id; + if (!qpair) { + ql_log(ql_log_info, NULL, 0x505b, + "%s: NULL response queue pointer.\n", __func__); + return IRQ_NONE; + } + ha = qpair->hw; + + /* Clear the interrupt, if enabled, for this response queue */ + if (unlikely(!ha->flags.disable_msix_handshake)) { + reg = &ha->iobase->isp24; + spin_lock_irqsave(&ha->hardware_lock, flags); + WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + + queue_work(ha->wq, &qpair->q_work); + + return IRQ_HANDLED; +} + /* Interrupt handling helpers. */ struct qla_init_msix_entry { @@ -3000,69 +3003,28 @@ struct qla_init_msix_entry { irq_handler_t handler; }; -static struct qla_init_msix_entry msix_entries[3] = { +static struct qla_init_msix_entry msix_entries[] = { { "qla2xxx (default)", qla24xx_msix_default }, { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, - { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, + { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, + { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q }, }; -static struct qla_init_msix_entry qla82xx_msix_entries[2] = { +static struct qla_init_msix_entry qla82xx_msix_entries[] = { { "qla2xxx (default)", qla82xx_msix_default }, { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, }; -static struct qla_init_msix_entry qla83xx_msix_entries[3] = { - { "qla2xxx (default)", qla24xx_msix_default }, - { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, - { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, -}; - -static void -qla24xx_disable_msix(struct qla_hw_data *ha) -{ - int i; - struct qla_msix_entry *qentry; - scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); - - for (i = 0; i < ha->msix_count; i++) { - qentry = &ha->msix_entries[i]; - if (qentry->have_irq) { - /* un-register irq cpu affinity notification */ - irq_set_affinity_notifier(qentry->vector, NULL); - free_irq(qentry->vector, qentry->rsp); - } - } - pci_disable_msix(ha->pdev); - kfree(ha->msix_entries); - ha->msix_entries = NULL; - ha->flags.msix_enabled = 0; - ql_dbg(ql_dbg_init, vha, 0x0042, - "Disabled the MSI.\n"); -} - static int qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { #define MIN_MSIX_COUNT 2 -#define ATIO_VECTOR 2 int i, ret; - struct msix_entry *entries; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); - entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, - GFP_KERNEL); - if (!entries) { - ql_log(ql_log_warn, vha, 0x00bc, - "Failed to allocate memory for msix_entry.\n"); - return -ENOMEM; - } - - for (i = 0; i < ha->msix_count; i++) - entries[i].entry = i; - - ret = pci_enable_msix_range(ha->pdev, - entries, MIN_MSIX_COUNT, ha->msix_count); + ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + if (ret < 0) { 
ql_log(ql_log_fatal, vha, 0x00c7, "MSI-X: Failed to enable support, " @@ -3072,10 +3034,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) } else if (ret < ha->msix_count) { ql_log(ql_log_warn, vha, 0x00c6, "MSI-X: Failed to enable support " - "-- %d/%d\n Retry with %d vectors.\n", - ha->msix_count, ret, ret); + "with %d vectors, using %d vectors.\n", + ha->msix_count, ret); ha->msix_count = ret; - ha->max_rsp_queues = ha->msix_count - 1; + /* Recalculate queue values */ + if (ha->mqiobase && ql2xmqsupport) { + ha->max_req_queues = ha->msix_count - 1; + + /* ATIOQ needs 1 vector. That's 1 less QPair */ + if (QLA_TGT_MODE_ENABLED()) + ha->max_req_queues--; + + ha->max_rsp_queues = ha->max_req_queues; + + ha->max_qpairs = ha->max_req_queues - 1; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, + "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); + } } ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); @@ -3089,20 +3064,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; - qentry->vector = entries[i].vector; - qentry->entry = entries[i].entry; + qentry->vector = pci_irq_vector(ha->pdev, i); + qentry->entry = i; qentry->have_irq = 0; - qentry->rsp = NULL; + qentry->in_use = 0; + qentry->handle = NULL; qentry->irq_notify.notify = qla_irq_affinity_notify; qentry->irq_notify.release = qla_irq_affinity_release; qentry->cpuid = -1; } /* Enable MSI-X vectors for the base queue */ - for (i = 0; i < 2; i++) { + for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) { qentry = &ha->msix_entries[i]; - qentry->rsp = rsp; + qentry->handle = rsp; rsp->msix = qentry; + scnprintf(qentry->name, sizeof(qentry->name), + msix_entries[i].name); if (IS_P3P_TYPE(ha)) ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, @@ -3114,6 +3092,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) if (ret) goto msix_register_fail; qentry->have_irq = 1; + qentry->in_use = 1; /* Register for CPU affinity notification. */ irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); @@ -3133,12 +3112,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) * queue. 
*/ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { - qentry = &ha->msix_entries[ATIO_VECTOR]; - qentry->rsp = rsp; + qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; rsp->msix = qentry; + qentry->handle = rsp; + scnprintf(qentry->name, sizeof(qentry->name), + msix_entries[QLA_ATIO_VECTOR].name); + qentry->in_use = 1; ret = request_irq(qentry->vector, - qla83xx_msix_entries[ATIO_VECTOR].handler, - 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); + msix_entries[QLA_ATIO_VECTOR].handler, + 0, msix_entries[QLA_ATIO_VECTOR].name, rsp); qentry->have_irq = 1; } @@ -3147,7 +3129,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) ql_log(ql_log_fatal, vha, 0x00cb, "MSI-X: unable to register handler -- %x/%d.\n", qentry->vector, ret); - qla24xx_disable_msix(ha); + qla2x00_free_irqs(vha); ha->mqenable = 0; goto msix_out; } @@ -3155,11 +3137,13 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) /* Enable MSI-X vector for response queue update for queue 0 */ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { if (ha->msixbase && ha->mqiobase && - (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) + (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || + ql2xmqsupport)) ha->mqenable = 1; } else - if (ha->mqiobase - && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) + if (ha->mqiobase && + (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || + ql2xmqsupport)) ha->mqenable = 1; ql_dbg(ql_dbg_multiq, vha, 0xc005, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", @@ -3169,7 +3153,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); msix_out: - kfree(entries); return ret; } @@ -3222,7 +3205,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) !IS_QLA27XX(ha)) goto skip_msi; - ret = pci_enable_msi(ha->pdev); + ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); if (!ret) { ql_dbg(ql_dbg_init, vha, 0x0038, "MSI: Enabled.\n"); @@ -3267,6 +3250,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct rsp_que *rsp; + struct qla_msix_entry *qentry; + int i; /* * We need to check that ha->rsp_q_map is valid in case we are called @@ -3276,25 +3261,36 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) return; rsp = ha->rsp_q_map[0]; - if (ha->flags.msix_enabled) - qla24xx_disable_msix(ha); - else if (ha->flags.msi_enabled) { - free_irq(ha->pdev->irq, rsp); - pci_disable_msi(ha->pdev); - } else - free_irq(ha->pdev->irq, rsp); -} + if (ha->flags.msix_enabled) { + for (i = 0; i < ha->msix_count; i++) { + qentry = &ha->msix_entries[i]; + if (qentry->have_irq) { + irq_set_affinity_notifier(qentry->vector, NULL); + free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); + } + } + kfree(ha->msix_entries); + ha->msix_entries = NULL; + ha->flags.msix_enabled = 0; + ql_dbg(ql_dbg_init, vha, 0x0042, + "Disabled MSI-X.\n"); + } else { + free_irq(pci_irq_vector(ha->pdev, 0), rsp); + } + pci_free_irq_vectors(ha->pdev); +} -int qla25xx_request_irq(struct rsp_que *rsp) +int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, + struct qla_msix_entry *msix, int vector_type) { - struct qla_hw_data *ha = rsp->hw; - struct qla_init_msix_entry *intr = &msix_entries[2]; - struct qla_msix_entry *msix = rsp->msix; + struct qla_init_msix_entry *intr = &msix_entries[vector_type]; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); int ret; - ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); + scnprintf(msix->name, sizeof(msix->name), + "qla2xxx%lu_qpair%d", 
vha->host_no, qpair->id); + ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); if (ret) { ql_log(ql_log_fatal, vha, 0x00e6, "MSI-X: Unable to register handler -- %x/%d.\n", @@ -3302,7 +3298,7 @@ int qla25xx_request_irq(struct rsp_que *rsp) return ret; } msix->have_irq = 1; - msix->rsp = rsp; + msix->handle = qpair; return ret; } @@ -3315,11 +3311,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify, container_of(notify, struct qla_msix_entry, irq_notify); struct qla_hw_data *ha; struct scsi_qla_host *base_vha; + struct rsp_que *rsp = e->handle; /* user is recommended to set mask to just 1 cpu */ e->cpuid = cpumask_first(mask); - ha = e->rsp->hw; + ha = rsp->hw; base_vha = pci_get_drvdata(ha->pdev); ql_dbg(ql_dbg_init, base_vha, 0xffff, @@ -3343,9 +3340,10 @@ static void qla_irq_affinity_release(struct kref *ref) container_of(ref, struct irq_affinity_notify, kref); struct qla_msix_entry *e = container_of(notify, struct qla_msix_entry, irq_notify); - struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev); + struct rsp_que *rsp = e->handle; + struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev); ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host%ld: vector %d cpu %d \n", __func__, + "%s: host%ld: vector %d cpu %d\n", __func__, base_vha->host_no, e->vector, e->cpuid); } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 23698c9986998a0a7279e6e256d4c377f6fceab9..2819ceb96041e5b97b234f115c9b35d4b4251ffe 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -10,6 +10,43 @@ #include #include +struct rom_cmd { + uint16_t cmd; +} rom_cmds[] = { + { MBC_LOAD_RAM }, + { MBC_EXECUTE_FIRMWARE }, + { MBC_READ_RAM_WORD }, + { MBC_MAILBOX_REGISTER_TEST }, + { MBC_VERIFY_CHECKSUM }, + { MBC_GET_FIRMWARE_VERSION }, + { MBC_LOAD_RISC_RAM }, + { MBC_DUMP_RISC_RAM }, + { MBC_LOAD_RISC_RAM_EXTENDED }, + { MBC_DUMP_RISC_RAM_EXTENDED }, + { MBC_WRITE_RAM_WORD_EXTENDED }, + { MBC_READ_RAM_EXTENDED }, + { MBC_GET_RESOURCE_COUNTS }, + { MBC_SET_FIRMWARE_OPTION }, + { MBC_MID_INITIALIZE_FIRMWARE }, + { MBC_GET_FIRMWARE_STATE }, + { MBC_GET_MEM_OFFLOAD_CNTRL_STAT }, + { MBC_GET_RETRY_COUNT }, + { MBC_TRACE_CONTROL }, +}; + +static int is_rom_cmd(uint16_t cmd) +{ + int i; + struct rom_cmd *wc; + + for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) { + wc = rom_cmds + i; + if (wc->cmd == cmd) + return 1; + } + + return 0; +} /* * qla2x00_mailbox_command @@ -92,6 +129,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } + /* check if ISP abort is active and return cmd with timeout */ + if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || + test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || + test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) && + !is_rom_cmd(mcp->mb[0])) { + ql_log(ql_log_info, vha, 0x1005, + "Cmd 0x%x aborted with timeout since ISP Abort is pending\n", + mcp->mb[0]); + return QLA_FUNCTION_TIMEOUT; + } + /* * Wait for active mailbox commands to finish by waiting at most tov * seconds. 
This is to serialize actual issuing of mailbox cmds during @@ -178,6 +226,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); + wait_time = jiffies; if (!wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ)) { ql_dbg(ql_dbg_mbx, vha, 0x117a, @@ -186,6 +235,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); spin_unlock_irqrestore(&ha->hardware_lock, flags); } + if (time_after(jiffies, wait_time + 5 * HZ)) + ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", + command, jiffies_to_msecs(jiffies - wait_time)); } else { ql_dbg(ql_dbg_mbx, vha, 0x1011, "Cmd=%x Polling Mode.\n", command); @@ -1194,12 +1246,17 @@ qla2x00_abort_command(srb_t *sp) fc_port_t *fcport = sp->fcport; scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; - struct req_que *req = vha->req; + struct req_que *req; struct scsi_cmnd *cmd = GET_CMD_SP(sp); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, "Entered %s.\n", __func__); + if (vha->flags.qpairs_available && sp->qpair) + req = sp->qpair->req; + else + req = vha->req; + spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < req->num_outstanding_cmds; handle++) { if (req->outstanding_cmds[handle] == sp) @@ -2152,10 +2209,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, "Entered %s.\n", __func__); - if (ha->flags.cpu_affinity_enabled) - req = ha->req_q_map[0]; + if (vha->vp_idx && vha->qpair) + req = vha->qpair->req; else - req = vha->req; + req = ha->req_q_map[0]; lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { @@ -2435,10 +2492,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, } memset(lg, 0, sizeof(struct logio_entry_24xx)); - if (ql2xmaxqueues > 1) - req = ha->req_q_map[0]; - else - req = vha->req; + req = vha->req; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; lg->handle = MAKE_HANDLE(req->id, lg->handle); @@ -2904,6 +2958,9 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, "Entered %s.\n", __func__); + if (vha->flags.qpairs_available && sp->qpair) + req = sp->qpair->req; + if (ql2xasynctmfenable) return qla24xx_async_abort_command(sp); @@ -2984,6 +3041,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, struct qla_hw_data *ha; struct req_que *req; struct rsp_que *rsp; + struct qla_qpair *qpair; vha = fcport->vha; ha = vha->hw; @@ -2992,10 +3050,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, "Entered %s.\n", __func__); - if (ha->flags.cpu_affinity_enabled) - rsp = ha->rsp_q_map[tag + 1]; - else + if (vha->vp_idx && vha->qpair) { + /* NPIV port */ + qpair = vha->qpair; + rsp = qpair->rsp; + req = qpair->req; + } else { rsp = req->rsp; + } + tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); if (tsk == NULL) { ql_log(ql_log_warn, vha, 0x1093, diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index cf7ba52bae665fa482b8535f92e5fcb1dbe25244..c6d6f0d912ff75ffaf9b9d810f81af735e39549b 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -540,9 +540,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) uint16_t que_id = rsp->id; if (rsp->msix && rsp->msix->have_irq) { - 
free_irq(rsp->msix->vector, rsp); + free_irq(rsp->msix->vector, rsp->msix->handle); rsp->msix->have_irq = 0; - rsp->msix->rsp = NULL; + rsp->msix->in_use = 0; + rsp->msix->handle = NULL; } dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), rsp->ring, rsp->dma); @@ -573,7 +574,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) return ret; } -static int +int qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { int ret = -1; @@ -596,34 +597,42 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct qla_hw_data *ha = vha->hw; + struct qla_qpair *qpair, *tqpair; - /* Delete request queues */ - for (cnt = 1; cnt < ha->max_req_queues; cnt++) { - req = ha->req_q_map[cnt]; - if (req && test_bit(cnt, ha->req_qid_map)) { - ret = qla25xx_delete_req_que(vha, req); - if (ret != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x00ea, - "Couldn't delete req que %d.\n", - req->id); - return ret; + if (ql2xmqsupport) { + list_for_each_entry_safe(qpair, tqpair, &vha->qp_list, + qp_list_elem) + qla2xxx_delete_qpair(vha, qpair); + } else { + /* Delete request queues */ + for (cnt = 1; cnt < ha->max_req_queues; cnt++) { + req = ha->req_q_map[cnt]; + if (req && test_bit(cnt, ha->req_qid_map)) { + ret = qla25xx_delete_req_que(vha, req); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x00ea, + "Couldn't delete req que %d.\n", + req->id); + return ret; + } } } - } - /* Delete response queues */ - for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { - rsp = ha->rsp_q_map[cnt]; - if (rsp && test_bit(cnt, ha->rsp_qid_map)) { - ret = qla25xx_delete_rsp_que(vha, rsp); - if (ret != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x00eb, - "Couldn't delete rsp que %d.\n", - rsp->id); - return ret; + /* Delete response queues */ + for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { + rsp = ha->rsp_q_map[cnt]; + if (rsp && test_bit(cnt, ha->rsp_qid_map)) { + ret = qla25xx_delete_rsp_que(vha, rsp); + if (ret != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x00eb, + "Couldn't delete rsp que %d.\n", + rsp->id); + return ret; + } } } } + return ret; } @@ -659,10 +668,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, if (ret != QLA_SUCCESS) goto que_failed; - mutex_lock(&ha->vport_lock); + mutex_lock(&ha->mq_lock); que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); if (que_id >= ha->max_req_queues) { - mutex_unlock(&ha->vport_lock); + mutex_unlock(&ha->mq_lock); ql_log(ql_log_warn, base_vha, 0x00db, "No resources to create additional request queue.\n"); goto que_failed; @@ -708,7 +717,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->req_q_out = &reg->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; req->out_ptr = (void *)(req->ring + req->length); - mutex_unlock(&ha->vport_lock); + mutex_unlock(&ha->mq_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc004, "ring_ptr=%p ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", @@ -724,9 +733,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x00df, "%s failed.\n", __func__); - mutex_lock(&ha->vport_lock); + mutex_lock(&ha->mq_lock); clear_bit(que_id, ha->req_qid_map); - mutex_unlock(&ha->vport_lock); + mutex_unlock(&ha->mq_lock); goto que_failed; } @@ -741,20 +750,20 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, static void qla_do_work(struct work_struct *work) { unsigned long flags; - struct rsp_que *rsp = 
container_of(work, struct rsp_que, q_work); + struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work); struct scsi_qla_host *vha; - struct qla_hw_data *ha = rsp->hw; + struct qla_hw_data *ha = qpair->hw; - spin_lock_irqsave(&rsp->hw->hardware_lock, flags); + spin_lock_irqsave(&qpair->qp_lock, flags); vha = pci_get_drvdata(ha->pdev); - qla24xx_process_response_queue(vha, rsp); - spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); + qla24xx_process_response_queue(vha, qpair->rsp); + spin_unlock_irqrestore(&qpair->qp_lock, flags); } /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid, int req) + uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair) { int ret = 0; struct rsp_que *rsp = NULL; @@ -779,28 +788,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, goto que_failed; } - mutex_lock(&ha->vport_lock); + mutex_lock(&ha->mq_lock); que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); if (que_id >= ha->max_rsp_queues) { - mutex_unlock(&ha->vport_lock); + mutex_unlock(&ha->mq_lock); ql_log(ql_log_warn, base_vha, 0x00e2, "No resources to create additional request queue.\n"); goto que_failed; } set_bit(que_id, ha->rsp_qid_map); - if (ha->flags.msix_enabled) - rsp->msix = &ha->msix_entries[que_id + 1]; - else - ql_log(ql_log_warn, base_vha, 0x00e3, - "MSIX not enabled.\n"); + rsp->msix = qpair->msix; ha->rsp_q_map[que_id] = rsp; rsp->rid = rid; rsp->vp_idx = vp_idx; rsp->hw = ha; ql_dbg(ql_dbg_init, base_vha, 0x00e4, - "queue_id=%d rid=%d vp_idx=%d hw=%p.\n", + "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n", que_id, rsp->rid, rsp->vp_idx, rsp->hw); /* Use alternate PCI bus number */ if (MSB(rsp->rid)) @@ -812,23 +817,27 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, if (!IS_MSIX_NACK_CAPABLE(ha)) options |= BIT_6; + /* Set option to indicate response queue creation */ + options |= BIT_1; + rsp->options = options; rsp->id = que_id; reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; rsp->in_ptr = (void *)(rsp->ring + rsp->length); - mutex_unlock(&ha->vport_lock); + mutex_unlock(&ha->mq_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, - "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", + "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(ql_dbg_init, base_vha, 0x00e5, - "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", + "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); - ret = qla25xx_request_irq(rsp); + ret = qla25xx_request_irq(ha, qpair, qpair->msix, + QLA_MSIX_QPAIR_MULTIQ_RSP_Q); if (ret) goto que_failed; @@ -836,19 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x00e7, "%s failed.\n", __func__); - mutex_lock(&ha->vport_lock); + mutex_lock(&ha->mq_lock); clear_bit(que_id, ha->rsp_qid_map); - mutex_unlock(&ha->vport_lock); + mutex_unlock(&ha->mq_lock); goto que_failed; } - if (req >= 0) - rsp->req = ha->req_q_map[req]; - else - rsp->req = NULL; + rsp->req = NULL; qla2x00_init_response_q_entries(rsp); - if (rsp->hw->wq) - INIT_WORK(&rsp->q_work, qla_do_work); + if (qpair->hw->wq) + INIT_WORK(&qpair->q_work, qla_do_work); return rsp->id; que_failed: diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 15dff7099955b857d7a343661319ba9002cdea77..02f1de18bc2b61db57ad7021fea4e7272e7af437 
100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -2206,7 +2207,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, { const char func[] = "IOSB_IOCB"; srb_t *sp; - struct fc_bsg_job *bsg_job; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; struct srb_iocb *iocb_job; int res; struct qla_mt_iocb_rsp_fx00 fstatus; @@ -2226,6 +2228,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, pkt->dataword_r; } else { bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00)); @@ -2257,8 +2260,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, sp->fcport->vha, 0x5074, (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00)); - res = bsg_job->reply->result = DID_OK << 16; - bsg_job->reply->reply_payload_rcv_len = + res = bsg_reply->result = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; } sp->done(vha, sp, res); @@ -3252,7 +3255,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; - struct fc_bsg_job *bsg_job; + struct bsg_job *bsg_job; + struct fc_bsg_request *bsg_request; struct fxdisc_entry_fx00 fx_iocb; uint8_t entry_cnt = 1; @@ -3301,8 +3305,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) } else { struct scatterlist *sg; bsg_job = sp->u.bsg_job; + bsg_request = bsg_job->request; piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) - &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; fx_iocb.func_num = piocb_rqst->func_type; fx_iocb.adapid = piocb_rqst->adapid; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ace65db1d2a25becd6dc3c4a158d932f7dab8fff..8521cfe302e9e3e72c7aaf1a4753ca75f953b972 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -30,7 +31,7 @@ static int apidev_major; /* * SRB allocation cache */ -static struct kmem_cache *srb_cachep; +struct kmem_cache *srb_cachep; /* * CT6 CTX allocation cache @@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable, "Enables iIDMA settings " "Default is 1 - perform iIDMA. 0 - no iIDMA."); -int ql2xmaxqueues = 1; -module_param(ql2xmaxqueues, int, S_IRUGO); -MODULE_PARM_DESC(ql2xmaxqueues, - "Enables MQ settings " - "Default is 1 for single queue. Set it to number " - "of queues in MQ mode."); - -int ql2xmultique_tag; -module_param(ql2xmultique_tag, int, S_IRUGO); -MODULE_PARM_DESC(ql2xmultique_tag, - "Enables CPU affinity settings for the driver " - "Default is 0 for no affinity of request and response IO. " - "Set it to 1 to turn on the cpu affinity."); +int ql2xmqsupport = 1; +module_param(ql2xmqsupport, int, S_IRUGO); +MODULE_PARM_DESC(ql2xmqsupport, + "Enable on demand multiple queue pairs support " + "Default is 1 for supported. 
" + "Set it to 0 to turn off mq qpair support."); int ql2xfwloadbin; module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); @@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *); static void qla2x00_clear_drv_active(struct qla_hw_data *); static void qla2x00_free_device(scsi_qla_host_t *); static void qla83xx_disable_laser(scsi_qla_host_t *vha); +static int qla2xxx_map_queues(struct Scsi_Host *shost); struct scsi_host_template qla2xxx_driver_template = { .module = THIS_MODULE, @@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = { .scan_finished = qla2xxx_scan_finished, .scan_start = qla2xxx_scan_start, .change_queue_depth = scsi_change_queue_depth, + .map_queues = qla2xxx_map_queues, .this_id = -1, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, @@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, struct req_que **, struct rsp_que **); static void qla2x00_free_fw_dump(struct qla_hw_data *); static void qla2x00_mem_free(struct qla_hw_data *); +int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, + struct qla_qpair *qpair); /* -------------------------------------------------------------------------- */ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, @@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, "Unable to allocate memory for response queue ptrs.\n"); goto fail_rsp_map; } + + if (ql2xmqsupport && ha->max_qpairs) { + ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), + GFP_KERNEL); + if (!ha->queue_pair_map) { + ql_log(ql_log_fatal, vha, 0x0180, + "Unable to allocate memory for queue pair ptrs.\n"); + goto fail_qpair_map; + } + ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); + if (ha->base_qpair == NULL) { + ql_log(ql_log_warn, vha, 0x0182, + "Failed to allocate base queue pair memory.\n"); + goto fail_base_qpair; + } + ha->base_qpair->req = req; + ha->base_qpair->rsp = rsp; + } + /* * Make sure we record at least the request and response queue zero in * case we need to free them if part of the probe fails. 
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, set_bit(0, ha->req_qid_map); return 1; +fail_base_qpair: + kfree(ha->queue_pair_map); +fail_qpair_map: + kfree(ha->rsp_q_map); + ha->rsp_q_map = NULL; fail_rsp_map: kfree(ha->req_q_map); ha->req_q_map = NULL; @@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) struct req_que *req; struct rsp_que *rsp; int cnt; + unsigned long flags; + spin_lock_irqsave(&ha->hardware_lock, flags); for (cnt = 0; cnt < ha->max_req_queues; cnt++) { if (!test_bit(cnt, ha->req_qid_map)) continue; req = ha->req_q_map[cnt]; + clear_bit(cnt, ha->req_qid_map); + ha->req_q_map[cnt] = NULL; + + spin_unlock_irqrestore(&ha->hardware_lock, flags); qla2x00_free_req_que(ha, req); + spin_lock_irqsave(&ha->hardware_lock, flags); } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + kfree(ha->req_q_map); ha->req_q_map = NULL; + + spin_lock_irqsave(&ha->hardware_lock, flags); for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { if (!test_bit(cnt, ha->rsp_qid_map)) continue; rsp = ha->rsp_q_map[cnt]; + clear_bit(cnt, ha->req_qid_map); + ha->rsp_q_map[cnt] = NULL; + spin_unlock_irqrestore(&ha->hardware_lock, flags); qla2x00_free_rsp_que(ha, rsp); + spin_lock_irqsave(&ha->hardware_lock, flags); } - kfree(ha->rsp_q_map); - ha->rsp_q_map = NULL; -} - -static int qla25xx_setup_mode(struct scsi_qla_host *vha) -{ - uint16_t options = 0; - int ques, req, ret; - struct qla_hw_data *ha = vha->hw; + spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (!(ha->fw_attributes & BIT_6)) { - ql_log(ql_log_warn, vha, 0x00d8, - "Firmware is not multi-queue capable.\n"); - goto fail; - } - if (ql2xmultique_tag) { - /* create a request queue for IO */ - options |= BIT_7; - req = qla25xx_create_req_que(ha, options, 0, 0, -1, - QLA_DEFAULT_QUE_QOS); - if (!req) { - ql_log(ql_log_warn, vha, 0x00e0, - "Failed to create request queue.\n"); - goto fail; - } - ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); - vha->req = ha->req_q_map[req]; - options |= BIT_1; - for (ques = 1; ques < ha->max_rsp_queues; ques++) { - ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); - if (!ret) { - ql_log(ql_log_warn, vha, 0x00e8, - "Failed to create response queue.\n"); - goto fail2; - } - } - ha->flags.cpu_affinity_enabled = 1; - ql_dbg(ql_dbg_multiq, vha, 0xc007, - "CPU affinity mode enabled, " - "no. of response queues:%d no. of request queues:%d.\n", - ha->max_rsp_queues, ha->max_req_queues); - ql_dbg(ql_dbg_init, vha, 0x00e9, - "CPU affinity mode enabled, " - "no. of response queues:%d no. 
of request queues:%d.\n", - ha->max_rsp_queues, ha->max_req_queues); - } - return 0; -fail2: - qla25xx_delete_queues(vha); - destroy_workqueue(ha->wq); - ha->wq = NULL; - vha->req = ha->req_q_map[0]; -fail: - ha->mqenable = 0; - kfree(ha->req_q_map); kfree(ha->rsp_q_map); - ha->max_req_queues = ha->max_rsp_queues = 1; - return 1; + ha->rsp_q_map = NULL; } static char * @@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) qla2x00_rel_sp(sp->fcport->vha, sp); } -static void +void qla2x00_sp_compl(void *data, void *ptr, int res) { struct qla_hw_data *ha = (struct qla_hw_data *)data; @@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res) cmd->scsi_done(cmd); } +void +qla2xxx_qpair_sp_free_dma(void *vha, void *ptr) +{ + srb_t *sp = (srb_t *)ptr; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct qla_hw_data *ha = sp->fcport->vha->hw; + void *ctx = GET_CMD_CTX_SP(sp); + + if (sp->flags & SRB_DMA_VALID) { + scsi_dma_unmap(cmd); + sp->flags &= ~SRB_DMA_VALID; + } + + if (sp->flags & SRB_CRC_PROT_DMA_VALID) { + dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), + scsi_prot_sg_count(cmd), cmd->sc_data_direction); + sp->flags &= ~SRB_CRC_PROT_DMA_VALID; + } + + if (sp->flags & SRB_CRC_CTX_DSD_VALID) { + /* List assured to be having elements */ + qla2x00_clean_dsd_pool(ha, sp, NULL); + sp->flags &= ~SRB_CRC_CTX_DSD_VALID; + } + + if (sp->flags & SRB_CRC_CTX_DMA_VALID) { + dma_pool_free(ha->dl_dma_pool, ctx, + ((struct crc_context *)ctx)->crc_ctx_dma); + sp->flags &= ~SRB_CRC_CTX_DMA_VALID; + } + + if (sp->flags & SRB_FCP_CMND_DMA_VALID) { + struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; + + dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, + ctx1->fcp_cmnd_dma); + list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); + ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; + ha->gbl_dsd_avail += ctx1->dsd_use_cnt; + mempool_free(ctx1, ha->ctx_mempool); + } + + CMD_SP(cmd) = NULL; + qla2xxx_rel_qpair_sp(sp->qpair, sp); +} + +void +qla2xxx_qpair_sp_compl(void *data, void *ptr, int res) +{ + srb_t *sp = (srb_t *)ptr; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + + cmd->result = res; + + if (atomic_read(&sp->ref_count) == 0) { + ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079, + "SP reference-count to ZERO -- sp=%p cmd=%p.\n", + sp, GET_CMD_SP(sp)); + if (ql2xextended_error_logging & ql_dbg_io) + WARN_ON(atomic_read(&sp->ref_count) == 0); + return; + } + if (!atomic_dec_and_test(&sp->ref_count)) + return; + + qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp); + cmd->scsi_done(cmd); +} + /* If we are SP1 here, we need to still take and release the host_lock as SP1 * does not have the changes necessary to avoid taking host->host_lock. 
*/ @@ -706,6 +758,27 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); srb_t *sp; int rval; + struct qla_qpair *qpair = NULL; + uint32_t tag; + uint16_t hwq; + + if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) { + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + + if (ha->mqenable) { + if (shost_use_blk_mq(vha->host)) { + tag = blk_mq_unique_tag(cmd->request); + hwq = blk_mq_unique_tag_to_hwq(tag); + qpair = ha->queue_pair_map[hwq]; + } else if (vha->vp_idx && vha->qpair) { + qpair = vha->qpair; + } + + if (qpair) + return qla2xxx_mqueuecommand(host, cmd, qpair); + } if (ha->flags.eeh_busy) { if (ha->flags.pci_channel_io_perm_failure) { @@ -803,6 +876,95 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) return 0; } +/* For MQ supported I/O */ +int +qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, + struct qla_qpair *qpair) +{ + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; + struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + srb_t *sp; + int rval; + + rval = fc_remote_port_chkready(rport); + if (rval) { + cmd->result = rval; + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076, + "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", + cmd, rval); + goto qc24_fail_command; + } + + if (!fcport) { + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + + if (atomic_read(&fcport->state) != FCS_ONLINE) { + if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || + atomic_read(&base_vha->loop_state) == LOOP_DEAD) { + ql_dbg(ql_dbg_io, vha, 0x3077, + "Returning DNC, fcport_state=%d loop_state=%d.\n", + atomic_read(&fcport->state), + atomic_read(&base_vha->loop_state)); + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + goto qc24_target_busy; + } + + /* + * Return target busy if we've received a non-zero retry_delay_timer + * in a FCP_RSP. + */ + if (fcport->retry_delay_timestamp == 0) { + /* retry delay not set */ + } else if (time_after(jiffies, fcport->retry_delay_timestamp)) + fcport->retry_delay_timestamp = 0; + else + goto qc24_target_busy; + + sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC); + if (!sp) + goto qc24_host_busy; + + sp->u.scmd.cmd = cmd; + sp->type = SRB_SCSI_CMD; + atomic_set(&sp->ref_count, 1); + CMD_SP(cmd) = (void *)sp; + sp->free = qla2xxx_qpair_sp_free_dma; + sp->done = qla2xxx_qpair_sp_compl; + sp->qpair = qpair; + + rval = ha->isp_ops->start_scsi_mq(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, + "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); + if (rval == QLA_INTERFACE_ERROR) + goto qc24_fail_command; + goto qc24_host_busy_free_sp; + } + + return 0; + +qc24_host_busy_free_sp: + qla2xxx_qpair_sp_free_dma(vha, sp); + +qc24_host_busy: + return SCSI_MLQUEUE_HOST_BUSY; + +qc24_target_busy: + return SCSI_MLQUEUE_TARGET_BUSY; + +qc24_fail_command: + cmd->scsi_done(cmd); + + return 0; +} + /* * qla2x00_eh_wait_on_command * Waits for the command to be returned by the Firmware for some @@ -1451,6 +1613,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { + /* Don't abort commands in adapter during EEH + * recovery as it's not accessible/responding. 
+ */ + if (!ha->flags.eeh_busy) { + /* Get a reference to the sp and drop the lock. + * The reference ensures this sp->done() call + * - and not the call in qla2xxx_eh_abort() - + * ends the SCSI command (with result 'res'). + */ + sp_get(sp); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + qla2xxx_eh_abort(GET_CMD_SP(sp)); + spin_lock_irqsave(&ha->hardware_lock, flags); + } req->outstanding_cmds[cnt] = NULL; sp->done(vha, sp, res); } @@ -1582,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha) { resource_size_t pio; uint16_t msix; - int cpus; if (pci_request_selected_regions(ha->pdev, ha->bars, QLA2XXX_DRIVER_NAME)) { @@ -1639,9 +1814,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha) /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; - if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) || - (ql2xmaxqueues > 1 && ql2xmultique_tag) || - (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) + if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), @@ -1651,26 +1824,18 @@ qla2x00_iospace_config(struct qla_hw_data *ha) "MQIO Base=%p.\n", ha->mqiobase); /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); - ha->msix_count = msix; + ha->msix_count = msix + 1; /* Max queues are bounded by available msix vectors */ - /* queue 0 uses two msix vectors */ - if (ql2xmultique_tag) { - cpus = num_online_cpus(); - ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? - (cpus + 1) : (ha->msix_count - 1); - ha->max_req_queues = 2; - } else if (ql2xmaxqueues > 1) { - ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? - QLA_MQ_SIZE : ql2xmaxqueues; - ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008, - "QoS mode set, max no of request queues:%d.\n", - ha->max_req_queues); - ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019, - "QoS mode set, max no of request queues:%d.\n", - ha->max_req_queues); - } + /* MB interrupt uses 1 vector */ + ha->max_req_queues = ha->msix_count - 1; + ha->max_rsp_queues = ha->max_req_queues; + /* Queue pairs is the max value minus the base queue pair */ + ha->max_qpairs = ha->max_rsp_queues - 1; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188, + "Max no of queues pairs: %d.\n", ha->max_qpairs); + ql_log_pci(ql_log_info, ha->pdev, 0x001a, - "MSI-X vector count: %d.\n", msix); + "MSI-X vector count: %d.\n", ha->msix_count); } else ql_log_pci(ql_log_info, ha->pdev, 0x001b, "BAR 3 not enabled.\n"); @@ -1690,7 +1855,6 @@ static int qla83xx_iospace_config(struct qla_hw_data *ha) { uint16_t msix; - int cpus; if (pci_request_selected_regions(ha->pdev, ha->bars, QLA2XXX_DRIVER_NAME)) { @@ -1742,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha) /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_83XX_PCI_MSIX_CONTROL, &msix); - ha->msix_count = msix; - /* Max queues are bounded by available msix vectors */ - /* queue 0 uses two msix vectors */ - if (ql2xmultique_tag) { - cpus = num_online_cpus(); - ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? - (cpus + 1) : (ha->msix_count - 1); - ha->max_req_queues = 2; - } else if (ql2xmaxqueues > 1) { - ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 
- QLA_MQ_SIZE : ql2xmaxqueues; - ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c, - "QoS mode set, max no of request queues:%d.\n", - ha->max_req_queues); - ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, - "QoS mode set, max no of request queues:%d.\n", - ha->max_req_queues); + ha->msix_count = msix + 1; + /* + * By default, driver uses at least two msix vectors + * (default & rspq) + */ + if (ql2xmqsupport) { + /* MB interrupt uses 1 vector */ + ha->max_req_queues = ha->msix_count - 1; + ha->max_rsp_queues = ha->max_req_queues; + + /* ATIOQ needs 1 vector. That's 1 less QPair */ + if (QLA_TGT_MODE_ENABLED()) + ha->max_req_queues--; + + /* Queue pairs is the max value minus + * the base queue pair */ + ha->max_qpairs = ha->max_req_queues - 1; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, + "Max no of queues pairs: %d.\n", ha->max_qpairs); } ql_log_pci(ql_log_info, ha->pdev, 0x011c, - "MSI-X vector count: %d.\n", msix); + "MSI-X vector count: %d.\n", ha->msix_count); } else ql_log_pci(ql_log_info, ha->pdev, 0x011e, "BAR 1 not enabled.\n"); mqiobase_exit: ha->msix_count = ha->max_rsp_queues + 1; + if (QLA_TGT_MODE_ENABLED()) + ha->msix_count++; qlt_83xx_iospace_config(ha); @@ -1812,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = { .write_optrom = qla2x00_write_optrom_data, .get_flash_version = qla2x00_get_flash_version, .start_scsi = qla2x00_start_scsi, + .start_scsi_mq = NULL, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -1850,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = { .write_optrom = qla2x00_write_optrom_data, .get_flash_version = qla2x00_get_flash_version, .start_scsi = qla2x00_start_scsi, + .start_scsi_mq = NULL, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -1888,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = { .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_start_scsi, + .start_scsi_mq = NULL, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -1926,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = { .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, + .start_scsi_mq = qla2xxx_dif_start_scsi_mq, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -1964,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = { .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, + .start_scsi_mq = qla2xxx_dif_start_scsi_mq, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -2002,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = { .write_optrom = qla82xx_write_optrom_data, .get_flash_version = qla82xx_get_flash_version, .start_scsi = qla82xx_start_scsi, + .start_scsi_mq = NULL, .abort_isp = qla82xx_abort_isp, .iospace_config = qla82xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -2040,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = { .write_optrom = qla8044_write_optrom_data, .get_flash_version = qla82xx_get_flash_version, .start_scsi = qla82xx_start_scsi, + .start_scsi_mq = NULL, .abort_isp = qla8044_abort_isp, 
.iospace_config = qla82xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -2078,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = { .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, + .start_scsi_mq = qla2xxx_dif_start_scsi_mq, .abort_isp = qla2x00_abort_isp, .iospace_config = qla83xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -2116,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = { .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qlafx00_start_scsi, + .start_scsi_mq = NULL, .abort_isp = qlafx00_abort_isp, .iospace_config = qlafx00_iospace_config, .initialize_adapter = qlafx00_initialize_adapter, @@ -2154,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = { .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, + .start_scsi_mq = qla2xxx_dif_start_scsi_mq, .abort_isp = qla2x00_abort_isp, .iospace_config = qla83xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, @@ -2341,6 +2519,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) { scsi_qla_host_t *vha = shost_priv(shost); + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 1; if (!vha->host) return 1; if (time > vha->hw->loop_reset_delay * HZ) @@ -2366,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) uint16_t req_length = 0, rsp_length = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; + int i; + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); sht = &qla2xxx_driver_template; if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || @@ -2629,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) "Found an ISP%04X irq %d iobase 0x%p.\n", pdev->device, pdev->irq, ha->iobase); mutex_init(&ha->vport_lock); + mutex_init(&ha->mq_lock); init_completion(&ha->mbx_cmd_comp); complete(&ha->mbx_cmd_comp); init_completion(&ha->mbx_intr_comp); @@ -2716,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->max_cmd_len, host->max_channel, host->max_lun, host->transportt, sht->vendor_id); -que_init: + /* Set up the irqs */ + ret = qla2x00_request_irqs(ha, rsp); + if (ret) + goto probe_init_failed; + /* Alloc arrays of request and response ring ptrs */ if (!qla2x00_alloc_queues(ha, req, rsp)) { ql_log(ql_log_fatal, base_vha, 0x003d, @@ -2725,12 +2912,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto probe_init_failed; } - qlt_probe_one_stage1(base_vha, ha); + if (ha->mqenable && shost_use_blk_mq(host)) { + /* number of hardware queues supported by blk/scsi-mq*/ + host->nr_hw_queues = ha->max_qpairs; - /* Set up the irqs */ - ret = qla2x00_request_irqs(ha, rsp); - if (ret) - goto probe_init_failed; + ql_dbg(ql_dbg_init, base_vha, 0x0192, + "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); + } else + ql_dbg(ql_dbg_init, base_vha, 0x0193, + "blk/scsi-mq disabled.\n"); + + qlt_probe_one_stage1(base_vha, ha); pci_save_state(pdev); @@ -2821,11 +3013,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->can_queue, base_vha->req, base_vha->mgmt_svr_loop_id, host->sg_tablesize); - if (ha->mqenable) { - if (qla25xx_setup_mode(base_vha)) { - ql_log(ql_log_warn, base_vha, 0x00ec, - "Failed to create queues, falling back to single queue mode.\n"); - goto 
que_init; + if (ha->mqenable && qla_ini_mode_enabled(base_vha)) { + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); + /* Create start of day qpairs for Block MQ */ + if (shost_use_blk_mq(host)) { + for (i = 0; i < ha->max_qpairs; i++) + qla2xxx_create_qpair(base_vha, 5, 0); } } @@ -3094,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) static void qla2x00_destroy_deferred_work(struct qla_hw_data *ha) { - /* Flush the work queue and remove it */ - if (ha->wq) { - flush_workqueue(ha->wq); - destroy_workqueue(ha->wq); - ha->wq = NULL; - } - /* Cancel all work and destroy DPC workqueues */ if (ha->dpc_lp_wq) { cancel_work_sync(&ha->idc_aen); @@ -3296,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha) ha->isp_ops->disable_intrs(ha); } + qla2x00_free_fcports(vha); + qla2x00_free_irqs(vha); - qla2x00_free_fcports(vha); + /* Flush the work queue and remove it */ + if (ha->wq) { + flush_workqueue(ha->wq); + destroy_workqueue(ha->wq); + ha->wq = NULL; + } + qla2x00_mem_free(ha); @@ -4013,6 +4207,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); INIT_LIST_HEAD(&vha->logo_list); INIT_LIST_HEAD(&vha->plogi_ack_list); + INIT_LIST_HEAD(&vha->qp_list); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); @@ -5017,8 +5212,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) base_vha->flags.init_done = 0; qla25xx_delete_queues(base_vha); - qla2x00_free_irqs(base_vha); qla2x00_free_fcports(base_vha); + qla2x00_free_irqs(base_vha); qla2x00_mem_free(ha); qla82xx_md_free(base_vha); qla2x00_free_queues(ha); @@ -5052,6 +5247,8 @@ qla2x00_do_dpc(void *data) { scsi_qla_host_t *base_vha; struct qla_hw_data *ha; + uint32_t online; + struct qla_qpair *qpair; ha = (struct qla_hw_data *)data; base_vha = pci_get_drvdata(ha->pdev); @@ -5313,6 +5510,22 @@ qla2x00_do_dpc(void *data) ha->isp_ops->beacon_blink(base_vha); } + /* qpair online check */ + if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED, + &base_vha->dpc_flags)) { + if (ha->flags.eeh_busy || + ha->flags.pci_channel_io_perm_failure) + online = 0; + else + online = 1; + + mutex_lock(&ha->mq_lock); + list_for_each_entry(qpair, &base_vha->qp_list, + qp_list_elem) + qpair->online = online; + mutex_unlock(&ha->mq_lock); + } + if (!IS_QLAFX00(ha)) qla2x00_do_dpc_all_vps(base_vha); @@ -5655,6 +5868,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) switch (state) { case pci_channel_io_normal: ha->flags.eeh_busy = 0; + if (ql2xmqsupport) { + set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: ha->flags.eeh_busy = 1; @@ -5668,10 +5885,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) pci_disable_device(pdev); /* Return back all IOs */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); + if (ql2xmqsupport) { + set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: ha->flags.pci_channel_io_perm_failure = 1; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); + if (ql2xmqsupport) { + set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_NEED_RESET; @@ -5939,6 +6164,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha) qla83xx_wr_reg(vha, reg, data); } +static int qla2xxx_map_queues(struct Scsi_Host 
*shost) +{ + scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; + + return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); +} + static const struct pci_error_handlers qla2xxx_err_handler = { .error_detected = qla2xxx_pci_error_detected, .mmio_enabled = qla2xxx_pci_mmio_enabled, diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 9f6012b78e565e3225e3e1f9b8a6f23e7eeef103..b4336e0cd85f077d4ebe241501b525a8810b7380 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include /* * NVRAM support routines diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index a7cfc270bd08a1f01867affc2dee0fc6b7611472..aeebefb1e9f830c493e5a887d247b9ac602902cc 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -409,18 +409,9 @@ struct qla4_8xxx_legacy_intr_set { /* MSI-X Support */ -#define QLA_MSIX_DEFAULT 0x00 -#define QLA_MSIX_RSP_Q 0x01 - +#define QLA_MSIX_DEFAULT 0 +#define QLA_MSIX_RSP_Q 1 #define QLA_MSIX_ENTRIES 2 -#define QLA_MIDX_DEFAULT 0 -#define QLA_MIDX_RSP_Q 1 - -struct ql4_msix_entry { - int have_irq; - uint16_t msix_vector; - uint16_t msix_entry; -}; /* * ISP Operations @@ -572,9 +563,6 @@ struct scsi_qla_host { #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ #define AF_HA_REMOVAL 12 /* 0x00001000 */ -#define AF_INTx_ENABLED 15 /* 0x00008000 */ -#define AF_MSI_ENABLED 16 /* 0x00010000 */ -#define AF_MSIX_ENABLED 17 /* 0x00020000 */ #define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */ #define AF_FW_RECOVERY 19 /* 0x00080000 */ #define AF_EEH_BUSY 20 /* 0x00100000 */ @@ -762,8 +750,6 @@ struct scsi_qla_host { struct isp_operations *isp_ops; struct ql82xx_hw_data hw; - struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES]; - uint32_t nx_dev_init_timeout; uint32_t nx_reset_timeout; void *fw_dump; diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 2559144f54757cbbaec41f315bf71d3121f061bd..bce96a58f14e0e7a381f795fac8050b196f64d21 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -134,7 +134,6 @@ int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha); void qla4_82xx_enable_intrs(struct scsi_qla_host *ha); void qla4_82xx_disable_intrs(struct scsi_qla_host *ha); int qla4_8xxx_enable_msix(struct scsi_qla_host *ha); -void qla4_8xxx_disable_msix(struct scsi_qla_host *ha); irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id); irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id); irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id); diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 4f9c0f2be89d951515b3c22a4a134b5113107e99..d2cd33d8d67fc9c7de67553217ccb3c1fd77ab54 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -1107,7 +1107,7 @@ static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha, DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n")); if (is_qla8022(ha)) { writel(0, &ha->qla4_82xx_reg->host_int); - if (test_bit(AF_INTx_ENABLED, &ha->flags)) + if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled) qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); } @@ -1564,19 +1564,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha) try_msi: /* Trying MSI */ - ret = pci_enable_msi(ha->pdev); - if (!ret) { + ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); + if (ret > 0) { ret = 
request_irq(ha->pdev->irq, qla4_8xxx_msi_handler, 0, DRIVER_NAME, ha); if (!ret) { DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n")); - set_bit(AF_MSI_ENABLED, &ha->flags); goto irq_attached; } else { ql4_printk(KERN_WARNING, ha, "MSI: Failed to reserve interrupt %d " "already in use.\n", ha->pdev->irq); - pci_disable_msi(ha->pdev); + pci_free_irq_vectors(ha->pdev); } } @@ -1592,7 +1591,6 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha) IRQF_SHARED, DRIVER_NAME, ha); if (!ret) { DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n")); - set_bit(AF_INTx_ENABLED, &ha->flags); goto irq_attached; } else { @@ -1614,14 +1612,11 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha) void qla4xxx_free_irqs(struct scsi_qla_host *ha) { - if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) { - if (test_bit(AF_MSIX_ENABLED, &ha->flags)) { - qla4_8xxx_disable_msix(ha); - } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) { - free_irq(ha->pdev->irq, ha); - pci_disable_msi(ha->pdev); - } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) { - free_irq(ha->pdev->irq, ha); - } - } + if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) + return; + + if (ha->pdev->msix_enabled) + free_irq(pci_irq_vector(ha->pdev, 1), ha); + free_irq(pci_irq_vector(ha->pdev, 0), ha); + pci_free_irq_vectors(ha->pdev); } diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index c291fdff1b338a2ff0ec50930f82298dab279e07..1da04f323d384a220d316beaea4773746537dbd3 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -2032,10 +2032,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha, ptid = (uint16_t *)&fw_ddb_entry->isid[1]; *ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id); - DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n", - fw_ddb_entry->isid[5], fw_ddb_entry->isid[4], - fw_ddb_entry->isid[3], fw_ddb_entry->isid[2], - fw_ddb_entry->isid[1], fw_ddb_entry->isid[0])); + DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid)); iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options); memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias)); diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 06ddd13cb7ccb9d4872e651eed2fdddfd55f797a..e91abb327745ee1f0e70cde84e061b9b96b78b64 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -3945,7 +3945,7 @@ void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count) ha->isp_ops->interrupt_service_routine(ha, intr_status); if (test_bit(AF_INTERRUPTS_ON, &ha->flags) && - test_bit(AF_INTx_ENABLED, &ha->flags)) + (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)) qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); } @@ -4094,12 +4094,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) ha->phy_port_num = sys_info->port_num; ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt; - DEBUG2(printk("scsi%ld: %s: " - "mac %02x:%02x:%02x:%02x:%02x:%02x " - "serial %s\n", ha->host_no, __func__, - ha->my_mac[0], ha->my_mac[1], ha->my_mac[2], - ha->my_mac[3], ha->my_mac[4], ha->my_mac[5], - ha->serial_number)); + DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n", + ha->host_no, __func__, ha->my_mac, ha->serial_number)); status = QLA_SUCCESS; @@ -4178,78 +4174,37 @@ qla4_82xx_disable_intrs(struct scsi_qla_host *ha) spin_unlock_irq(&ha->hardware_lock); } -struct ql4_init_msix_entry { - uint16_t entry; - uint16_t index; - const char *name; - irq_handler_t handler; -}; - -static struct 
ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = { - { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT, - "qla4xxx (default)", - (irq_handler_t)qla4_8xxx_default_intr_handler }, - { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q, - "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q }, -}; - -void -qla4_8xxx_disable_msix(struct scsi_qla_host *ha) -{ - int i; - struct ql4_msix_entry *qentry; - - for (i = 0; i < QLA_MSIX_ENTRIES; i++) { - qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index]; - if (qentry->have_irq) { - free_irq(qentry->msix_vector, ha); - DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n", - __func__, qla4_8xxx_msix_entries[i].name)); - } - } - pci_disable_msix(ha->pdev); - clear_bit(AF_MSIX_ENABLED, &ha->flags); -} - int qla4_8xxx_enable_msix(struct scsi_qla_host *ha) { - int i, ret; - struct msix_entry entries[QLA_MSIX_ENTRIES]; - struct ql4_msix_entry *qentry; - - for (i = 0; i < QLA_MSIX_ENTRIES; i++) - entries[i].entry = qla4_8xxx_msix_entries[i].entry; + int ret; - ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries)); - if (ret) { + ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES, + QLA_MSIX_ENTRIES, PCI_IRQ_MSIX); + if (ret < 0) { ql4_printk(KERN_WARNING, ha, "MSI-X: Failed to enable support -- %d/%d\n", QLA_MSIX_ENTRIES, ret); - goto msix_out; - } - set_bit(AF_MSIX_ENABLED, &ha->flags); - - for (i = 0; i < QLA_MSIX_ENTRIES; i++) { - qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index]; - qentry->msix_vector = entries[i].vector; - qentry->msix_entry = entries[i].entry; - qentry->have_irq = 0; - ret = request_irq(qentry->msix_vector, - qla4_8xxx_msix_entries[i].handler, 0, - qla4_8xxx_msix_entries[i].name, ha); - if (ret) { - ql4_printk(KERN_WARNING, ha, - "MSI-X: Unable to register handler -- %x/%d.\n", - qla4_8xxx_msix_entries[i].index, ret); - qla4_8xxx_disable_msix(ha); - goto msix_out; - } - qentry->have_irq = 1; - DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n", - __func__, qla4_8xxx_msix_entries[i].name)); + return ret; } -msix_out: + + ret = request_irq(pci_irq_vector(ha->pdev, 0), + qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)", + ha); + if (ret) + goto out_free_vectors; + + ret = request_irq(pci_irq_vector(ha->pdev, 1), + qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha); + if (ret) + goto out_free_default_irq; + + return 0; + +out_free_default_irq: + free_irq(pci_irq_vector(ha->pdev, 0), ha); +out_free_vectors: + pci_free_irq_vectors(ha->pdev); return ret; } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 01c3610a60cf0b767f7566fc97967158f5eeb618..9fbb33fc90c7299376a9fa671a2f906714d5a613 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -6304,13 +6304,9 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, * ISID would not match firmware generated ISID. 
*/ if (is_isid_compare) { - DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x" - "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n", - __func__, old_tddb->isid[5], old_tddb->isid[4], - old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1], - old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4], - new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1], - new_tddb->isid[0])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: old ISID [%pmR] New ISID [%pmR]\n", + __func__, old_tddb->isid, new_tddb->isid)); if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], sizeof(old_tddb->isid))) @@ -7925,10 +7921,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); break; case ISCSI_FLASHNODE_ISID: - rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", - fnode_sess->isid[0], fnode_sess->isid[1], - fnode_sess->isid[2], fnode_sess->isid[3], - fnode_sess->isid[4], fnode_sess->isid[5]); + rc = sprintf(buf, "%pm\n", fnode_sess->isid); break; case ISCSI_FLASHNODE_TSID: rc = sprintf(buf, "%u\n", fnode_sess->tsid); diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h index 4377e87ee79c360c4bb797b4a93df405b02c1f00..892a0b058b997e2ffad02d913347aee0d4c9f542 100644 --- a/drivers/scsi/qlogicpti.h +++ b/drivers/scsi/qlogicpti.h @@ -356,8 +356,8 @@ struct qlogicpti { /* The rest of the elements are unimportant for performance. */ struct qlogicpti *next; - __u32 res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/ - __u32 req_dvma; /* Ptr to REQUEST bufs (DVMA) */ + dma_addr_t res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/ + dma_addr_t req_dvma; /* Ptr to REQUEST bufs (DVMA) */ u_char fware_majrev, fware_minrev, fware_micrev; struct Scsi_Host *qhost; int qpti_id; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 1deb6adc411f795833a444a84b1e80e9f9629c4d..75455d4dab68b5cd49378635d2848da8340b7ff3 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -621,6 +621,9 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth) wmb(); } + if (sdev->request_queue) + blk_set_queue_depth(sdev->request_queue, depth); + return sdev->queue_depth; } EXPORT_SYMBOL(scsi_change_queue_depth); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index cf04a364fd8b35f869c31e44491d237ae1b3bc6e..03051e12a0721ec0632ebca9f66538c295844479 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -4085,7 +4085,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, jiffies_to_timespec(delta_jiff, &ts); kt = ktime_set(ts.tv_sec, ts.tv_nsec); } else - kt = ktime_set(0, sdebug_ndelay); + kt = sdebug_ndelay; if (NULL == sd_dp) { sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); if (NULL == sd_dp) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 2464569253350b61fef59ed0583e8b4cd1a25944..28fea83ae2fea7546d5cb2d969b649110f57c52e 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -220,8 +220,6 @@ static struct { {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NEC", "iStorage", NULL, BLIST_REPORTLUN2}, - {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA}, - {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA}, {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c 
index 106a6adbd6f12d0a86906239345ec3423eab88cf..996e134d79faaf37674731e03a8de1360194d011 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1988,7 +1988,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev) req->cmd_len = COMMAND_SIZE(req->cmd[0]); - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; req->timeout = 10 * HZ; req->retries = 5; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index c4f7b56fa6f60d880fb5ce6b57e91a2c694bb30f..8b8c814df5c75dfa87bdbb3b2c74e3cac536199c 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 2cca9cffc63fde351f8ced6687951c0be2f86d6a..c35b6de4ca643297d1908341421c865c2cb93e84 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -86,10 +86,8 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason) static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; - struct request_queue *q = cmd->request->q; - blk_mq_requeue_request(cmd->request); - blk_mq_kick_requeue_list(q); + blk_mq_requeue_request(cmd->request, true); put_device(&sdev->sdev_gendev); } @@ -163,26 +161,11 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) { __scsi_queue_insert(cmd, reason, 1); } -/** - * scsi_execute - insert request and wait for the result - * @sdev: scsi device - * @cmd: scsi command - * @data_direction: data direction - * @buffer: data buffer - * @bufflen: len of buffer - * @sense: optional sense buffer - * @timeout: request timeout in seconds - * @retries: number of times to retry request - * @flags: or into request flags; - * @resid: optional residual length - * - * returns the req->errors value which is the scsi_cmnd result - * field. - */ -int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, + +static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, unsigned char *sense, int timeout, int retries, u64 flags, - int *resid) + req_flags_t rq_flags, int *resid) { struct request *req; int write = (data_direction == DMA_TO_DEVICE); @@ -203,7 +186,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, req->sense_len = 0; req->retries = retries; req->timeout = timeout; - req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; + req->cmd_flags |= flags; + req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT; /* * head injection *required* here otherwise quiesce won't work @@ -227,12 +211,37 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, return ret; } + +/** + * scsi_execute - insert request and wait for the result + * @sdev: scsi device + * @cmd: scsi command + * @data_direction: data direction + * @buffer: data buffer + * @bufflen: len of buffer + * @sense: optional sense buffer + * @timeout: request timeout in seconds + * @retries: number of times to retry request + * @flags: or into request flags; + * @resid: optional residual length + * + * returns the req->errors value which is the scsi_cmnd result + * field. 
+ */ +int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, + int data_direction, void *buffer, unsigned bufflen, + unsigned char *sense, int timeout, int retries, u64 flags, + int *resid) +{ + return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, + timeout, retries, flags, 0, resid); +} EXPORT_SYMBOL(scsi_execute); int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid, u64 flags) + int *resid, u64 flags, req_flags_t rq_flags) { char *sense = NULL; int result; @@ -242,8 +251,8 @@ int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, if (!sense) return DRIVER_ERROR << 24; } - result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, flags, resid); + result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, + sense, timeout, retries, flags, rq_flags, resid); if (sshdr) scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); @@ -813,7 +822,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) */ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) ; - else if (!(req->cmd_flags & REQ_QUIET)) + else if (!(req->rq_flags & RQF_QUIET)) scsi_print_sense(cmd); result = 0; /* BLOCK_PC may have set error */ @@ -943,7 +952,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) switch (action) { case ACTION_FAIL: /* Give up and fail the remainder of the request */ - if (!(req->cmd_flags & REQ_QUIET)) { + if (!(req->rq_flags & RQF_QUIET)) { static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); @@ -972,7 +981,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) * A new command will be prepared and issued. */ if (q->mq_ops) { - cmd->request->cmd_flags &= ~REQ_DONTPREP; + cmd->request->rq_flags &= ~RQF_DONTPREP; scsi_mq_uninit_cmd(cmd); scsi_mq_requeue_cmd(cmd); } else { @@ -998,8 +1007,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) /* * If sg table allocation fails, requeue request later. */ - if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments, - sdb->table.sgl))) + if (unlikely(sg_alloc_table_chained(&sdb->table, + blk_rq_nr_phys_segments(req), sdb->table.sgl))) return BLKPREP_DEFER; /* @@ -1031,7 +1040,7 @@ int scsi_init_io(struct scsi_cmnd *cmd) bool is_mq = (rq->mq_ctx != NULL); int error; - BUG_ON(!rq->nr_phys_segments); + BUG_ON(!blk_rq_nr_phys_segments(rq)); error = scsi_init_sgtable(rq, &cmd->sdb); if (error) @@ -1234,7 +1243,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req) /* * If the devices is blocked we defer normal commands. */ - if (!(req->cmd_flags & REQ_PREEMPT)) + if (!(req->rq_flags & RQF_PREEMPT)) ret = BLKPREP_DEFER; break; default: @@ -1243,7 +1252,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req) * special commands. In particular any user initiated * command is not allowed. 
*/ - if (!(req->cmd_flags & REQ_PREEMPT)) + if (!(req->rq_flags & RQF_PREEMPT)) ret = BLKPREP_KILL; break; } @@ -1279,7 +1288,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret) blk_delay_queue(q, SCSI_QUEUE_DELAY); break; default: - req->cmd_flags |= REQ_DONTPREP; + req->rq_flags |= RQF_DONTPREP; } return ret; @@ -1736,7 +1745,7 @@ static void scsi_request_fn(struct request_queue *q) * we add the dev to the starved list so it eventually gets * a run when a tag is freed. */ - if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) { + if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) { spin_lock_irq(shost->host_lock); if (list_empty(&sdev->starved_entry)) list_add_tail(&sdev->starved_entry, @@ -1801,7 +1810,7 @@ static inline int prep_to_mq(int ret) { switch (ret) { case BLKPREP_OK: - return 0; + return BLK_MQ_RQ_QUEUE_OK; case BLKPREP_DEFER: return BLK_MQ_RQ_QUEUE_BUSY; default: @@ -1888,7 +1897,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, int reason; ret = prep_to_mq(scsi_prep_state_check(sdev, req)); - if (ret) + if (ret != BLK_MQ_RQ_QUEUE_OK) goto out; ret = BLK_MQ_RQ_QUEUE_BUSY; @@ -1903,11 +1912,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, goto out_dec_target_busy; - if (!(req->cmd_flags & REQ_DONTPREP)) { + if (!(req->rq_flags & RQF_DONTPREP)) { ret = prep_to_mq(scsi_mq_prep_fn(req)); - if (ret) + if (ret != BLK_MQ_RQ_QUEUE_OK) goto out_dec_host_busy; - req->cmd_flags |= REQ_DONTPREP; + req->rq_flags |= RQF_DONTPREP; } else { blk_mq_start_request(req); } @@ -1941,7 +1950,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, out: switch (ret) { case BLK_MQ_RQ_QUEUE_BUSY: - blk_mq_stop_hw_queue(hctx); if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev)) blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); @@ -1952,7 +1960,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, * we hit an error, as we will never see this command * again. */ - if (req->cmd_flags & REQ_DONTPREP) + if (req->rq_flags & RQF_DONTPREP) scsi_mq_uninit_cmd(cmd); break; default: @@ -1990,6 +1998,15 @@ static void scsi_exit_request(void *data, struct request *rq, kfree(cmd->sense_buffer); } +static int scsi_map_queues(struct blk_mq_tag_set *set) +{ + struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); + + if (shost->hostt->map_queues) + return shost->hostt->map_queues(shost); + return blk_mq_map_queues(set); +} + static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) { struct device *host_dev; @@ -2082,6 +2099,7 @@ static struct blk_mq_ops scsi_mq_ops = { .timeout = scsi_timeout, .init_request = scsi_init_request, .exit_request = scsi_exit_request, + .map_queues = scsi_map_queues, }; struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) @@ -2723,6 +2741,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev, } EXPORT_SYMBOL_GPL(sdev_evt_send_simple); +/** + * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() + * @sdev: SCSI device to count the number of scsi_request_fn() callers for. + */ +static int scsi_request_fn_active(struct scsi_device *sdev) +{ + struct request_queue *q = sdev->request_queue; + int request_fn_active; + + WARN_ON_ONCE(sdev->host->use_blk_mq); + + spin_lock_irq(q->queue_lock); + request_fn_active = q->request_fn_active; + spin_unlock_irq(q->queue_lock); + + return request_fn_active; +} + +/** + * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls + * @sdev: SCSI device pointer. 
+ * + * Wait until the ongoing shost->hostt->queuecommand() calls that are + * invoked from scsi_request_fn() have finished. + */ +static void scsi_wait_for_queuecommand(struct scsi_device *sdev) +{ + WARN_ON_ONCE(sdev->host->use_blk_mq); + + while (scsi_request_fn_active(sdev)) + msleep(20); +} + /** * scsi_device_quiesce - Block user issued commands. * @sdev: scsi device to quiesce. @@ -2807,8 +2858,7 @@ EXPORT_SYMBOL(scsi_target_resume); * @sdev: device to block * * Block request made by scsi lld's to temporarily stop all - * scsi commands on the specified device. Called from interrupt - * or normal process context. + * scsi commands on the specified device. May sleep. * * Returns zero if successful or error if not * @@ -2817,6 +2867,10 @@ EXPORT_SYMBOL(scsi_target_resume); * (which must be a legal transition). When the device is in this * state, all commands are deferred until the scsi lld reenables * the device with scsi_device_unblock or device_block_tmo fires. + * + * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after + * scsi_internal_device_block() has blocked a SCSI device and also + * remove the rport mutex lock and unlock calls from srp_queuecommand(). */ int scsi_internal_device_block(struct scsi_device *sdev) @@ -2844,6 +2898,7 @@ scsi_internal_device_block(struct scsi_device *sdev) spin_lock_irqsave(q->queue_lock, flags); blk_stop_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); + scsi_wait_for_queuecommand(sdev); } return 0; diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 7a74b82e8973984510091d06dd3fdf201fc327a0..480a597b387755d7506fc75a10f77c9d295ed596 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 07349270535d19002a39fa12f968f22c3ebd8cb1..82dfe07b1d47f7e1f8ae3517191f15d190168834 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) struct request_queue *rq = sdev->request_queue; struct scsi_target *starget = sdev->sdev_target; - error = scsi_device_set_state(sdev, SDEV_RUNNING); - if (error) - return error; - error = scsi_target_add(starget); if (error) return error; diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 0f3a3869524bc089f57b2c4e87d44c4875fe919f..03577bde6ac56b91e92f8e631d98907a19dfb273 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -2592,7 +2593,7 @@ fc_rport_final_delete(struct work_struct *work) /** - * fc_rport_create - allocates and creates a remote FC port. + * fc_remote_port_create - allocates and creates a remote FC port. * @shost: scsi host the remote port is connected to. * @channel: Channel on shost port connected to. * @ids: The world wide names, fc address, and FC4 port @@ -2605,8 +2606,8 @@ fc_rport_final_delete(struct work_struct *work) * This routine assumes no locks are held on entry. 
*/ static struct fc_rport * -fc_rport_create(struct Scsi_Host *shost, int channel, - struct fc_rport_identifiers *ids) +fc_remote_port_create(struct Scsi_Host *shost, int channel, + struct fc_rport_identifiers *ids) { struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_internal *fci = to_fc_internal(shost->transportt); @@ -2914,7 +2915,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, spin_unlock_irqrestore(shost->host_lock, flags); /* No consistent binding found - create new remote port entry */ - rport = fc_rport_create(shost, channel, ids); + rport = fc_remote_port_create(shost, channel, ids); return rport; } @@ -3554,81 +3555,6 @@ fc_vport_sched_delete(struct work_struct *work) * BSG support */ - -/** - * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job - * @job: fc_bsg_job that is to be torn down - */ -static void -fc_destroy_bsgjob(struct fc_bsg_job *job) -{ - unsigned long flags; - - spin_lock_irqsave(&job->job_lock, flags); - if (job->ref_cnt) { - spin_unlock_irqrestore(&job->job_lock, flags); - return; - } - spin_unlock_irqrestore(&job->job_lock, flags); - - put_device(job->dev); /* release reference for the request */ - - kfree(job->request_payload.sg_list); - kfree(job->reply_payload.sg_list); - kfree(job); -} - -/** - * fc_bsg_jobdone - completion routine for bsg requests that the LLD has - * completed - * @job: fc_bsg_job that is complete - */ -static void -fc_bsg_jobdone(struct fc_bsg_job *job) -{ - struct request *req = job->req; - struct request *rsp = req->next_rq; - int err; - - err = job->req->errors = job->reply->result; - - if (err < 0) - /* we're only returning the result field in the reply */ - job->req->sense_len = sizeof(uint32_t); - else - job->req->sense_len = job->reply_len; - - /* we assume all request payload was transferred, residual == 0 */ - req->resid_len = 0; - - if (rsp) { - WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len); - - /* set reply (bidi) residual */ - rsp->resid_len -= min(job->reply->reply_payload_rcv_len, - rsp->resid_len); - } - blk_complete_request(req); -} - -/** - * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests - * @rq: BSG request that holds the job to be destroyed - */ -static void fc_bsg_softirq_done(struct request *rq) -{ - struct fc_bsg_job *job = rq->special; - unsigned long flags; - - spin_lock_irqsave(&job->job_lock, flags); - job->state_flags |= FC_RQST_STATE_DONE; - job->ref_cnt--; - spin_unlock_irqrestore(&job->job_lock, flags); - - blk_end_request_all(rq, rq->errors); - fc_destroy_bsgjob(job); -} - /** * fc_bsg_job_timeout - handler for when a bsg request timesout * @req: request that timed out @@ -3636,27 +3562,22 @@ static void fc_bsg_softirq_done(struct request *rq) static enum blk_eh_timer_return fc_bsg_job_timeout(struct request *req) { - struct fc_bsg_job *job = (void *) req->special; - struct Scsi_Host *shost = job->shost; + struct bsg_job *job = (void *) req->special; + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct fc_rport *rport = fc_bsg_to_rport(job); struct fc_internal *i = to_fc_internal(shost->transportt); - unsigned long flags; - int err = 0, done = 0; + int err = 0, inflight = 0; - if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED) + if (rport && rport->port_state == FC_PORTSTATE_BLOCKED) return BLK_EH_RESET_TIMER; - spin_lock_irqsave(&job->job_lock, flags); - if (job->state_flags & FC_RQST_STATE_DONE) - done = 1; - else - job->ref_cnt++; - spin_unlock_irqrestore(&job->job_lock, flags); + inflight = 
bsg_job_get(job); - if (!done && i->f->bsg_timeout) { + if (inflight && i->f->bsg_timeout) { /* call LLDD to abort the i/o as it has timed out */ err = i->f->bsg_timeout(job); if (err == -EAGAIN) { - job->ref_cnt--; + bsg_job_put(job); return BLK_EH_RESET_TIMER; } else if (err) printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " @@ -3664,126 +3585,33 @@ fc_bsg_job_timeout(struct request *req) } /* the blk_end_sync_io() doesn't check the error */ - if (done) + if (!inflight) return BLK_EH_NOT_HANDLED; else return BLK_EH_HANDLED; } -static int -fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req) -{ - size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); - - BUG_ON(!req->nr_phys_segments); - - buf->sg_list = kzalloc(sz, GFP_KERNEL); - if (!buf->sg_list) - return -ENOMEM; - sg_init_table(buf->sg_list, req->nr_phys_segments); - buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); - buf->payload_len = blk_rq_bytes(req); - return 0; -} - - -/** - * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the - * bsg request - * @shost: SCSI Host corresponding to the bsg object - * @rport: (optional) FC Remote Port corresponding to the bsg object - * @req: BSG request that needs a job structure - */ -static int -fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport, - struct request *req) -{ - struct fc_internal *i = to_fc_internal(shost->transportt); - struct request *rsp = req->next_rq; - struct fc_bsg_job *job; - int ret; - - BUG_ON(req->special); - - job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size, - GFP_KERNEL); - if (!job) - return -ENOMEM; - - /* - * Note: this is a bit silly. - * The request gets formatted as a SGIO v4 ioctl request, which - * then gets reformatted as a blk request, which then gets - * reformatted as a fc bsg request. And on completion, we have - * to wrap return results such that SGIO v4 thinks it was a scsi - * status. I hope this was all worth it. 
- */ - - req->special = job; - job->shost = shost; - job->rport = rport; - job->req = req; - if (i->f->dd_bsg_size) - job->dd_data = (void *)&job[1]; - spin_lock_init(&job->job_lock); - job->request = (struct fc_bsg_request *)req->cmd; - job->request_len = req->cmd_len; - job->reply = req->sense; - job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer - * allocated */ - if (req->bio) { - ret = fc_bsg_map_buffer(&job->request_payload, req); - if (ret) - goto failjob_rls_job; - } - if (rsp && rsp->bio) { - ret = fc_bsg_map_buffer(&job->reply_payload, rsp); - if (ret) - goto failjob_rls_rqst_payload; - } - job->job_done = fc_bsg_jobdone; - if (rport) - job->dev = &rport->dev; - else - job->dev = &shost->shost_gendev; - get_device(job->dev); /* take a reference for the request */ - - job->ref_cnt = 1; - - return 0; - - -failjob_rls_rqst_payload: - kfree(job->request_payload.sg_list); -failjob_rls_job: - kfree(job); - return -ENOMEM; -} - - -enum fc_dispatch_result { - FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */ - FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */ - FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */ -}; - - /** * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD - * @q: fc host request queue * @shost: scsi host rport attached to * @job: bsg job to be processed */ -static enum fc_dispatch_result -fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost, - struct fc_bsg_job *job) +static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job) { struct fc_internal *i = to_fc_internal(shost->transportt); + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ int ret; + /* check if we really have all the request data needed */ + if (job->request_len < cmdlen) { + ret = -ENOMSG; + goto fail_host_msg; + } + /* Validate the host command */ - switch (job->request->msgcode) { + switch (bsg_request->msgcode) { case FC_BSG_HST_ADD_RPORT: cmdlen += sizeof(struct fc_bsg_host_add_rport); break; @@ -3815,7 +3643,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost, case FC_BSG_HST_VENDOR: cmdlen += sizeof(struct fc_bsg_host_vendor); if ((shost->hostt->vendor_id == 0L) || - (job->request->rqst_data.h_vendor.vendor_id != + (bsg_request->rqst_data.h_vendor.vendor_id != shost->hostt->vendor_id)) { ret = -ESRCH; goto fail_host_msg; @@ -3827,24 +3655,19 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost, goto fail_host_msg; } - /* check if we really have all the request data needed */ - if (job->request_len < cmdlen) { - ret = -ENOMSG; - goto fail_host_msg; - } - ret = i->f->bsg_request(job); if (!ret) - return FC_DISPATCH_UNLOCKED; + return 0; fail_host_msg: /* return the errno failure code as the only status */ BUG_ON(job->reply_len < sizeof(uint32_t)); - job->reply->reply_payload_rcv_len = 0; - job->reply->result = ret; + bsg_reply->reply_payload_rcv_len = 0; + bsg_reply->result = ret; job->reply_len = sizeof(uint32_t); - fc_bsg_jobdone(job); - return FC_DISPATCH_UNLOCKED; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; } @@ -3855,34 +3678,38 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost, static void fc_bsg_goose_queue(struct fc_rport *rport) { - if (!rport->rqst_q) + struct request_queue *q = rport->rqst_q; + unsigned long flags; + + if (!q) return; - /* - * This 
get/put dance makes no sense - */ - get_device(&rport->dev); - blk_run_queue_async(rport->rqst_q); - put_device(&rport->dev); + spin_lock_irqsave(q->queue_lock, flags); + blk_run_queue_async(q); + spin_unlock_irqrestore(q->queue_lock, flags); } /** * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD - * @q: rport request queue * @shost: scsi host rport attached to - * @rport: rport request destined to * @job: bsg job to be processed */ -static enum fc_dispatch_result -fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost, - struct fc_rport *rport, struct fc_bsg_job *job) +static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job) { struct fc_internal *i = to_fc_internal(shost->transportt); + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ int ret; + /* check if we really have all the request data needed */ + if (job->request_len < cmdlen) { + ret = -ENOMSG; + goto fail_rport_msg; + } + /* Validate the rport command */ - switch (job->request->msgcode) { + switch (bsg_request->msgcode) { case FC_BSG_RPT_ELS: cmdlen += sizeof(struct fc_bsg_rport_els); goto check_bidi; @@ -3902,133 +3729,31 @@ fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost, goto fail_rport_msg; } - /* check if we really have all the request data needed */ - if (job->request_len < cmdlen) { - ret = -ENOMSG; - goto fail_rport_msg; - } - ret = i->f->bsg_request(job); if (!ret) - return FC_DISPATCH_UNLOCKED; + return 0; fail_rport_msg: /* return the errno failure code as the only status */ BUG_ON(job->reply_len < sizeof(uint32_t)); - job->reply->reply_payload_rcv_len = 0; - job->reply->result = ret; + bsg_reply->reply_payload_rcv_len = 0; + bsg_reply->result = ret; job->reply_len = sizeof(uint32_t); - fc_bsg_jobdone(job); - return FC_DISPATCH_UNLOCKED; -} - - -/** - * fc_bsg_request_handler - generic handler for bsg requests - * @q: request queue to manage - * @shost: Scsi_Host related to the bsg object - * @rport: FC remote port related to the bsg object (optional) - * @dev: device structure for bsg object - */ -static void -fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost, - struct fc_rport *rport, struct device *dev) -{ - struct request *req; - struct fc_bsg_job *job; - enum fc_dispatch_result ret; - - if (!get_device(dev)) - return; - - while (1) { - if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && - !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) - break; - - req = blk_fetch_request(q); - if (!req) - break; - - if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) { - req->errors = -ENXIO; - spin_unlock_irq(q->queue_lock); - blk_end_request_all(req, -ENXIO); - spin_lock_irq(q->queue_lock); - continue; - } - - spin_unlock_irq(q->queue_lock); - - ret = fc_req_to_bsgjob(shost, rport, req); - if (ret) { - req->errors = ret; - blk_end_request_all(req, ret); - spin_lock_irq(q->queue_lock); - continue; - } - - job = req->special; - - /* check if we have the msgcode value at least */ - if (job->request_len < sizeof(uint32_t)) { - BUG_ON(job->reply_len < sizeof(uint32_t)); - job->reply->reply_payload_rcv_len = 0; - job->reply->result = -ENOMSG; - job->reply_len = sizeof(uint32_t); - fc_bsg_jobdone(job); - spin_lock_irq(q->queue_lock); - continue; - } - - /* the dispatch routines will unlock the queue_lock */ - if (rport) - ret = fc_bsg_rport_dispatch(q, shost, rport, job); - else - ret = 
fc_bsg_host_dispatch(q, shost, job); - - /* did dispatcher hit state that can't process any more */ - if (ret == FC_DISPATCH_BREAK) - break; - - /* did dispatcher had released the lock */ - if (ret == FC_DISPATCH_UNLOCKED) - spin_lock_irq(q->queue_lock); - } - - spin_unlock_irq(q->queue_lock); - put_device(dev); - spin_lock_irq(q->queue_lock); -} - - -/** - * fc_bsg_host_handler - handler for bsg requests for a fc host - * @q: fc host request queue - */ -static void -fc_bsg_host_handler(struct request_queue *q) -{ - struct Scsi_Host *shost = q->queuedata; - - fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev); + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; } - -/** - * fc_bsg_rport_handler - handler for bsg requests for a fc rport - * @q: rport request queue - */ -static void -fc_bsg_rport_handler(struct request_queue *q) +static int fc_bsg_dispatch(struct bsg_job *job) { - struct fc_rport *rport = q->queuedata; - struct Scsi_Host *shost = rport_to_shost(rport); + struct Scsi_Host *shost = fc_bsg_to_shost(job); - fc_bsg_request_handler(q, shost, rport, &rport->dev); + if (scsi_is_fc_rport(job->dev)) + return fc_bsg_rport_dispatch(shost, job); + else + return fc_bsg_host_dispatch(shost, job); } - /** * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests * @shost: shost for fc_host @@ -4051,33 +3776,42 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) snprintf(bsg_name, sizeof(bsg_name), "fc_host%d", shost->host_no); - q = __scsi_alloc_queue(shost, fc_bsg_host_handler); + q = __scsi_alloc_queue(shost, bsg_request_fn); if (!q) { - printk(KERN_ERR "fc_host%d: bsg interface failed to " - "initialize - no request queue\n", - shost->host_no); + dev_err(dev, + "fc_host%d: bsg interface failed to initialize - no request queue\n", + shost->host_no); return -ENOMEM; } - q->queuedata = shost; - queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); - blk_queue_softirq_done(q, fc_bsg_softirq_done); - blk_queue_rq_timed_out(q, fc_bsg_job_timeout); - blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); - - err = bsg_register_queue(q, dev, bsg_name, NULL); + err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch, + i->f->dd_bsg_size); if (err) { - printk(KERN_ERR "fc_host%d: bsg interface failed to " - "initialize - register queue\n", - shost->host_no); + dev_err(dev, + "fc_host%d: bsg interface failed to initialize - setup queue\n", + shost->host_no); blk_cleanup_queue(q); return err; } - + blk_queue_rq_timed_out(q, fc_bsg_job_timeout); + blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); fc_host->rqst_q = q; return 0; } +static int fc_bsg_rport_prep(struct request_queue *q, struct request *req) +{ + struct fc_rport *rport = dev_to_rport(q->queuedata); + + if (rport->port_state == FC_PORTSTATE_BLOCKED && + !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) + return BLKPREP_DEFER; + + if (rport->port_state != FC_PORTSTATE_ONLINE) + return BLKPREP_KILL; + + return BLKPREP_OK; +} /** * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests @@ -4097,29 +3831,22 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) if (!i->f->bsg_request) return -ENOTSUPP; - q = __scsi_alloc_queue(shost, fc_bsg_rport_handler); + q = __scsi_alloc_queue(shost, bsg_request_fn); if (!q) { - printk(KERN_ERR "%s: bsg interface failed to " - "initialize - no request queue\n", - dev->kobj.name); + dev_err(dev, "bsg interface failed to initialize - no request queue\n"); return -ENOMEM; } - q->queuedata = rport; - 
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); - blk_queue_softirq_done(q, fc_bsg_softirq_done); - blk_queue_rq_timed_out(q, fc_bsg_job_timeout); - blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); - - err = bsg_register_queue(q, dev, NULL, NULL); + err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size); if (err) { - printk(KERN_ERR "%s: bsg interface failed to " - "initialize - register queue\n", - dev->kobj.name); + dev_err(dev, "failed to setup bsg queue\n"); blk_cleanup_queue(q); return err; } + blk_queue_prep_rq(q, fc_bsg_rport_prep); + blk_queue_rq_timed_out(q, fc_bsg_job_timeout); + blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); rport->rqst_q = q; return 0; } diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index e3cd3ece44121c7d8ee3806a40a42acf069f6958..b87a78673f6501be8e14f0dc28c97e244fa372ef 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -115,21 +114,12 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup, static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports", NULL, NULL, NULL); -#define SRP_PID(p) \ - (p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \ - (p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \ - (p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \ - (p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15] - -#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \ - "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x" - static ssize_t show_srp_rport_id(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_rport *rport = transport_class_to_srp_rport(dev); - return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport)); + return sprintf(buf, "%16phC\n", rport->port_id); } static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL); @@ -402,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work) } } -/** - * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() - * @shost: SCSI host for which to count the number of scsi_request_fn() callers. - * - * To do: add support for scsi-mq in this function. - */ -static int scsi_request_fn_active(struct Scsi_Host *shost) -{ - struct scsi_device *sdev; - struct request_queue *q; - int request_fn_active = 0; - - shost_for_each_device(sdev, shost) { - q = sdev->request_queue; - - spin_lock_irq(q->queue_lock); - request_fn_active += q->request_fn_active; - spin_unlock_irq(q->queue_lock); - } - - return request_fn_active; -} - -/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */ -static void srp_wait_for_queuecommand(struct Scsi_Host *shost) -{ - while (scsi_request_fn_active(shost)) - msleep(20); -} - static void __rport_fail_io_fast(struct srp_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); @@ -441,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport) if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) return; + /* + * Call scsi_target_block() to wait for ongoing shost->queuecommand() + * calls before invoking i->f->terminate_rport_io(). + */ + scsi_target_block(rport->dev.parent); scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); /* Involve the LLD if possible to terminate all I/O on the rport. 
*/ i = to_srp_internal(shost->transportt); - if (i->f->terminate_rport_io) { - srp_wait_for_queuecommand(shost); + if (i->f->terminate_rport_io) i->f->terminate_rport_io(rport); - } } /** @@ -576,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport) if (res) goto out; scsi_target_block(&shost->shost_gendev); - srp_wait_for_queuecommand(shost); res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; pr_debug("%s (state %d): transport.reconnect() returned %d\n", dev_name(&shost->shost_gendev), rport->state, res); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 51e56296f465a89cce7d5ce366021ac8345ed729..b1933041da39d414f9c6e127052ba8cbaa25e65e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -53,7 +53,7 @@ #include #include #include -#include +#include #include #include @@ -93,6 +93,7 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR); MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK); MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); +MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); #if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT) #define SD_MINORS 16 @@ -163,7 +164,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr, static const char temp[] = "temporary "; int len; - if (sdp->type != TYPE_DISK) + if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) /* no cache control on RBC devices; theoretically they * can do it, but there's probably so many exceptions * it's not worth the risk */ @@ -262,7 +263,7 @@ allow_restart_store(struct device *dev, struct device_attribute *attr, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (sdp->type != TYPE_DISK) + if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return -EINVAL; sdp->allow_restart = simple_strtoul(buf, NULL, 10); @@ -392,6 +393,11 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr, if (!capable(CAP_SYS_ADMIN)) return -EACCES; + if (sd_is_zoned(sdkp)) { + sd_config_discard(sdkp, SD_LBP_DISABLE); + return count; + } + if (sdp->type != TYPE_DISK) return -EINVAL; @@ -459,7 +465,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (sdp->type != TYPE_DISK) + if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return -EINVAL; err = kstrtoul(buf, 10, &max); @@ -710,7 +716,6 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); sector_t sector = blk_rq_pos(rq); unsigned int nr_sectors = blk_rq_sectors(rq); - unsigned int nr_bytes = blk_rq_bytes(rq); unsigned int len; int ret; char *buf; @@ -766,24 +771,19 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd) goto out; } - rq->completion_data = page; rq->timeout = SD_TIMEOUT; cmd->transfersize = len; cmd->allowed = SD_MAX_RETRIES; - /* - * Initially __data_len is set to the amount of data that needs to be - * transferred to the target. This amount depends on whether WRITE SAME - * or UNMAP is being used. After the scatterlist has been mapped by - * scsi_init_io() we set __data_len to the size of the area to be - * discarded on disk. This allows us to report completion on the full - * amount of blocks described by the request. 
- */ - blk_add_request_payload(rq, page, 0, len); - ret = scsi_init_io(cmd); - rq->__data_len = nr_bytes; + rq->special_vec.bv_page = page; + rq->special_vec.bv_offset = 0; + rq->special_vec.bv_len = len; + + rq->rq_flags |= RQF_SPECIAL_PAYLOAD; + rq->resid_len = len; + ret = scsi_init_io(cmd); out: if (ret != BLKPREP_OK) __free_page(page); @@ -844,6 +844,12 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); + if (sd_is_zoned(sdkp)) { + ret = sd_zbc_setup_write_cmnd(cmd); + if (ret != BLKPREP_OK) + return ret; + } + sector >>= ilog2(sdp->sector_size) - 9; nr_sectors >>= ilog2(sdp->sector_size) - 9; @@ -901,19 +907,25 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) struct request *rq = SCpnt->request; struct scsi_device *sdp = SCpnt->device; struct gendisk *disk = rq->rq_disk; - struct scsi_disk *sdkp; + struct scsi_disk *sdkp = scsi_disk(disk); sector_t block = blk_rq_pos(rq); sector_t threshold; unsigned int this_count = blk_rq_sectors(rq); unsigned int dif, dix; + bool zoned_write = sd_is_zoned(sdkp) && rq_data_dir(rq) == WRITE; int ret; unsigned char protect; + if (zoned_write) { + ret = sd_zbc_setup_write_cmnd(SCpnt); + if (ret != BLKPREP_OK) + return ret; + } + ret = scsi_init_io(SCpnt); if (ret != BLKPREP_OK) goto out; SCpnt = rq->special; - sdkp = scsi_disk(disk); /* from here on until we're complete, any goto out * is used for a killable error condition */ @@ -1013,8 +1025,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) } else if (rq_data_dir(rq) == READ) { SCpnt->cmnd[0] = READ_6; } else { - scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n", - req_op(rq), (unsigned long long) rq->cmd_flags); + scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq)); goto out; } @@ -1132,6 +1143,9 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) */ ret = BLKPREP_OK; out: + if (zoned_write && ret != BLKPREP_OK) + sd_zbc_cancel_write_cmnd(SCpnt); + return ret; } @@ -1149,6 +1163,10 @@ static int sd_init_command(struct scsi_cmnd *cmd) case REQ_OP_READ: case REQ_OP_WRITE: return sd_setup_read_write_cmnd(cmd); + case REQ_OP_ZONE_REPORT: + return sd_zbc_setup_report_cmnd(cmd); + case REQ_OP_ZONE_RESET: + return sd_zbc_setup_reset_cmnd(cmd); default: BUG(); } @@ -1158,8 +1176,8 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = SCpnt->request; - if (req_op(rq) == REQ_OP_DISCARD) - __free_page(rq->completion_data); + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + __free_page(rq->special_vec.bv_page); if (SCpnt->cmnd != rq->cmd) { mempool_free(SCpnt->cmnd, sd_cdb_pool); @@ -1495,7 +1513,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp) */ res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, timeout, SD_MAX_RETRIES, - NULL, REQ_PM); + NULL, 0, RQF_PM); if (res == 0) break; } @@ -1780,7 +1798,10 @@ static int sd_done(struct scsi_cmnd *SCpnt) unsigned char op = SCpnt->cmnd[0]; unsigned char unmap = SCpnt->cmnd[1] & 8; - if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) { + switch (req_op(req)) { + case REQ_OP_DISCARD: + case REQ_OP_WRITE_SAME: + case REQ_OP_ZONE_RESET: if (!result) { good_bytes = blk_rq_bytes(req); scsi_set_resid(SCpnt, 0); @@ -1788,6 +1809,17 @@ static int sd_done(struct scsi_cmnd *SCpnt) good_bytes = 0; scsi_set_resid(SCpnt, blk_rq_bytes(req)); } + break; + case REQ_OP_ZONE_REPORT: + if (!result) { + good_bytes = scsi_bufflen(SCpnt) + - scsi_get_resid(SCpnt); + scsi_set_resid(SCpnt, 
0); + } else { + good_bytes = 0; + scsi_set_resid(SCpnt, blk_rq_bytes(req)); + } + break; } if (result) { @@ -1840,7 +1872,7 @@ static int sd_done(struct scsi_cmnd *SCpnt) good_bytes = 0; req->__data_len = blk_rq_bytes(req); - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; } } } @@ -1848,7 +1880,11 @@ static int sd_done(struct scsi_cmnd *SCpnt) default: break; } + out: + if (sd_is_zoned(sdkp)) + sd_zbc_complete(SCpnt, good_bytes, &sshdr); + SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, "sd_done: completed %d of %d bytes\n", good_bytes, scsi_bufflen(SCpnt))); @@ -1983,7 +2019,6 @@ sd_spinup_disk(struct scsi_disk *sdkp) } } - /* * Determine whether disk supports Data Integrity Field. */ @@ -2133,6 +2168,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, /* Logical blocks per physical block exponent */ sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; + /* RC basis */ + sdkp->rc_basis = (buffer[12] >> 4) & 0x3; + /* Lowest aligned logical block */ alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; blk_queue_alignment_offset(sdp->request_queue, alignment); @@ -2242,7 +2280,6 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) { int sector_size; struct scsi_device *sdp = sdkp->device; - sector_t old_capacity = sdkp->capacity; if (sd_try_rc16_first(sdp)) { sector_size = read_capacity_16(sdkp, sdp, buffer); @@ -2323,35 +2360,44 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) sector_size = 512; } blk_queue_logical_block_size(sdp->request_queue, sector_size); + blk_queue_physical_block_size(sdp->request_queue, + sdkp->physical_block_size); + sdkp->device->sector_size = sector_size; - { - char cap_str_2[10], cap_str_10[10]; + if (sdkp->capacity > 0xffffffff) + sdp->use_16_for_rw = 1; - string_get_size(sdkp->capacity, sector_size, - STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); - string_get_size(sdkp->capacity, sector_size, - STRING_UNITS_10, cap_str_10, - sizeof(cap_str_10)); +} - if (sdkp->first_scan || old_capacity != sdkp->capacity) { - sd_printk(KERN_NOTICE, sdkp, - "%llu %d-byte logical blocks: (%s/%s)\n", - (unsigned long long)sdkp->capacity, - sector_size, cap_str_10, cap_str_2); +/* + * Print disk capacity + */ +static void +sd_print_capacity(struct scsi_disk *sdkp, + sector_t old_capacity) +{ + int sector_size = sdkp->device->sector_size; + char cap_str_2[10], cap_str_10[10]; - if (sdkp->physical_block_size != sector_size) - sd_printk(KERN_NOTICE, sdkp, - "%u-byte physical blocks\n", - sdkp->physical_block_size); - } - } + string_get_size(sdkp->capacity, sector_size, + STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); + string_get_size(sdkp->capacity, sector_size, + STRING_UNITS_10, cap_str_10, + sizeof(cap_str_10)); - if (sdkp->capacity > 0xffffffff) - sdp->use_16_for_rw = 1; + if (sdkp->first_scan || old_capacity != sdkp->capacity) { + sd_printk(KERN_NOTICE, sdkp, + "%llu %d-byte logical blocks: (%s/%s)\n", + (unsigned long long)sdkp->capacity, + sector_size, cap_str_10, cap_str_2); - blk_queue_physical_block_size(sdp->request_queue, - sdkp->physical_block_size); - sdkp->device->sector_size = sector_size; + if (sdkp->physical_block_size != sector_size) + sd_printk(KERN_NOTICE, sdkp, + "%u-byte physical blocks\n", + sdkp->physical_block_size); + + sd_zbc_print_zones(sdkp); + } } /* called with buffer of length 512 */ @@ -2419,9 +2465,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) if (sdkp->first_scan || old_wp != sdkp->write_prot) { 
sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", sdkp->write_prot ? "on" : "off"); - sd_printk(KERN_DEBUG, sdkp, - "Mode Sense: %02x %02x %02x %02x\n", - buffer[0], buffer[1], buffer[2], buffer[3]); + sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); } } } @@ -2613,7 +2657,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) struct scsi_mode_data data; struct scsi_sense_hdr sshdr; - if (sdp->type != TYPE_DISK) + if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return; if (sdkp->protection_type == 0) @@ -2720,6 +2764,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) */ static void sd_read_block_characteristics(struct scsi_disk *sdkp) { + struct request_queue *q = sdkp->disk->queue; unsigned char *buffer; u16 rot; const int vpd_len = 64; @@ -2734,10 +2779,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) rot = get_unaligned_be16(&buffer[4]); if (rot == 1) { - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue); + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); } + sdkp->zoned = (buffer[8] >> 4) & 3; + if (sdkp->zoned == 1) + q->limits.zoned = BLK_ZONED_HA; + else if (sdkp->device->type == TYPE_ZBC) + q->limits.zoned = BLK_ZONED_HM; + else + q->limits.zoned = BLK_ZONED_NONE; + if (blk_queue_is_zoned(q) && sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", + q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); + out: kfree(buffer); } @@ -2809,6 +2865,7 @@ static int sd_revalidate_disk(struct gendisk *disk) struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; struct request_queue *q = sdkp->disk->queue; + sector_t old_capacity = sdkp->capacity; unsigned char *buffer; unsigned int dev_max, rw_max; @@ -2842,8 +2899,11 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); sd_read_block_characteristics(sdkp); + sd_zbc_read_zones(sdkp, buffer); } + sd_print_capacity(sdkp, old_capacity); + sd_read_write_protect_flag(sdkp, buffer); sd_read_cache_type(sdkp, buffer); sd_read_app_tag_own(sdkp, buffer); @@ -3041,9 +3101,16 @@ static int sd_probe(struct device *dev) scsi_autopm_get_device(sdp); error = -ENODEV; - if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC) + if (sdp->type != TYPE_DISK && + sdp->type != TYPE_ZBC && + sdp->type != TYPE_MOD && + sdp->type != TYPE_RBC) goto out; +#ifndef CONFIG_BLK_DEV_ZONED + if (sdp->type == TYPE_ZBC) + goto out; +#endif SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, "sd_probe\n")); @@ -3147,6 +3214,8 @@ static int sd_remove(struct device *dev) del_gendisk(sdkp->disk); sd_shutdown(dev); + sd_zbc_remove(sdkp); + blk_register_region(devt, SD_MINORS, NULL, sd_default_probe, NULL, NULL); @@ -3200,7 +3269,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) return -ENODEV; res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); + SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM); if (res) { sd_print_result(sdkp, "Start/Stop Unit failed", res); if (driver_byte(res) & DRIVER_SENSE) diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index c8d986368da9e64ce130d8d49b8c63ae7db85140..4dac35e96a75baf833acbfe017c3edef8f8f23b8 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -64,6 +64,15 @@ struct scsi_disk { struct scsi_device *device; 
struct device dev; struct gendisk *disk; +#ifdef CONFIG_BLK_DEV_ZONED + unsigned int nr_zones; + unsigned int zone_blocks; + unsigned int zone_shift; + unsigned long *zones_wlock; + unsigned int zones_optimal_open; + unsigned int zones_optimal_nonseq; + unsigned int zones_max_open; +#endif atomic_t openers; sector_t capacity; /* size in logical blocks */ u32 max_xfer_blocks; @@ -94,6 +103,9 @@ struct scsi_disk { unsigned lbpvpd : 1; unsigned ws10 : 1; unsigned ws16 : 1; + unsigned rc_basis: 2; + unsigned zoned: 2; + unsigned urswrz : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) @@ -156,6 +168,11 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b return blocks * sdev->sector_size; } +static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector) +{ + return sector >> (ilog2(sdev->sector_size) - 9); +} + /* * Look up the DIX operation based on whether the command is read or * write and whether dix and dif are enabled. @@ -239,4 +256,57 @@ static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a) #endif /* CONFIG_BLK_DEV_INTEGRITY */ +static inline int sd_is_zoned(struct scsi_disk *sdkp) +{ + return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC; +} + +#ifdef CONFIG_BLK_DEV_ZONED + +extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer); +extern void sd_zbc_remove(struct scsi_disk *sdkp); +extern void sd_zbc_print_zones(struct scsi_disk *sdkp); +extern int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd); +extern void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd); +extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd); +extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd); +extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, + struct scsi_sense_hdr *sshdr); + +#else /* CONFIG_BLK_DEV_ZONED */ + +static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, + unsigned char *buf) +{ + return 0; +} + +static inline void sd_zbc_remove(struct scsi_disk *sdkp) {} + +static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {} + +static inline int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd) +{ + /* Let the drive fail requests */ + return BLKPREP_OK; +} + +static inline void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd) {} + +static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd) +{ + return BLKPREP_INVALID; +} + +static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) +{ + return BLKPREP_INVALID; +} + +static inline void sd_zbc_complete(struct scsi_cmnd *cmd, + unsigned int good_bytes, + struct scsi_sense_hdr *sshdr) {} + +#endif /* CONFIG_BLK_DEV_ZONED */ + #endif /* _SCSI_DISK_H */ diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c new file mode 100644 index 0000000000000000000000000000000000000000..92620c8ea8ad93722c466652a4b82cd43013d8a8 --- /dev/null +++ b/drivers/scsi/sd_zbc.c @@ -0,0 +1,648 @@ +/* + * SCSI Zoned Block commands + * + * Copyright (C) 2014-2015 SUSE Linux GmbH + * Written by: Hannes Reinecke + * Modified by: Damien Le Moal + * Modified by: Shaun Tancheff + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "sd.h" +#include "scsi_priv.h" + +enum zbc_zone_type { + ZBC_ZONE_TYPE_CONV = 0x1, + ZBC_ZONE_TYPE_SEQWRITE_REQ, + ZBC_ZONE_TYPE_SEQWRITE_PREF, + ZBC_ZONE_TYPE_RESERVED, +}; + +enum zbc_zone_cond { + ZBC_ZONE_COND_NO_WP, + ZBC_ZONE_COND_EMPTY, + ZBC_ZONE_COND_IMP_OPEN, + ZBC_ZONE_COND_EXP_OPEN, + ZBC_ZONE_COND_CLOSED, + ZBC_ZONE_COND_READONLY = 0xd, + ZBC_ZONE_COND_FULL, + ZBC_ZONE_COND_OFFLINE, +}; + +/** + * Convert a zone descriptor to a zone struct. + */ +static void sd_zbc_parse_report(struct scsi_disk *sdkp, + u8 *buf, + struct blk_zone *zone) +{ + struct scsi_device *sdp = sdkp->device; + + memset(zone, 0, sizeof(struct blk_zone)); + + zone->type = buf[0] & 0x0f; + zone->cond = (buf[1] >> 4) & 0xf; + if (buf[1] & 0x01) + zone->reset = 1; + if (buf[1] & 0x02) + zone->non_seq = 1; + + zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8])); + zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16])); + zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24])); + if (zone->type != ZBC_ZONE_TYPE_CONV && + zone->cond == ZBC_ZONE_COND_FULL) + zone->wp = zone->start + zone->len; +} + +/** + * Issue a REPORT ZONES scsi command. + */ +static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf, + unsigned int buflen, sector_t lba) +{ + struct scsi_device *sdp = sdkp->device; + const int timeout = sdp->request_queue->rq_timeout; + struct scsi_sense_hdr sshdr; + unsigned char cmd[16]; + unsigned int rep_len; + int result; + + memset(cmd, 0, 16); + cmd[0] = ZBC_IN; + cmd[1] = ZI_REPORT_ZONES; + put_unaligned_be64(lba, &cmd[2]); + put_unaligned_be32(buflen, &cmd[10]); + memset(buf, 0, buflen); + + result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, + buf, buflen, &sshdr, + timeout, SD_MAX_RETRIES, NULL); + if (result) { + sd_printk(KERN_ERR, sdkp, + "REPORT ZONES lba %llu failed with %d/%d\n", + (unsigned long long)lba, + host_byte(result), driver_byte(result)); + return -EIO; + } + + rep_len = get_unaligned_be32(&buf[0]); + if (rep_len < 64) { + sd_printk(KERN_ERR, sdkp, + "REPORT ZONES report invalid length %u\n", + rep_len); + return -EIO; + } + + return 0; +} + +int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd) +{ + struct request *rq = cmd->request; + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + sector_t lba, sector = blk_rq_pos(rq); + unsigned int nr_bytes = blk_rq_bytes(rq); + int ret; + + WARN_ON(nr_bytes == 0); + + if (!sd_is_zoned(sdkp)) + /* Not a zoned device */ + return BLKPREP_KILL; + + ret = scsi_init_io(cmd); + if (ret != BLKPREP_OK) + return ret; + + cmd->cmd_len = 16; + memset(cmd->cmnd, 0, cmd->cmd_len); + cmd->cmnd[0] = ZBC_IN; + cmd->cmnd[1] = ZI_REPORT_ZONES; + lba = sectors_to_logical(sdkp->device, sector); + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be32(nr_bytes, &cmd->cmnd[10]); + /* Do partial report for speeding things up */ + cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL; + + cmd->sc_data_direction = DMA_FROM_DEVICE; + cmd->sdb.length = nr_bytes; + cmd->transfersize = sdkp->device->sector_size; + cmd->allowed = 0; + + /* + * Report may return less bytes than requested. 
Make sure + * to report completion on the entire initial request. + */ + rq->__data_len = nr_bytes; + + return BLKPREP_OK; +} + +static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd, + unsigned int good_bytes) +{ + struct request *rq = scmd->request; + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + struct sg_mapping_iter miter; + struct blk_zone_report_hdr hdr; + struct blk_zone zone; + unsigned int offset, bytes = 0; + unsigned long flags; + u8 *buf; + + if (good_bytes < 64) + return; + + memset(&hdr, 0, sizeof(struct blk_zone_report_hdr)); + + sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + + local_irq_save(flags); + while (sg_miter_next(&miter) && bytes < good_bytes) { + + buf = miter.addr; + offset = 0; + + if (bytes == 0) { + /* Set the report header */ + hdr.nr_zones = min_t(unsigned int, + (good_bytes - 64) / 64, + get_unaligned_be32(&buf[0]) / 64); + memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr)); + offset += 64; + bytes += 64; + } + + /* Parse zone descriptors */ + while (offset < miter.length && hdr.nr_zones) { + WARN_ON(offset > miter.length); + buf = miter.addr + offset; + sd_zbc_parse_report(sdkp, buf, &zone); + memcpy(buf, &zone, sizeof(struct blk_zone)); + offset += 64; + bytes += 64; + hdr.nr_zones--; + } + + if (!hdr.nr_zones) + break; + + } + sg_miter_stop(&miter); + local_irq_restore(flags); +} + +static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp) +{ + return logical_to_sectors(sdkp->device, sdkp->zone_blocks); +} + +static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp, + sector_t sector) +{ + return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift; +} + +int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) +{ + struct request *rq = cmd->request; + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + sector_t sector = blk_rq_pos(rq); + sector_t block = sectors_to_logical(sdkp->device, sector); + unsigned int zno = block >> sdkp->zone_shift; + + if (!sd_is_zoned(sdkp)) + /* Not a zoned device */ + return BLKPREP_KILL; + + if (sdkp->device->changed) + return BLKPREP_KILL; + + if (sector & (sd_zbc_zone_sectors(sdkp) - 1)) + /* Unaligned request */ + return BLKPREP_KILL; + + /* Do not allow concurrent reset and writes */ + if (sdkp->zones_wlock && + test_and_set_bit(zno, sdkp->zones_wlock)) + return BLKPREP_DEFER; + + cmd->cmd_len = 16; + memset(cmd->cmnd, 0, cmd->cmd_len); + cmd->cmnd[0] = ZBC_OUT; + cmd->cmnd[1] = ZO_RESET_WRITE_POINTER; + put_unaligned_be64(block, &cmd->cmnd[2]); + + rq->timeout = SD_TIMEOUT; + cmd->sc_data_direction = DMA_NONE; + cmd->transfersize = 0; + cmd->allowed = 0; + + return BLKPREP_OK; +} + +int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd) +{ + struct request *rq = cmd->request; + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + sector_t sector = blk_rq_pos(rq); + sector_t zone_sectors = sd_zbc_zone_sectors(sdkp); + unsigned int zno = sd_zbc_zone_no(sdkp, sector); + + /* + * Note: Checks of the alignment of the write command on + * logical blocks is done in sd.c + */ + + /* Do not allow zone boundaries crossing on host-managed drives */ + if (blk_queue_zoned_model(sdkp->disk->queue) == BLK_ZONED_HM && + (sector & (zone_sectors - 1)) + blk_rq_sectors(rq) > zone_sectors) + return BLKPREP_KILL; + + /* + * Do not issue more than one write at a time per + * zone. This solves write ordering problems due to + * the unlocking of the request queue in the dispatch + * path in the non scsi-mq case. 
For scsi-mq, this + * also avoids potential write reordering when multiple + * threads running on different CPUs write to the same + * zone (with a synchronized sequential pattern). + */ + if (sdkp->zones_wlock && + test_and_set_bit(zno, sdkp->zones_wlock)) + return BLKPREP_DEFER; + + return BLKPREP_OK; +} + +static void sd_zbc_unlock_zone(struct request *rq) +{ + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + + if (sdkp->zones_wlock) { + unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); + WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); + clear_bit_unlock(zno, sdkp->zones_wlock); + smp_mb__after_atomic(); + } +} + +void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd) +{ + sd_zbc_unlock_zone(cmd->request); +} + +void sd_zbc_complete(struct scsi_cmnd *cmd, + unsigned int good_bytes, + struct scsi_sense_hdr *sshdr) +{ + int result = cmd->result; + struct request *rq = cmd->request; + + switch (req_op(rq)) { + case REQ_OP_WRITE: + case REQ_OP_WRITE_SAME: + case REQ_OP_ZONE_RESET: + + /* Unlock the zone */ + sd_zbc_unlock_zone(rq); + + if (!result || + sshdr->sense_key != ILLEGAL_REQUEST) + break; + + switch (sshdr->asc) { + case 0x24: + /* + * INVALID FIELD IN CDB error: For a zone reset, + * this means that a reset of a conventional + * zone was attempted. Nothing to worry about in + * this case, so be quiet about the error. + */ + if (req_op(rq) == REQ_OP_ZONE_RESET) + rq->rq_flags |= RQF_QUIET; + break; + case 0x21: + /* + * INVALID ADDRESS FOR WRITE error: It is unlikely that + * retrying write requests failed with any kind of + * alignement error will result in success. So don't. + */ + cmd->allowed = 0; + break; + } + + break; + + case REQ_OP_ZONE_REPORT: + + if (!result) + sd_zbc_report_zones_complete(cmd, good_bytes); + break; + + } +} + +/** + * Read zoned block device characteristics (VPD page B6). + */ +static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp, + unsigned char *buf) +{ + + if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) { + sd_printk(KERN_NOTICE, sdkp, + "Unconstrained-read check failed\n"); + return -ENODEV; + } + + if (sdkp->device->type != TYPE_ZBC) { + /* Host-aware */ + sdkp->urswrz = 1; + sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]); + sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]); + sdkp->zones_max_open = 0; + } else { + /* Host-managed */ + sdkp->urswrz = buf[4] & 1; + sdkp->zones_optimal_open = 0; + sdkp->zones_optimal_nonseq = 0; + sdkp->zones_max_open = get_unaligned_be64(&buf[16]); + } + + return 0; +} + +/** + * Check reported capacity. 
+ */ +static int sd_zbc_check_capacity(struct scsi_disk *sdkp, + unsigned char *buf) +{ + sector_t lba; + int ret; + + if (sdkp->rc_basis != 0) + return 0; + + /* Do a report zone to get the maximum LBA to check capacity */ + ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0); + if (ret) + return ret; + + /* The max_lba field is the capacity of this device */ + lba = get_unaligned_be64(&buf[8]); + if (lba + 1 == sdkp->capacity) + return 0; + + if (sdkp->first_scan) + sd_printk(KERN_WARNING, sdkp, + "Changing capacity from %llu to max LBA+1 %llu\n", + (unsigned long long)sdkp->capacity, + (unsigned long long)lba + 1); + sdkp->capacity = lba + 1; + + return 0; +} + +#define SD_ZBC_BUF_SIZE 131072 + +static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) +{ + u64 zone_blocks; + sector_t block = 0; + unsigned char *buf; + unsigned char *rec; + unsigned int buf_len; + unsigned int list_length; + int ret; + u8 same; + + sdkp->zone_blocks = 0; + + /* Get a buffer */ + buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Do a report zone to get the same field */ + ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); + if (ret) { + zone_blocks = 0; + goto out; + } + + same = buf[4] & 0x0f; + if (same > 0) { + rec = &buf[64]; + zone_blocks = get_unaligned_be64(&rec[8]); + goto out; + } + + /* + * Check the size of all zones: all zones must be of + * equal size, except the last zone which can be smaller + * than other zones. + */ + do { + + /* Parse REPORT ZONES header */ + list_length = get_unaligned_be32(&buf[0]) + 64; + rec = buf + 64; + if (list_length < SD_ZBC_BUF_SIZE) + buf_len = list_length; + else + buf_len = SD_ZBC_BUF_SIZE; + + /* Parse zone descriptors */ + while (rec < buf + buf_len) { + zone_blocks = get_unaligned_be64(&rec[8]); + if (sdkp->zone_blocks == 0) { + sdkp->zone_blocks = zone_blocks; + } else if (zone_blocks != sdkp->zone_blocks && + (block + zone_blocks < sdkp->capacity + || zone_blocks > sdkp->zone_blocks)) { + zone_blocks = 0; + goto out; + } + block += zone_blocks; + rec += 64; + } + + if (block < sdkp->capacity) { + ret = sd_zbc_report_zones(sdkp, buf, + SD_ZBC_BUF_SIZE, block); + if (ret) + return ret; + } + + } while (block < sdkp->capacity); + + zone_blocks = sdkp->zone_blocks; + +out: + kfree(buf); + + if (!zone_blocks) { + if (sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, + "Devices with non constant zone " + "size are not supported\n"); + return -ENODEV; + } + + if (!is_power_of_2(zone_blocks)) { + if (sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, + "Devices with non power of 2 zone " + "size are not supported\n"); + return -ENODEV; + } + + if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { + if (sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, + "Zone size too large\n"); + return -ENODEV; + } + + sdkp->zone_blocks = zone_blocks; + + return 0; +} + +static int sd_zbc_setup(struct scsi_disk *sdkp) +{ + + /* chunk_sectors indicates the zone size */ + blk_queue_chunk_sectors(sdkp->disk->queue, + logical_to_sectors(sdkp->device, sdkp->zone_blocks)); + sdkp->zone_shift = ilog2(sdkp->zone_blocks); + sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift; + if (sdkp->capacity & (sdkp->zone_blocks - 1)) + sdkp->nr_zones++; + + if (!sdkp->zones_wlock) { + sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones), + sizeof(unsigned long), + GFP_KERNEL); + if (!sdkp->zones_wlock) + return -ENOMEM; + } + + return 0; +} + +int sd_zbc_read_zones(struct scsi_disk *sdkp, + unsigned char *buf) +{ + sector_t capacity; + int ret = 
0; + + if (!sd_is_zoned(sdkp)) + /* + * Device managed or normal SCSI disk, + * no special handling required + */ + return 0; + + + /* Get zoned block device characteristics */ + ret = sd_zbc_read_zoned_characteristics(sdkp, buf); + if (ret) + goto err; + + /* + * Check for unconstrained reads: host-managed devices with + * constrained reads (drives failing read after write pointer) + * are not supported. + */ + if (!sdkp->urswrz) { + if (sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, + "constrained reads devices are not supported\n"); + ret = -ENODEV; + goto err; + } + + /* Check capacity */ + ret = sd_zbc_check_capacity(sdkp, buf); + if (ret) + goto err; + capacity = logical_to_sectors(sdkp->device, sdkp->capacity); + + /* + * Check zone size: only devices with a constant zone size (except + * an eventual last runt zone) that is a power of 2 are supported. + */ + ret = sd_zbc_check_zone_size(sdkp); + if (ret) + goto err; + + /* The drive satisfies the kernel restrictions: set it up */ + ret = sd_zbc_setup(sdkp); + if (ret) + goto err; + + /* READ16/WRITE16 is mandatory for ZBC disks */ + sdkp->device->use_16_for_rw = 1; + sdkp->device->use_10_for_rw = 0; + + return 0; + +err: + sdkp->capacity = 0; + + return ret; +} + +void sd_zbc_remove(struct scsi_disk *sdkp) +{ + kfree(sdkp->zones_wlock); + sdkp->zones_wlock = NULL; +} + +void sd_zbc_print_zones(struct scsi_disk *sdkp) +{ + if (!sd_is_zoned(sdkp) || !sdkp->capacity) + return; + + if (sdkp->capacity & (sdkp->zone_blocks - 1)) + sd_printk(KERN_NOTICE, sdkp, + "%u zones of %u logical blocks + 1 runt zone\n", + sdkp->nr_zones - 1, + sdkp->zone_blocks); + else + sd_printk(KERN_NOTICE, sdkp, + "%u zones of %u logical blocks\n", + sdkp->nr_zones, + sdkp->zone_blocks); +} diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 070332eb41f33de2c765bedc724f50f404af62a9..dbe5b4b95df0d9d317dbdc2261914e4d8771991f 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -581,6 +581,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + return -EINVAL; + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 07b6444d3e0ab5f3c52d885879cff5356e048959..b673825f46b51a8a90b2da929c09cb553de935f5 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -929,8 +929,6 @@ struct pqi_ctrl_info { int max_msix_vectors; int num_msix_vectors_enabled; int num_msix_vectors_initialized; - u32 msix_vectors[PQI_MAX_MSIX_VECTORS]; - void *intr_data[PQI_MAX_MSIX_VECTORS]; int event_irq; struct Scsi_Host *scsi_host; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index a535b2661f3831ffbf87730e6a4ce56c679f4054..8702d9cf80409ff00576fe1b92b013f30649e651 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -2887,19 +2888,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data) static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) { + struct pci_dev *pdev = ctrl_info->pci_dev; int i; int rc; - ctrl_info->event_irq = ctrl_info->msix_vectors[0]; + ctrl_info->event_irq = pci_irq_vector(pdev, 0); for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { - rc = 
request_irq(ctrl_info->msix_vectors[i], - pqi_irq_handler, 0, - DRIVER_NAME_SHORT, ctrl_info->intr_data[i]); + rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0, + DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); if (rc) { - dev_err(&ctrl_info->pci_dev->dev, + dev_err(&pdev->dev, "irq %u init failed with error %d\n", - ctrl_info->msix_vectors[i], rc); + pci_irq_vector(pdev, i), rc); return rc; } ctrl_info->num_msix_vectors_initialized++; @@ -2908,72 +2909,23 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) return 0; } -static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) -{ - int i; - - for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) - free_irq(ctrl_info->msix_vectors[i], - ctrl_info->intr_data[i]); -} - static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) { - unsigned int i; - int max_vectors; - int num_vectors_enabled; - struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS]; - - max_vectors = ctrl_info->num_queue_groups; - - for (i = 0; i < max_vectors; i++) - msix_entries[i].entry = i; - - num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev, - msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors); + int ret; - if (num_vectors_enabled < 0) { + ret = pci_alloc_irq_vectors(ctrl_info->pci_dev, + PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + if (ret < 0) { dev_err(&ctrl_info->pci_dev->dev, - "MSI-X init failed with error %d\n", - num_vectors_enabled); - return num_vectors_enabled; - } - - ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; - for (i = 0; i < num_vectors_enabled; i++) { - ctrl_info->msix_vectors[i] = msix_entries[i].vector; - ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i]; + "MSI-X init failed with error %d\n", ret); + return ret; } + ctrl_info->num_msix_vectors_enabled = ret; return 0; } -static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info) -{ - int i; - int rc; - int cpu; - - cpu = cpumask_first(cpu_online_mask); - for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) { - rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i], - get_cpu_mask(cpu)); - if (rc) - dev_err(&ctrl_info->pci_dev->dev, - "error %d setting affinity hint for irq vector %u\n", - rc, ctrl_info->msix_vectors[i]); - cpu = cpumask_next(cpu, cpu_online_mask); - } -} - -static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info) -{ - int i; - - for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) - irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL); -} - static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) { unsigned int i; @@ -4743,6 +4695,13 @@ static int pqi_slave_configure(struct scsi_device *sdev) return 0; } +static int pqi_map_queues(struct Scsi_Host *shost) +{ + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + + return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev); +} + static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) { @@ -5130,6 +5089,7 @@ static struct scsi_host_template pqi_driver_template = { .ioctl = pqi_ioctl, .slave_alloc = pqi_slave_alloc, .slave_configure = pqi_slave_configure, + .map_queues = pqi_map_queues, .sdev_attrs = pqi_sdev_attrs, .shost_attrs = pqi_shost_attrs, }; @@ -5159,7 +5119,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) shost->cmd_per_lun = shost->can_queue; shost->sg_tablesize = ctrl_info->sg_tablesize; shost->transportt = pqi_sas_transport_template; - shost->irq = ctrl_info->msix_vectors[0]; + shost->irq = 
pci_irq_vector(ctrl_info->pci_dev, 0); shost->unique_id = shost->irq; shost->nr_hw_queues = ctrl_info->num_queue_groups; shost->hostdata[0] = (unsigned long)ctrl_info; @@ -5409,8 +5369,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; - pqi_irq_set_affinity_hint(ctrl_info); - rc = pqi_create_queues(ctrl_info); if (rc) return rc; @@ -5557,10 +5515,14 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) { - pqi_irq_unset_affinity_hint(ctrl_info); - pqi_free_irqs(ctrl_info); - if (ctrl_info->num_msix_vectors_enabled) - pci_disable_msix(ctrl_info->pci_dev); + int i; + + for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) { + free_irq(pci_irq_vector(ctrl_info->pci_dev, i), + &ctrl_info->queue_groups[i]); + } + + pci_free_irq_vectors(ctrl_info->pci_dev); } static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index bed2bbd6b92304cec9eabb73245224782da7fc21..94352e4df831be803c3ef6a76cf7f6177750f29c 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 03054c0e7689bd0587e17c5a56d22212739d8c04..dfffdf63e44c922bcc172e7f9dec01651f6c800a 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 618422ea3a4123d8d0b115f8811634e8225de8a1..5f35b863e1a709fa3354ca3bae923fb9aee24fae 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -41,7 +41,7 @@ static const char *verstr = "20160209"; #include #include -#include +#include #include #include @@ -546,7 +546,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd, return DRIVER_ERROR << 24; blk_rq_set_block_pc(req); - req->cmd_flags |= REQ_QUIET; + req->rq_flags |= RQF_QUIET; mdata->null_mapped = 1; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 8ccfc9ea874bc2eb1b6197fdcfec72ca2c876d9a..05526b71541b0aed0a2b8d37c227fd0fcb3ae24d 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1495,9 +1495,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) if (sg_count) { if (sg_count > MAX_PAGE_BUFFER_COUNT) { - payload_sz = (sg_count * sizeof(void *) + + payload_sz = (sg_count * sizeof(u64) + sizeof(struct vmbus_packet_mpb_array)); - payload = kmalloc(payload_sz, GFP_ATOMIC); + payload = kzalloc(payload_sz, GFP_ATOMIC); if (!payload) return SCSI_MLQUEUE_DEVICE_BUSY; } diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 3c4c07038948d5cef306f500630d4f3d48ca36b5..88db6992420e47f023fdbba0a6c1d781e325408c 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -43,20 +43,18 @@ #define NCR5380_implementation_fields /* none */ -#define NCR5380_read(reg) sun3scsi_read(reg) -#define NCR5380_write(reg, value) sun3scsi_write(reg, value) +#define NCR5380_read(reg) in_8(hostdata->io + (reg)) +#define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value) #define NCR5380_queue_command sun3scsi_queue_command #define NCR5380_bus_reset sun3scsi_bus_reset #define NCR5380_abort sun3scsi_abort #define NCR5380_info sun3scsi_info -#define NCR5380_dma_recv_setup(instance, data, count) (count) -#define NCR5380_dma_send_setup(instance, data, count) 
(count) -#define NCR5380_dma_residual(instance) \ - sun3scsi_dma_residual(instance) -#define NCR5380_dma_xfer_len(instance, cmd, phase) \ - sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd) +#define NCR5380_dma_xfer_len sun3scsi_dma_xfer_len +#define NCR5380_dma_recv_setup sun3scsi_dma_count +#define NCR5380_dma_send_setup sun3scsi_dma_count +#define NCR5380_dma_residual sun3scsi_dma_residual #define NCR5380_acquire_dma_irq(instance) (1) #define NCR5380_release_dma_irq(instance) @@ -82,7 +80,6 @@ module_param(setup_hostid, int, 0); #define SUN3_DVMA_BUFSIZE 0xe000 static struct scsi_cmnd *sun3_dma_setup_done; -static unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; static struct sun3_udc_regs *udc_regs; static unsigned char *sun3_dma_orig_addr; @@ -90,20 +87,6 @@ static unsigned long sun3_dma_orig_count; static int sun3_dma_active; static unsigned long last_residual; -/* - * NCR 5380 register access functions - */ - -static inline unsigned char sun3scsi_read(int reg) -{ - return in_8(sun3_scsi_regp + reg); -} - -static inline void sun3scsi_write(int reg, int value) -{ - out_8(sun3_scsi_regp + reg, value); -} - #ifndef SUN3_SCSI_VME /* dma controller register access functions */ @@ -158,8 +141,8 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dev) } /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ -static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance, - void *data, unsigned long count, int write_flag) +static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count, int write_flag) { void *addr; @@ -211,9 +194,10 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance, dregs->csr |= CSR_FIFO; if(dregs->fifo_count != count) { - shost_printk(KERN_ERR, instance, "FIFO mismatch %04x not %04x\n", + shost_printk(KERN_ERR, hostdata->host, + "FIFO mismatch %04x not %04x\n", dregs->fifo_count, (unsigned int) count); - NCR5380_dprint(NDEBUG_DMA, instance); + NCR5380_dprint(NDEBUG_DMA, hostdata->host); } /* setup udc */ @@ -248,14 +232,34 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance, } -static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) +static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return count; +} + +static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return sun3scsi_dma_setup(hostdata, data, count, 0); +} + +static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata, + unsigned char *data, int count) +{ + return sun3scsi_dma_setup(hostdata, data, count, 1); +} + +static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata) { return last_residual; } -static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len, - struct scsi_cmnd *cmd) +static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, + struct scsi_cmnd *cmd) { + int wanted_len = cmd->SCp.this_residual; + if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS) return 0; @@ -428,9 +432,10 @@ static struct scsi_host_template sun3_scsi_template = { static int __init sun3_scsi_probe(struct platform_device *pdev) { struct Scsi_Host *instance; + struct NCR5380_hostdata *hostdata; int error; struct resource *irq, *mem; - unsigned char *ioaddr; + void __iomem *ioaddr; int host_flags = 0; #ifdef SUN3_SCSI_VME int i; @@ -493,8 +498,6 @@ static int __init sun3_scsi_probe(struct platform_device 
*pdev) } #endif - sun3_scsi_regp = ioaddr; - instance = scsi_host_alloc(&sun3_scsi_template, sizeof(struct NCR5380_hostdata)); if (!instance) { @@ -502,9 +505,12 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) goto fail_alloc; } - instance->io_port = (unsigned long)ioaddr; instance->irq = irq->start; + hostdata = shost_priv(instance); + hostdata->base = mem->start; + hostdata->io = ioaddr; + error = NCR5380_init(instance, host_flags); if (error) goto fail_init; @@ -552,13 +558,15 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) fail_alloc: if (udc_regs) dvma_free(udc_regs); - iounmap(sun3_scsi_regp); + iounmap(ioaddr); return error; } static int __exit sun3_scsi_remove(struct platform_device *pdev) { struct Scsi_Host *instance = platform_get_drvdata(pdev); + struct NCR5380_hostdata *hostdata = shost_priv(instance); + void __iomem *ioaddr = hostdata->io; scsi_remove_host(instance); free_irq(instance->irq, instance); @@ -566,7 +574,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev) scsi_host_put(instance); if (udc_regs) dvma_free(udc_regs); - iounmap(sun3_scsi_regp); + iounmap(ioaddr); return 0; } diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 3aedf73f1131369efe18e4feb5ce793507d4714d..abe6173726614f627c085129f4e8aa9dc6ba1eda 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -23,6 +23,7 @@ #include "unipro.h" #include "ufs-qcom.h" #include "ufshci.h" +#include "ufs_quirks.h" #define UFS_QCOM_DEFAULT_DBG_PRINT_EN \ (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN) @@ -1031,6 +1032,34 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, return ret; } +static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) +{ + int err; + u32 pa_vs_config_reg1; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), + &pa_vs_config_reg1); + if (err) + goto out; + + /* Allow extension of MSB bits of PA_SaveConfigTime attribute */ + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), + (pa_vs_config_reg1 | (1 << 12))); + +out: + return err; +} + +static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) +{ + int err = 0; + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) + err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); + + return err; +} + static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -1094,10 +1123,12 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba) * ufs_qcom_setup_clocks - enables/disable clocks * @hba: host controller instance * @on: If true, enable clocks else disable them. + * @status: PRE_CHANGE or POST_CHANGE notify * * Returns 0 on success, non-zero on failure. 
*/ -static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) +static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err; @@ -1111,18 +1142,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) if (!host) return 0; - if (on) { - err = ufs_qcom_phy_enable_iface_clk(host->generic_phy); - if (err) - goto out; + if (on && (status == POST_CHANGE)) { + phy_power_on(host->generic_phy); - err = ufs_qcom_phy_enable_ref_clk(host->generic_phy); - if (err) { - dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n", - __func__, err); - ufs_qcom_phy_disable_iface_clk(host->generic_phy); - goto out; - } /* enable the device ref clock for HS mode*/ if (ufshcd_is_hs_mode(&hba->pwr_info)) ufs_qcom_dev_ref_clk_ctrl(host, true); @@ -1130,14 +1152,15 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) if (vote == host->bus_vote.min_bw_vote) ufs_qcom_update_bus_bw_vote(host); - } else { - - /* M-PHY RMMI interface clocks can be turned off */ - ufs_qcom_phy_disable_iface_clk(host->generic_phy); - if (!ufs_qcom_is_link_active(hba)) + } else if (!on && (status == PRE_CHANGE)) { + if (!ufs_qcom_is_link_active(hba)) { /* disable device ref_clk */ ufs_qcom_dev_ref_clk_ctrl(host, false); + /* powering off PHY during aggressive clk gating */ + phy_power_off(host->generic_phy); + } + vote = host->bus_vote.min_bw_vote; } @@ -1146,7 +1169,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) dev_err(hba->dev, "%s: set bus vote failed %d\n", __func__, err); -out: return err; } @@ -1201,15 +1223,24 @@ static int ufs_qcom_init(struct ufs_hba *hba) */ host->generic_phy = devm_phy_get(dev, "ufsphy"); - if (IS_ERR(host->generic_phy)) { + if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) { + /* + * UFS driver might be probed before the phy driver does. + * In that case we would like to return EPROBE_DEFER code. + */ + err = -EPROBE_DEFER; + dev_warn(dev, "%s: required phy device. hasn't probed yet. 
err = %d\n", + __func__, err); + goto out_variant_clear; + } else if (IS_ERR(host->generic_phy)) { err = PTR_ERR(host->generic_phy); dev_err(dev, "%s: PHY get failed %d\n", __func__, err); - goto out; + goto out_variant_clear; } err = ufs_qcom_bus_register(host); if (err) - goto out_host_free; + goto out_variant_clear; ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, &host->hw_ver.minor, &host->hw_ver.step); @@ -1254,7 +1285,7 @@ static int ufs_qcom_init(struct ufs_hba *hba) ufs_qcom_set_caps(hba); ufs_qcom_advertise_quirks(hba); - ufs_qcom_setup_clocks(hba, true); + ufs_qcom_setup_clocks(hba, true, POST_CHANGE); if (hba->dev->id < MAX_UFS_QCOM_HOSTS) ufs_qcom_hosts[hba->dev->id] = host; @@ -1274,8 +1305,7 @@ static int ufs_qcom_init(struct ufs_hba *hba) phy_power_off(host->generic_phy); out_unregister_bus: phy_exit(host->generic_phy); -out_host_free: - devm_kfree(dev, host); +out_variant_clear: ufshcd_set_variant(hba, NULL); out: return err; @@ -1287,6 +1317,7 @@ static void ufs_qcom_exit(struct ufs_hba *hba) ufs_qcom_disable_lane_clks(host); phy_power_off(host->generic_phy); + phy_exit(host->generic_phy); } static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, @@ -1439,7 +1470,8 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); - ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1); + /* clear bit 17 - UTP_DBG_RAMS_EN */ + ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1); reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); @@ -1616,6 +1648,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .hce_enable_notify = ufs_qcom_hce_enable_notify, .link_startup_notify = ufs_qcom_link_startup_notify, .pwr_change_notify = ufs_qcom_pwr_change_notify, + .apply_dev_quirks = ufs_qcom_apply_dev_quirks, .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, .dbg_register_dump = ufs_qcom_dump_dbg_regs, diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index a19307a57ce248f5cb102c294408227b8ed603de..fe517cd7dac348b40b97c322e49e26976b7256cc 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -142,6 +142,7 @@ enum ufs_qcom_phy_init_type { UFS_QCOM_DBG_PRINT_TEST_BUS_EN) /* QUniPro Vendor specific attributes */ +#define PA_VS_CONFIG_REG1 0x9000 #define DME_VS_CORE_CLK_CTRL 0xD002 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */ #define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 845b874e29773883e9469db83bcd5b9b9656269c..8e6709a3fb6b1be98c48aab9914c59a1e4c4dbd3 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -46,6 +46,7 @@ #define QUERY_DESC_HDR_SIZE 2 #define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ (sizeof(struct utp_upiu_header))) +#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18 #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ @@ -162,7 +163,7 @@ enum desc_header_offset { }; enum ufs_desc_max_size { - QUERY_DESC_DEVICE_MAX_SIZE = 0x1F, + QUERY_DESC_DEVICE_MAX_SIZE = 0x40, QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90, QUERY_DESC_UNIT_MAX_SIZE = 0x23, QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06, @@ -416,7 +417,7 @@ struct utp_cmd_rsp { __be32 residual_transfer_count; __be32 reserved[4]; __be16 sense_data_len; - u8 sense_data[18]; + u8 
sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH]; }; /** diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index 22f881e9253a256a1614df77048081cafc12efdb..08b799d4efcc68b99b7c43fa7af968dd3b543a2c 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -128,26 +128,23 @@ struct ufs_dev_fix { */ #define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6) +/* + * Some UFS devices require host PA_TACTIVATE to be lower than device + * PA_TACTIVATE, enabling this quirk ensures this. + */ +#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7) + +/* + * The max. value of PA_SaveConfigTime is 250 (10us) but this is not enough for + * some vendors. + * Gear switch from PWM to HS may fail even with this max. PA_SaveConfigTime. + * Gear switch can be issued by host controller as an error recovery and any + * software delay will not help in this case so we need to increase + * PA_SaveConfigTime to >32us as per vendor recommendation. + */ +#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8) + struct ufs_hba; void ufs_advertise_fixup_device(struct ufs_hba *hba); -static struct ufs_dev_fix ufs_fixups[] = { - /* UFS cards deviations table */ - UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, - UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), - UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), - UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, - UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS), - UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, - UFS_DEVICE_NO_FASTAUTO), - UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL, - UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), - UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG", - UFS_DEVICE_QUIRK_PA_TACTIVATE), - UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", - UFS_DEVICE_QUIRK_PA_TACTIVATE), - UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), - - END_FIX -}; #endif /* UFS_QUIRKS_H_ */ diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index d15eaa466c59ffc7f48dda6a642c68de98de0b6d..52b546fb509b7a3636b8a9b65ee7f6c03dc10171 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev) pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); ufshcd_remove(hba); + ufshcd_dealloc_host(hba); } /** @@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = ufshcd_init(hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); + ufshcd_dealloc_host(hba); return err; } diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index db53f38da864f76baa3c5f697b1673d109539c32..a72a4ba78125b09a135c781533d46b1cbf0bc042 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, if (ret) { dev_err(dev, "%s: unable to find %s err %d\n", __func__, prop_name, ret); - goto out_free; + goto out; } vreg->min_uA = 0; @@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, goto out; -out_free: - devm_kfree(dev, vreg); - vreg = NULL; out: if (!ret) *out_vreg = vreg; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 05c745663c103a7ddb70d4e6845702a6c08ebd08..20e5e5fb048cddbb3df34e06723adfa083083835 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -45,6 +45,8 @@ #include "ufs_quirks.h" #include "unipro.h" +#define UFSHCD_REQ_SENSE_SIZE 18 + #define UFSHCD_ENABLE_INTRS 
(UTP_TRANSFER_REQ_COMPL |\ UTP_TASK_REQ_COMPL |\ UFSHCD_ERROR_MASK) @@ -57,15 +59,9 @@ #define NOP_OUT_TIMEOUT 30 /* msecs */ /* Query request retries */ -#define QUERY_REQ_RETRIES 10 +#define QUERY_REQ_RETRIES 3 /* Query request timeout */ -#define QUERY_REQ_TIMEOUT 30 /* msec */ -/* - * Query request timeout for fDeviceInit flag - * fDeviceInit query response time for some devices is too large that default - * QUERY_REQ_TIMEOUT may not be enough for such devices. - */ -#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */ +#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ /* Task management command timeout */ #define TM_CMD_TIMEOUT 100 /* msecs */ @@ -123,6 +119,7 @@ enum { UFSHCD_STATE_RESET, UFSHCD_STATE_ERROR, UFSHCD_STATE_OPERATIONAL, + UFSHCD_STATE_EH_SCHEDULED, }; /* UFSHCD error handling flags */ @@ -188,6 +185,30 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) return ufs_pm_lvl_states[lvl].link_state; } +static struct ufs_dev_fix ufs_fixups[] = { + /* UFS cards deviations table */ + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_NO_FASTAUTO), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE), + UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), + UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG", + UFS_DEVICE_QUIRK_PA_TACTIVATE), + UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", + UFS_DEVICE_QUIRK_PA_TACTIVATE), + UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), + UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), + + END_FIX +}; + static void ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); @@ -291,10 +312,24 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, */ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) { - if (hba->ufs_version == UFSHCI_VERSION_10) - return INTERRUPT_MASK_ALL_VER_10; - else - return INTERRUPT_MASK_ALL_VER_11; + u32 intr_mask = 0; + + switch (hba->ufs_version) { + case UFSHCI_VERSION_10: + intr_mask = INTERRUPT_MASK_ALL_VER_10; + break; + /* allow fall through */ + case UFSHCI_VERSION_11: + case UFSHCI_VERSION_20: + intr_mask = INTERRUPT_MASK_ALL_VER_11; + break; + /* allow fall through */ + case UFSHCI_VERSION_21: + default: + intr_mask = INTERRUPT_MASK_ALL_VER_21; + } + + return intr_mask; } /** @@ -598,6 +633,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) return false; } +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) +{ + if (ufshcd_is_clkscaling_enabled(hba)) { + devfreq_suspend_device(hba->devfreq); + hba->clk_scaling.window_start_t = 0; + } +} + +static void ufshcd_resume_clkscaling(struct ufs_hba *hba) +{ + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_resume_device(hba->devfreq); +} + static void ufshcd_ungate_work(struct work_struct *work) { int ret; @@ -631,8 +680,7 @@ static void ufshcd_ungate_work(struct work_struct *work) hba->clk_gating.is_suspended = false; } unblock_reqs: - if (ufshcd_is_clkscaling_enabled(hba)) - devfreq_resume_device(hba->devfreq); + ufshcd_resume_clkscaling(hba); scsi_unblock_requests(hba->host); } @@ -660,6 +708,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) 
start: switch (hba->clk_gating.state) { case CLKS_ON: + /* + * Wait for the ungate work to complete if in progress. + * Though the clocks may be in ON state, the link could + * still be in hibern8 state if hibern8 is allowed + * during clock gating. + * Make sure we exit hibern8 state also in addition to + * clocks being ON. + */ + if (ufshcd_can_hibern8_during_gating(hba) && + ufshcd_is_link_hibern8(hba)) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + flush_work(&hba->clk_gating.ungate_work); + spin_lock_irqsave(hba->host->host_lock, flags); + goto start; + } break; case REQ_CLKS_OFF: if (cancel_delayed_work(&hba->clk_gating.gate_work)) { @@ -709,7 +772,14 @@ static void ufshcd_gate_work(struct work_struct *work) unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->clk_gating.is_suspended) { + /* + * In case you are here to cancel this work the gating state + * would be marked as REQ_CLKS_ON. In this case save time by + * skipping the gating work and exit after changing the clock + * state to CLKS_ON. + */ + if (hba->clk_gating.is_suspended || + (hba->clk_gating.state == REQ_CLKS_ON)) { hba->clk_gating.state = CLKS_ON; goto rel_lock; } @@ -731,10 +801,7 @@ static void ufshcd_gate_work(struct work_struct *work) ufshcd_set_link_hibern8(hba); } - if (ufshcd_is_clkscaling_enabled(hba)) { - devfreq_suspend_device(hba->devfreq); - hba->clk_scaling.window_start_t = 0; - } + ufshcd_suspend_clkscaling(hba); if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); @@ -863,7 +930,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) if (!hba->outstanding_reqs && scaling->is_busy_started) { scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), scaling->busy_start_t)); - scaling->busy_start_t = ktime_set(0, 0); + scaling->busy_start_t = 0; scaling->is_busy_started = false; } } @@ -878,6 +945,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); + /* Make sure that doorbell is committed immediately */ + wmb(); } /** @@ -889,10 +958,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) int len; if (lrbp->sense_buffer && ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { + int len_to_copy; + len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); + len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len); + memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data, - min_t(int, len, SCSI_SENSE_BUFFERSIZE)); + min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE)); } } @@ -1088,7 +1161,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * * Returns 0 in case of success, non-zero value in case of failure */ -static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) +static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { struct ufshcd_sg_entry *prd_table; struct scatterlist *sg; @@ -1102,8 +1175,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) return sg_segments; if (sg_segments) { - lrbp->utr_descriptor_ptr->prd_table_length = - cpu_to_le16((u16) (sg_segments)); + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16((u16)(sg_segments * + sizeof(struct ufshcd_sg_entry))); + else + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16((u16) (sg_segments)); prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; @@ -1410,6 +1488,7 @@ static int 
ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) switch (hba->ufshcd_state) { case UFSHCD_STATE_OPERATIONAL: break; + case UFSHCD_STATE_EH_SCHEDULED: case UFSHCD_STATE_RESET: err = SCSI_MLQUEUE_HOST_BUSY; goto out_unlock; @@ -1457,7 +1536,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) WARN_ON(lrbp->cmd); lrbp->cmd = cmd; - lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; + lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE; lrbp->sense_buffer = cmd->sense_buffer; lrbp->task_tag = tag; lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); @@ -1465,15 +1544,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_comp_scsi_upiu(hba, lrbp); - err = ufshcd_map_sg(lrbp); + err = ufshcd_map_sg(hba, lrbp); if (err) { lrbp->cmd = NULL; clear_bit_unlock(tag, &hba->lrb_in_use); goto out; } + /* Make sure descriptors are ready before ringing the doorbell */ + wmb(); /* issue command to the controller */ spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false)); ufshcd_send_command(hba, tag); out_unlock: spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -1581,6 +1663,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, time_left = wait_for_completion_timeout(hba->dev_cmd.complete, msecs_to_jiffies(max_timeout)); + /* Make sure descriptors are ready before ringing the doorbell */ + wmb(); spin_lock_irqsave(hba->host->host_lock, flags); hba->dev_cmd.complete = NULL; if (likely(time_left)) { @@ -1683,6 +1767,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, /* Make sure descriptors are ready before ringing the doorbell */ wmb(); spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false)); ufshcd_send_command(hba, tag); spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -1789,9 +1874,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, goto out_unlock; } - if (idn == QUERY_FLAG_IDN_FDEVICEINIT) - timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT; - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); if (err) { @@ -1861,8 +1943,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); if (err) { - dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", - __func__, opcode, idn, err); + dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", + __func__, opcode, idn, index, err); goto out_unlock; } @@ -1961,8 +2043,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); if (err) { - dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", - __func__, opcode, idn, err); + dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", + __func__, opcode, idn, index, err); goto out_unlock; } @@ -2055,18 +2137,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, desc_id, desc_index, 0, desc_buf, &buff_len); - if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || - (desc_buf[QUERY_DESC_LENGTH_OFFSET] != - ufs_query_desc_max_size[desc_id]) - || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) { - dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d", - __func__, desc_id, param_offset, buff_len, ret); - if (!ret) - ret = -EINVAL; + if (ret) { + dev_err(hba->dev, "%s: Failed reading descriptor. 
desc_id %d, desc_index %d, param_offset %d, ret %d", + __func__, desc_id, desc_index, param_offset, ret); goto out; } + /* Sanity check */ + if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { + dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header", + __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); + ret = -EINVAL; + goto out; + } + + /* + * While reading variable size descriptors (like string descriptor), + * some UFS devices may report the "LENGTH" (field in "Transaction + * Specific fields" of Query Response UPIU) same as what was requested + * in Query Request UPIU instead of reporting the actual size of the + * variable size descriptor. + * It is safe to ignore the "LENGTH" field for variable size + * descriptors, as we can always derive the length of the descriptor from + * the descriptor header fields. Hence this change imposes the length + * match check only for fixed size descriptors (for which we always + * request the correct size as part of Query Request UPIU). + */ + if ((desc_id != QUERY_DESC_IDN_STRING) && + (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) { + dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d", + __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]); + ret = -EINVAL; + goto out; + } + if (is_kmalloc) memcpy(param_read_buf, &desc_buf[param_offset], param_size); out: @@ -2088,7 +2193,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba, u8 *buf, u32 size) { - return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); + int err = 0; + int retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + /* Read descriptor */ + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); + if (!err) + break; + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); + } + + return err; } int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) @@ -2320,12 +2436,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); /* Response upiu and prdt offset should be in double words */ - utrdlp[i].response_upiu_offset = + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { + utrdlp[i].response_upiu_offset = + cpu_to_le16(response_offset); + utrdlp[i].prd_table_offset = + cpu_to_le16(prdt_offset); + utrdlp[i].response_upiu_length = + cpu_to_le16(ALIGNED_UPIU_SIZE); + } else { + utrdlp[i].response_upiu_offset = cpu_to_le16((response_offset >> 2)); - utrdlp[i].prd_table_offset = + utrdlp[i].prd_table_offset = cpu_to_le16((prdt_offset >> 2)); - utrdlp[i].response_upiu_length = + utrdlp[i].response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); + } hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); hba->lrb[i].ucd_req_ptr = @@ -2429,10 +2554,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); } while (ret && peer && --retries); - if (!retries) + if (ret) dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", - set, UIC_GET_ATTR_ID(attr_sel), mib_val, - retries); + set, UIC_GET_ATTR_ID(attr_sel), mib_val, + UFS_UIC_COMMAND_RETRIES - retries); return ret; } @@ -2496,9 +2621,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, get, UIC_GET_ATTR_ID(attr_sel), ret); } while (ret && peer && --retries); - if (!retries) + if (ret) dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", - get, UIC_GET_ATTR_ID(attr_sel), retries); + get, UIC_GET_ATTR_ID(attr_sel), + UFS_UIC_COMMAND_RETRIES - retries); if (mib_val && !ret) *mib_val = 
uic_cmd.argument3; @@ -2651,6 +2777,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) int ret; struct uic_command uic_cmd = {0}; + ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); + uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); @@ -2664,7 +2792,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) */ if (ufshcd_link_recovery(hba)) ret = -ENOLINK; - } + } else + ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, + POST_CHANGE); return ret; } @@ -2687,13 +2817,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) struct uic_command uic_cmd = {0}; int ret; + ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); + uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); if (ret) { dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", __func__, ret); ret = ufshcd_link_recovery(hba); - } + } else + ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, + POST_CHANGE); return ret; } @@ -2725,8 +2859,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) if (hba->max_pwr_info.is_valid) return 0; - pwr_info->pwr_tx = FASTAUTO_MODE; - pwr_info->pwr_rx = FASTAUTO_MODE; + pwr_info->pwr_tx = FAST_MODE; + pwr_info->pwr_rx = FAST_MODE; pwr_info->hs_rate = PA_HS_MODE_B; /* Get the connected lane count */ @@ -2757,7 +2891,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) __func__, pwr_info->gear_rx); return -EINVAL; } - pwr_info->pwr_rx = SLOWAUTO_MODE; + pwr_info->pwr_rx = SLOW_MODE; } ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), @@ -2770,7 +2904,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) __func__, pwr_info->gear_tx); return -EINVAL; } - pwr_info->pwr_tx = SLOWAUTO_MODE; + pwr_info->pwr_tx = SLOW_MODE; } hba->max_pwr_info.is_valid = true; @@ -3090,7 +3224,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba) { int ret; int retries = DME_LINKSTARTUP_RETRIES; + bool link_startup_again = false; + + /* + * If UFS device isn't active then we will have to issue link startup + * 2 times to make sure the device state move to active. + */ + if (!ufshcd_is_ufs_dev_active(hba)) + link_startup_again = true; +link_startup: do { ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); @@ -3116,6 +3259,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba) /* failed to get the link up... 
retire */ goto out; + if (link_startup_again) { + link_startup_again = false; + retries = DME_LINKSTARTUP_RETRIES; + goto link_startup; + } + if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { ret = ufshcd_disable_device_tx_lcc(hba); if (ret) @@ -3181,16 +3330,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev) { int ret = 0; u8 lun_qdepth; + int retries; struct ufs_hba *hba; hba = shost_priv(sdev->host); lun_qdepth = hba->nutrs; - ret = ufshcd_read_unit_desc_param(hba, - ufshcd_scsi_to_upiu_lun(sdev->lun), - UNIT_DESC_PARAM_LU_Q_DEPTH, - &lun_qdepth, - sizeof(lun_qdepth)); + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + /* Read descriptor*/ + ret = ufshcd_read_unit_desc_param(hba, + ufshcd_scsi_to_upiu_lun(sdev->lun), + UNIT_DESC_PARAM_LU_Q_DEPTH, + &lun_qdepth, + sizeof(lun_qdepth)); + if (!ret || ret == -ENOTSUPP) + break; + + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret); + } /* Some WLUN doesn't support unit descriptor */ if (ret == -EOPNOTSUPP) @@ -4097,6 +4254,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba) { u32 reg; + /* PHY layer lane error */ + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); + /* Ignore LINERESET indication, as this is not an error */ + if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && + (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) + /* + * To know whether this error is fatal or not, DB timeout + * must be checked but this error is handled separately. + */ + dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__); + /* PA_INIT_ERROR is fatal and needs UIC reset */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) @@ -4158,7 +4326,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba) /* block commands from scsi mid-layer */ scsi_block_requests(hba->host); - hba->ufshcd_state = UFSHCD_STATE_ERROR; + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED; schedule_work(&hba->eh_work); } } @@ -4311,6 +4479,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, task_req_upiup->input_param1 = cpu_to_be32(lun_id); task_req_upiup->input_param2 = cpu_to_be32(task_id); + ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function); + /* send command to the controller */ __set_bit(free_slot, &hba->outstanding_tasks); @@ -4318,6 +4488,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, wmb(); ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); + /* Make sure that doorbell is committed immediately */ + wmb(); spin_unlock_irqrestore(host->host_lock, flags); @@ -4722,6 +4894,24 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, return icc_level; } +static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level) +{ + int ret = 0; + int retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + /* write attribute */ + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level); + if (!ret) + break; + + dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret); + } + + return ret; +} + static void ufshcd_init_icc_levels(struct ufs_hba *hba) { int ret; @@ -4742,9 +4932,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, hba->init_prefetch_data.icc_level); - ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, - QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, - &hba->init_prefetch_data.icc_level); + ret = 
ufshcd_set_icc_levels_attr(hba, + hba->init_prefetch_data.icc_level); if (ret) dev_err(hba->dev, @@ -4965,6 +5154,76 @@ static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) return ret; } +/** + * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is + * less than device PA_TACTIVATE time. + * @hba: per-adapter instance + * + * Some UFS devices require host PA_TACTIVATE to be lower than device + * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk + * for such devices. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) +{ + int ret = 0; + u32 granularity, peer_granularity; + u32 pa_tactivate, peer_pa_tactivate; + u32 pa_tactivate_us, peer_pa_tactivate_us; + u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &granularity); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &peer_granularity); + if (ret) + goto out; + + if ((granularity < PA_GRANULARITY_MIN_VAL) || + (granularity > PA_GRANULARITY_MAX_VAL)) { + dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", + __func__, granularity); + return -EINVAL; + } + + if ((peer_granularity < PA_GRANULARITY_MIN_VAL) || + (peer_granularity > PA_GRANULARITY_MAX_VAL)) { + dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", + __func__, peer_granularity); + return -EINVAL; + } + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), + &peer_pa_tactivate); + if (ret) + goto out; + + pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1]; + peer_pa_tactivate_us = peer_pa_tactivate * + gran_to_us_table[peer_granularity - 1]; + + if (pa_tactivate_us > peer_pa_tactivate_us) { + u32 new_peer_pa_tactivate; + + new_peer_pa_tactivate = pa_tactivate_us / + gran_to_us_table[peer_granularity - 1]; + new_peer_pa_tactivate++; + ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + new_peer_pa_tactivate); + } + +out: + return ret; +} + static void ufshcd_tune_unipro_params(struct ufs_hba *hba) { if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { @@ -4975,6 +5234,11 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) /* set 1ms timeout for PA_TACTIVATE */ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) + ufshcd_quirk_tune_host_pa_tactivate(hba); + + ufshcd_vops_apply_dev_quirks(hba); } /** @@ -5027,9 +5291,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) __func__); } else { ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - if (ret) + if (ret) { dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", __func__, ret); + goto out; + } } /* set the state as operational after switching to desired gear */ @@ -5062,8 +5328,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) hba->is_init_prefetch = true; /* Resume devfreq after UFS device is detected */ - if (ufshcd_is_clkscaling_enabled(hba)) - devfreq_resume_device(hba->devfreq); + ufshcd_resume_clkscaling(hba); out: /* @@ -5389,6 +5654,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, if (!head || list_empty(head)) goto out; + ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); + if (ret) + return ret; + list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk)) { if (skip_ref_clk && 
!strcmp(clki->name, "ref_clk")) @@ -5410,7 +5679,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, } } - ret = ufshcd_vops_setup_clocks(hba, on); + ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); + if (ret) + return ret; + out: if (ret) { list_for_each_entry(clki, head, list) { @@ -5500,8 +5772,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba) if (!hba->vops) return; - ufshcd_vops_setup_clocks(hba, false); - ufshcd_vops_setup_regulators(hba, false); ufshcd_vops_exit(hba); @@ -5564,6 +5834,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) if (hba->is_powered) { ufshcd_variant_hba_exit(hba); ufshcd_setup_vreg(hba, false); + ufshcd_suspend_clkscaling(hba); ufshcd_setup_clocks(hba, false); ufshcd_setup_hba_vreg(hba, false); hba->is_powered = false; @@ -5577,20 +5848,20 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) 0, 0, 0, - SCSI_SENSE_BUFFERSIZE, + UFSHCD_REQ_SENSE_SIZE, 0}; char *buffer; int ret; - buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL); if (!buffer) { ret = -ENOMEM; goto out; } ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, - SCSI_SENSE_BUFFERSIZE, NULL, - msecs_to_jiffies(1000), 3, NULL, REQ_PM); + UFSHCD_REQ_SENSE_SIZE, NULL, + msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM); if (ret) pr_err("%s: failed with err %d\n", __func__, ret); @@ -5652,11 +5923,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, /* * Current function would be generally called from the power management - * callbacks hence set the REQ_PM flag so that it doesn't resume the + * callbacks hence set the RQF_PM flag so that it doesn't resume the * already suspended childs. */ ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - START_STOP_TIMEOUT, 0, NULL, REQ_PM); + START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM); if (ret) { sdev_printk(KERN_WARNING, sdp, "START_STOP failed for power mode: %d, result %x\n", @@ -5766,7 +6037,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) !hba->dev_info.is_lu_power_on_wp) { ret = ufshcd_setup_vreg(hba, true); } else if (!ufshcd_is_ufs_dev_active(hba)) { - ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); if (!ret && !ufshcd_is_link_active(hba)) { ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); if (ret) @@ -5775,6 +6045,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) if (ret) goto vccq_lpm; } + ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); } goto out; @@ -5839,6 +6110,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_hold(hba, false); hba->clk_gating.is_suspended = true; + ufshcd_suspend_clkscaling(hba); + if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && req_link_state == UIC_LINK_ACTIVE_STATE) { goto disable_clks; @@ -5846,12 +6119,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && (req_link_state == hba->uic_link_state)) - goto out; + goto enable_gating; /* UFS device & link must be active before we enter in this function */ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { ret = -EINVAL; - goto out; + goto enable_gating; } if (ufshcd_is_runtime_pm(pm_op)) { @@ -5887,15 +6160,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_vreg_set_lpm(hba); disable_clks: - /* - * The clock scaling needs access to controller registers. Hence, Wait - * for pending clock scaling work to be done before clocks are - * turned off. 
- */ - if (ufshcd_is_clkscaling_enabled(hba)) { - devfreq_suspend_device(hba->devfreq); - hba->clk_scaling.window_start_t = 0; - } /* * Call vendor specific suspend callback. As these callbacks may access * vendor specific host controller register space call them before the @@ -5905,10 +6169,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (ret) goto set_link_active; - ret = ufshcd_vops_setup_clocks(hba, false); - if (ret) - goto vops_resume; - if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); else @@ -5925,9 +6185,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_hba_vreg_set_lpm(hba); goto out; -vops_resume: - ufshcd_vops_resume(hba, pm_op); set_link_active: + ufshcd_resume_clkscaling(hba); ufshcd_vreg_set_hpm(hba); if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) ufshcd_set_link_active(hba); @@ -5937,6 +6196,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) ufshcd_disable_auto_bkops(hba); enable_gating: + ufshcd_resume_clkscaling(hba); hba->clk_gating.is_suspended = false; ufshcd_release(hba); out: @@ -6015,8 +6275,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_urgent_bkops(hba); hba->clk_gating.is_suspended = false; - if (ufshcd_is_clkscaling_enabled(hba)) - devfreq_resume_device(hba->devfreq); + ufshcd_resume_clkscaling(hba); /* Schedule clock gating in case of no access to UFS device yet */ ufshcd_release(hba); @@ -6030,6 +6289,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_vreg_set_lpm(hba); disable_irq_and_vops_clks: ufshcd_disable_irq(hba); + ufshcd_suspend_clkscaling(hba); ufshcd_setup_clocks(hba, false); out: hba->pm_op_in_progress = 0; @@ -6052,16 +6312,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba) if (!hba || !hba->is_powered) return 0; - if (pm_runtime_suspended(hba->dev)) { - if (hba->rpm_lvl == hba->spm_lvl) - /* - * There is possibility that device may still be in - * active state during the runtime suspend. - */ - if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == - hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) - goto out; + if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == + hba->curr_dev_pwr_mode) && + (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) == + hba->uic_link_state)) + goto out; + if (pm_runtime_suspended(hba->dev)) { /* * UFS device and/or UFS link low power states during runtime * suspend seems to be different than what is expected during @@ -6092,7 +6349,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend); int ufshcd_system_resume(struct ufs_hba *hba) { - if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) + if (!hba) + return -EINVAL; + + if (!hba->is_powered || pm_runtime_suspended(hba->dev)) /* * Let the runtime resume take care of resuming * if runtime suspended. 
@@ -6113,7 +6373,10 @@ EXPORT_SYMBOL(ufshcd_system_resume); */ int ufshcd_runtime_suspend(struct ufs_hba *hba) { - if (!hba || !hba->is_powered) + if (!hba) + return -EINVAL; + + if (!hba->is_powered) return 0; return ufshcd_suspend(hba, UFS_RUNTIME_PM); @@ -6143,10 +6406,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend); */ int ufshcd_runtime_resume(struct ufs_hba *hba) { - if (!hba || !hba->is_powered) + if (!hba) + return -EINVAL; + + if (!hba->is_powered) return 0; - else - return ufshcd_resume(hba, UFS_RUNTIME_PM); + + return ufshcd_resume(hba, UFS_RUNTIME_PM); } EXPORT_SYMBOL(ufshcd_runtime_resume); @@ -6198,11 +6464,7 @@ void ufshcd_remove(struct ufs_hba *hba) ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba, true); - scsi_host_put(hba->host); - ufshcd_exit_clk_gating(hba); - if (ufshcd_is_clkscaling_enabled(hba)) - devfreq_remove_device(hba->devfreq); ufshcd_hba_exit(hba); } EXPORT_SYMBOL_GPL(ufshcd_remove); @@ -6324,15 +6586,47 @@ static int ufshcd_devfreq_target(struct device *dev, { int err = 0; struct ufs_hba *hba = dev_get_drvdata(dev); + bool release_clk_hold = false; + unsigned long irq_flags; if (!ufshcd_is_clkscaling_enabled(hba)) return -EINVAL; + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (ufshcd_eh_in_progress(hba)) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return 0; + } + + if (ufshcd_is_clkgating_allowed(hba) && + (hba->clk_gating.state != CLKS_ON)) { + if (cancel_delayed_work(&hba->clk_gating.gate_work)) { + /* hold the vote until the scaling work is completed */ + hba->clk_gating.active_reqs++; + release_clk_hold = true; + hba->clk_gating.state = CLKS_ON; + } else { + /* + * Clock gating work seems to be running in parallel + * hence skip scaling work to avoid deadlock between + * current scaling work and gating work. 
+ */ + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return 0; + } + } + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + if (*freq == UINT_MAX) err = ufshcd_scale_clks(hba, true); else if (*freq == 0) err = ufshcd_scale_clks(hba, false); + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (release_clk_hold) + __ufshcd_release(hba); + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + return err; } @@ -6367,7 +6661,7 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, scaling->busy_start_t = ktime_get(); scaling->is_busy_started = true; } else { - scaling->busy_start_t = ktime_set(0, 0); + scaling->busy_start_t = 0; scaling->is_busy_started = false; } spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -6413,6 +6707,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Get UFS version supported by the controller */ hba->ufs_version = ufshcd_get_ufs_version(hba); + if ((hba->ufs_version != UFSHCI_VERSION_10) && + (hba->ufs_version != UFSHCI_VERSION_11) && + (hba->ufs_version != UFSHCI_VERSION_20) && + (hba->ufs_version != UFSHCI_VERSION_21)) + dev_err(hba->dev, "invalid UFS version 0x%x\n", + hba->ufs_version); + /* Get Interrupt bit mask per version */ hba->intr_mask = ufshcd_get_intr_mask(hba); @@ -6498,7 +6799,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) } if (ufshcd_is_clkscaling_enabled(hba)) { - hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, + hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile, "simple_ondemand", NULL); if (IS_ERR(hba->devfreq)) { dev_err(hba->dev, "Unable to register with devfreq %ld\n", @@ -6507,18 +6808,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto out_remove_scsi_host; } /* Suspend devfreq until the UFS device is detected */ - devfreq_suspend_device(hba->devfreq); - hba->clk_scaling.window_start_t = 0; + ufshcd_suspend_clkscaling(hba); } /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); /* - * The device-initialize-sequence hasn't been invoked yet. - * Set the device to power-off state + * We are assuming that device wasn't put in sleep/power-down + * state exclusively during the boot stage before kernel. + * This assumption helps avoid doing link startup twice during + * ufshcd_probe_hba(). */ - ufshcd_set_ufs_dev_poweroff(hba); + ufshcd_set_ufs_dev_active(hba); async_schedule(ufshcd_async_scan, hba); @@ -6530,7 +6832,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_exit_clk_gating(hba); out_disable: hba->is_irq_enabled = false; - scsi_host_put(host); ufshcd_hba_exit(hba); out_error: return err; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 430bef11129324a49b131460b9dfa4f0d7b97022..08cd26ed238270a3f6bfb797137f4962bfe04312 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -261,6 +261,12 @@ struct ufs_pwr_mode_info { * @pwr_change_notify: called before and after a power mode change * is carried out to allow vendor spesific capabilities * to be set. 
+ * @setup_xfer_req: called before any transfer request is issued + * to set some things + * @setup_task_mgmt: called before any task management request is issued + * to set some things + * @hibern8_notify: called around hibern8 enter/exit + * @apply_dev_quirks: called to apply device specific quirks * @suspend: called during host controller PM callback * @resume: called during host controller PM callback * @dbg_register_dump: used to dump controller debug information @@ -273,7 +279,8 @@ struct ufs_hba_variant_ops { u32 (*get_ufs_hci_version)(struct ufs_hba *); int (*clk_scale_notify)(struct ufs_hba *, bool, enum ufs_notify_change_status); - int (*setup_clocks)(struct ufs_hba *, bool); + int (*setup_clocks)(struct ufs_hba *, bool, + enum ufs_notify_change_status); int (*setup_regulators)(struct ufs_hba *, bool); int (*hce_enable_notify)(struct ufs_hba *, enum ufs_notify_change_status); @@ -283,6 +290,11 @@ struct ufs_hba_variant_ops { enum ufs_notify_change_status status, struct ufs_pa_layer_attr *, struct ufs_pa_layer_attr *); + void (*setup_xfer_req)(struct ufs_hba *, int, bool); + void (*setup_task_mgmt)(struct ufs_hba *, int, u8); + void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme, + enum ufs_notify_change_status); + int (*apply_dev_quirks)(struct ufs_hba *); int (*suspend)(struct ufs_hba *, enum ufs_pm_op); int (*resume)(struct ufs_hba *, enum ufs_pm_op); void (*dbg_register_dump)(struct ufs_hba *hba); @@ -474,6 +486,12 @@ struct ufs_hba { */ #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5) + /* + * This quirk needs to be enabled if the host controller regards + * resolution of the values of PRDTO and PRDTL in UTRD as byte. + */ + #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7) + unsigned int quirks; /* Deviations from standard UFSHCI spec. */ /* Device deviations from standard UFS device spec. 
*/ @@ -755,10 +773,11 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, return 0; } -static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on) +static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) { if (hba->vops && hba->vops->setup_clocks) - return hba->vops->setup_clocks(hba, on); + return hba->vops->setup_clocks(hba, on, status); return 0; } @@ -799,6 +818,35 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, return -ENOTSUPP; } +static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag, + bool is_scsi_cmd) +{ + if (hba->vops && hba->vops->setup_xfer_req) + return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); +} + +static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, + int tag, u8 tm_function) +{ + if (hba->vops && hba->vops->setup_task_mgmt) + return hba->vops->setup_task_mgmt(hba, tag, tm_function); +} + +static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba, + enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->hibern8_notify) + return hba->vops->hibern8_notify(hba, cmd, status); +} + +static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->apply_dev_quirks) + return hba->vops->apply_dev_quirks(hba); + return 0; +} + static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op) { if (hba->vops && hba->vops->suspend) diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 9599741ff606cfad3eb037a2b0bc4da1227134cf..8c5190e2e1c928407e8aac111b67758fff9bc191 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -72,6 +72,10 @@ enum { REG_UIC_COMMAND_ARG_1 = 0x94, REG_UIC_COMMAND_ARG_2 = 0x98, REG_UIC_COMMAND_ARG_3 = 0x9C, + REG_UFS_CCAP = 0x100, + REG_UFS_CRYPTOCAP = 0x104, + + UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400, }; /* Controller capability masks */ @@ -83,6 +87,8 @@ enum { MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, }; +#define UFS_MASK(mask, offset) ((mask) << (offset)) + /* UFS Version 08h */ #define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0) #define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16) @@ -166,6 +172,7 @@ enum { /* UECPA - Host UIC Error Code PHY Adapter Layer 38h */ #define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31) #define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F +#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF /* UECDL - Host UIC Error Code Data Link Layer 3Ch */ #define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) @@ -272,6 +279,9 @@ enum { /* Interrupt disable mask for UFSHCI v1.1 */ INTERRUPT_MASK_ALL_VER_11 = 0x31FFF, + + /* Interrupt disable mask for UFSHCI v2.1 */ + INTERRUPT_MASK_ALL_VER_21 = 0x71FFF, }; /* diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index eff8b567557532090f745779ce92f89d2171b94b..23129d7b2678dfea93b21f010c31a7aece9e9233 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -123,6 +123,7 @@ #define PA_MAXRXHSGEAR 0x1587 #define PA_RXHSUNTERMCAP 0x15A5 #define PA_RXLSTERMCAP 0x15A6 +#define PA_GRANULARITY 0x15AA #define PA_PACPREQTIMEOUT 0x1590 #define PA_PACPREQEOBTIMEOUT 0x1591 #define PA_HIBERN8TIME 0x15A7 @@ -158,6 +159,9 @@ #define VS_DEBUGOMC 0xD09E #define VS_POWERSTATE 0xD083 +#define PA_GRANULARITY_MIN_VAL 1 +#define PA_GRANULARITY_MAX_VAL 6 + /* PHY Adapter Protocol Constants */ #define PA_MAXDATALANES 4 diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 
4a0d3cdc607cd5d8bfdb28867187fd7d12df0919..15ca09cd16f34ad6f7a8ece088e1dababef3a1b5 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -793,6 +793,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd) unsigned long flags; int result = SUCCESS; DECLARE_COMPLETION_ONSTACK(abort_cmp); + int done; scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", adapter->host->host_no, cmd); @@ -824,10 +825,10 @@ static int pvscsi_abort(struct scsi_cmnd *cmd) pvscsi_abort_cmd(adapter, ctx); spin_unlock_irqrestore(&adapter->hw_lock, flags); /* Wait for 2 secs for the completion. */ - wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); + done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); spin_lock_irqsave(&adapter->hw_lock, flags); - if (!completion_done(&abort_cmp)) { + if (!done) { /* * Failed to abort the command, unmark the fact that it * was requested to be aborted. diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index c097d2ccbde3163eaa9d1ab0a20f402446c79002..d41292ef85f2ff93b879237a8da7f2357c489b24 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -26,7 +26,7 @@ #include -#define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k" +#define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k" #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 9dc8687bf0480e53b2d89431ece47f8504fd5779..9aa1fe1fc93967109256690be004d31fa04b52e7 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -79,10 +79,13 @@ struct vscsifrnt_shadow { /* command between backend and frontend */ unsigned char act; + uint8_t nr_segments; uint16_t rqid; + uint16_t ref_rqid; unsigned int nr_grants; /* number of grants in gref[] */ struct scsiif_request_segment *sg; /* scatter/gather elements */ + struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE]; /* Do reset or abort function. 
*/ wait_queue_head_t wq_reset; /* reset work queue */ @@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id) scsifront_wake_up(info); } -static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info) +static int scsifront_do_request(struct vscsifrnt_info *info, + struct vscsifrnt_shadow *shadow) { struct vscsiif_front_ring *ring = &(info->ring); struct vscsiif_request *ring_req; + struct scsi_cmnd *sc = shadow->sc; uint32_t id; + int i, notify; + + if (RING_FULL(&info->ring)) + return -EBUSY; id = scsifront_get_rqid(info); /* use id in response */ if (id >= VSCSIIF_MAX_REQS) - return NULL; + return -EBUSY; - ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); + info->shadow[id] = shadow; + shadow->rqid = id; + ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); ring->req_prod_pvt++; - ring_req->rqid = (uint16_t)id; + ring_req->rqid = id; + ring_req->act = shadow->act; + ring_req->ref_rqid = shadow->ref_rqid; + ring_req->nr_segments = shadow->nr_segments; - return ring_req; -} + ring_req->id = sc->device->id; + ring_req->lun = sc->device->lun; + ring_req->channel = sc->device->channel; + ring_req->cmd_len = sc->cmd_len; -static void scsifront_do_request(struct vscsifrnt_info *info) -{ - struct vscsiif_front_ring *ring = &(info->ring); - int notify; + BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); + + memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); + + ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; + ring_req->timeout_per_command = sc->request->timeout / HZ; + + for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++) + ring_req->seg[i] = shadow->seg[i]; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); if (notify) notify_remote_via_irq(info->irq); + + return 0; } -static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id) +static void scsifront_gnttab_done(struct vscsifrnt_info *info, + struct vscsifrnt_shadow *shadow) { - struct vscsifrnt_shadow *s = info->shadow[id]; int i; - if (s->sc->sc_data_direction == DMA_NONE) + if (shadow->sc->sc_data_direction == DMA_NONE) return; - for (i = 0; i < s->nr_grants; i++) { - if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) { + for (i = 0; i < shadow->nr_grants; i++) { + if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME "grant still in use by backend\n"); BUG(); } - gnttab_end_foreign_access(s->gref[i], 0, 0UL); + gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); } - kfree(s->sg); + kfree(shadow->sg); } static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, struct vscsiif_response *ring_rsp) { + struct vscsifrnt_shadow *shadow; struct scsi_cmnd *sc; uint32_t id; uint8_t sense_len; id = ring_rsp->rqid; - sc = info->shadow[id]->sc; + shadow = info->shadow[id]; + sc = shadow->sc; BUG_ON(sc == NULL); - scsifront_gnttab_done(info, id); + scsifront_gnttab_done(info, shadow); scsifront_put_rqid(info, id); sc->result = ring_rsp->rslt; @@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info) static int map_data_for_request(struct vscsifrnt_info *info, struct scsi_cmnd *sc, - struct vscsiif_request *ring_req, struct vscsifrnt_shadow *shadow) { grant_ref_t gref_head; @@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info, struct scatterlist *sg; struct scsiif_request_segment *seg; - ring_req->nr_segments = 0; if (sc->sc_data_direction == DMA_NONE || !data_len) return 0; @@ -398,7 +421,7 @@ static int 
map_data_for_request(struct vscsifrnt_info *info, if (!shadow->sg) return -ENOMEM; } - seg = shadow->sg ? : ring_req->seg; + seg = shadow->sg ? : shadow->seg; err = gnttab_alloc_grant_references(seg_grants + data_grants, &gref_head); @@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info, info->dev->otherend_id, xen_page_to_gfn(page), 1); shadow->gref[ref_cnt] = ref; - ring_req->seg[ref_cnt].gref = ref; - ring_req->seg[ref_cnt].offset = (uint16_t)off; - ring_req->seg[ref_cnt].length = (uint16_t)bytes; + shadow->seg[ref_cnt].gref = ref; + shadow->seg[ref_cnt].offset = (uint16_t)off; + shadow->seg[ref_cnt].length = (uint16_t)bytes; page++; len -= bytes; @@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info, } if (seg_grants) - ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants; + shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants; else - ring_req->nr_segments = (uint8_t)ref_cnt; + shadow->nr_segments = (uint8_t)ref_cnt; shadow->nr_grants = ref_cnt; return 0; } -static struct vscsiif_request *scsifront_command2ring( - struct vscsifrnt_info *info, struct scsi_cmnd *sc, - struct vscsifrnt_shadow *shadow) -{ - struct vscsiif_request *ring_req; - - memset(shadow, 0, sizeof(*shadow)); - - ring_req = scsifront_pre_req(info); - if (!ring_req) - return NULL; - - info->shadow[ring_req->rqid] = shadow; - shadow->rqid = ring_req->rqid; - - ring_req->id = sc->device->id; - ring_req->lun = sc->device->lun; - ring_req->channel = sc->device->channel; - ring_req->cmd_len = sc->cmd_len; - - BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); - - memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); - - ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; - ring_req->timeout_per_command = sc->request->timeout / HZ; - - return ring_req; -} - static int scsifront_enter(struct vscsifrnt_info *info) { if (info->pause) @@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) { struct vscsifrnt_info *info = shost_priv(shost); - struct vscsiif_request *ring_req; struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc); unsigned long flags; int err; - uint16_t rqid; + + sc->result = 0; + memset(shadow, 0, sizeof(*shadow)); + + shadow->sc = sc; + shadow->act = VSCSIIF_ACT_SCSI_CDB; spin_lock_irqsave(shost->host_lock, flags); if (scsifront_enter(info)) { spin_unlock_irqrestore(shost->host_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } - if (RING_FULL(&info->ring)) - goto busy; - - ring_req = scsifront_command2ring(info, sc, shadow); - if (!ring_req) - goto busy; - - sc->result = 0; - - rqid = ring_req->rqid; - ring_req->act = VSCSIIF_ACT_SCSI_CDB; - shadow->sc = sc; - shadow->act = VSCSIIF_ACT_SCSI_CDB; - - err = map_data_for_request(info, sc, ring_req, shadow); + err = map_data_for_request(info, sc, shadow); if (err < 0) { pr_debug("%s: err %d\n", __func__, err); - scsifront_put_rqid(info, rqid); scsifront_return(info); spin_unlock_irqrestore(shost->host_lock, flags); if (err == -ENOMEM) @@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost, return 0; } - scsifront_do_request(info); + if (scsifront_do_request(info, shadow)) { + scsifront_gnttab_done(info, shadow); + goto busy; + } + scsifront_return(info); spin_unlock_irqrestore(shost->host_lock, flags); @@ -598,26 +584,30 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act) struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = shost_priv(host); struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc); - struct 
vscsiif_request *ring_req; int err = 0; - shadow = kmalloc(sizeof(*shadow), GFP_NOIO); + shadow = kzalloc(sizeof(*shadow), GFP_NOIO); if (!shadow) return FAILED; + shadow->act = act; + shadow->rslt_reset = RSLT_RESET_WAITING; + shadow->sc = sc; + shadow->ref_rqid = s->rqid; + init_waitqueue_head(&shadow->wq_reset); + spin_lock_irq(host->host_lock); for (;;) { - if (!RING_FULL(&info->ring)) { - ring_req = scsifront_command2ring(info, sc, shadow); - if (ring_req) - break; - } - if (err || info->pause) { - spin_unlock_irq(host->host_lock); - kfree(shadow); - return FAILED; - } + if (scsifront_enter(info)) + goto fail; + + if (!scsifront_do_request(info, shadow)) + break; + + scsifront_return(info); + if (err) + goto fail; info->wait_ring_available = 1; spin_unlock_irq(host->host_lock); err = wait_event_interruptible(info->wq_sync, @@ -625,22 +615,6 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act) spin_lock_irq(host->host_lock); } - if (scsifront_enter(info)) { - spin_unlock_irq(host->host_lock); - return FAILED; - } - - ring_req->act = act; - ring_req->ref_rqid = s->rqid; - - shadow->act = act; - shadow->rslt_reset = RSLT_RESET_WAITING; - init_waitqueue_head(&shadow->wq_reset); - - ring_req->nr_segments = 0; - - scsifront_do_request(info); - spin_unlock_irq(host->host_lock); err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset); spin_lock_irq(host->host_lock); @@ -659,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act) scsifront_return(info); spin_unlock_irq(host->host_lock); return err; + +fail: + spin_unlock_irq(host->host_lock); + kfree(shadow); + return FAILED; } static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) @@ -1060,13 +1039,9 @@ static void scsifront_read_backend_params(struct xenbus_device *dev, struct vscsifrnt_info *info) { unsigned int sg_grant, nr_segs; - int ret; struct Scsi_Host *host = info->host; - ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u", - &sg_grant); - if (ret != 1) - sg_grant = 0; + sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0); nr_segs = min_t(unsigned int, sg_grant, SG_ALL); nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE); nr_segs = min_t(unsigned int, nr_segs, diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index e7899624aa0b637d8602aad96fb7e7439590a286..35bbe288ddb4ebe11eccebce4ca776b1e47a1340 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c @@ -254,7 +254,7 @@ static void __init intc_subgroup_map(struct intc_desc_int *d) radix_tree_tag_clear(&d->tree, entry->enum_id, INTC_TAG_VIRQ_NEEDS_ALLOC); - radix_tree_replace_slot((void **)entries[i], + radix_tree_replace_slot(&d->tree, (void **)entries[i], &intc_irq_xlate[irq]); } diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index e6e90e80519a7db028fb23dcc6f121b1c64fc0e6..f31bceb69c0d00390d1a9352b7e46e225e8c1170 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -1,8 +1,7 @@ menu "SOC (System On Chip) specific Drivers" source "drivers/soc/bcm/Kconfig" -source "drivers/soc/fsl/qbman/Kconfig" -source "drivers/soc/fsl/qe/Kconfig" +source "drivers/soc/fsl/Kconfig" source "drivers/soc/mediatek/Kconfig" source "drivers/soc/qcom/Kconfig" source "drivers/soc/rockchip/Kconfig" diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..7a9fb9baa66d27c6bb2b1c8f3a22ada6fad84309 --- /dev/null +++ b/drivers/soc/fsl/Kconfig @@ -0,0 +1,18 @@ +# +# Freescale SOC drivers +# + 
+source "drivers/soc/fsl/qbman/Kconfig" +source "drivers/soc/fsl/qe/Kconfig" + +config FSL_GUTS + bool + select SOC_BUS + help + The global utilities block controls power management, I/O device + enabling, power-onreset(POR) configuration monitoring, alternate + function selection for multiplexed signals,and clock control. + This driver is to manage and access global utilities block. + Initially only reading SVR and registering soc device are supported. + Other guts accesses, such as reading RCW, should eventually be moved + into this driver as well. diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile index 75e1f5334821080e0643b47ae1fd592dc7124509..44b3bebef24a3dac1de85c1615ddfde51fa9453c 100644 --- a/drivers/soc/fsl/Makefile +++ b/drivers/soc/fsl/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_FSL_DPAA) += qbman/ obj-$(CONFIG_QUICC_ENGINE) += qe/ obj-$(CONFIG_CPM) += qe/ +obj-$(CONFIG_FSL_GUTS) += guts.o diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c new file mode 100644 index 0000000000000000000000000000000000000000..6af7a11f09a5465028a821251e5fc7ee659ec1c8 --- /dev/null +++ b/drivers/soc/fsl/guts.c @@ -0,0 +1,239 @@ +/* + * Freescale QorIQ Platforms GUTS Driver + * + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct guts { + struct ccsr_guts __iomem *regs; + bool little_endian; +}; + +struct fsl_soc_die_attr { + char *die; + u32 svr; + u32 mask; +}; + +static struct guts *guts; +static struct soc_device_attribute soc_dev_attr; +static struct soc_device *soc_dev; + + +/* SoC die attribute definition for QorIQ platform */ +static const struct fsl_soc_die_attr fsl_soc_die[] = { + /* + * Power Architecture-based SoCs T Series + */ + + /* Die: T4240, SoC: T4240/T4160/T4080 */ + { .die = "T4240", + .svr = 0x82400000, + .mask = 0xfff00000, + }, + /* Die: T1040, SoC: T1040/T1020/T1042/T1022 */ + { .die = "T1040", + .svr = 0x85200000, + .mask = 0xfff00000, + }, + /* Die: T2080, SoC: T2080/T2081 */ + { .die = "T2080", + .svr = 0x85300000, + .mask = 0xfff00000, + }, + /* Die: T1024, SoC: T1024/T1014/T1023/T1013 */ + { .die = "T1024", + .svr = 0x85400000, + .mask = 0xfff00000, + }, + + /* + * ARM-based SoCs LS Series + */ + + /* Die: LS1043A, SoC: LS1043A/LS1023A */ + { .die = "LS1043A", + .svr = 0x87920000, + .mask = 0xffff0000, + }, + /* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */ + { .die = "LS2080A", + .svr = 0x87010000, + .mask = 0xff3f0000, + }, + /* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */ + { .die = "LS1088A", + .svr = 0x87030000, + .mask = 0xff3f0000, + }, + /* Die: LS1012A, SoC: LS1012A */ + { .die = "LS1012A", + .svr = 0x87040000, + .mask = 0xffff0000, + }, + /* Die: LS1046A, SoC: LS1046A/LS1026A */ + { .die = "LS1046A", + .svr = 0x87070000, + .mask = 0xffff0000, + }, + /* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */ + { .die = "LS2088A", + .svr = 0x87090000, + .mask = 0xff3f0000, + }, + /* Die: LS1021A, SoC: LS1021A/LS1020A/LS1022A */ + { .die = "LS1021A", + .svr = 0x87000000, + .mask = 0xfff70000, + }, + { }, +}; + +static const struct fsl_soc_die_attr *fsl_soc_die_match( + u32 svr, const struct fsl_soc_die_attr *matches) +{ + while (matches->svr) { + if (matches->svr == (svr & matches->mask)) 
+ return matches; + matches++; + }; + return NULL; +} + +u32 fsl_guts_get_svr(void) +{ + u32 svr = 0; + + if (!guts || !guts->regs) + return svr; + + if (guts->little_endian) + svr = ioread32(&guts->regs->svr); + else + svr = ioread32be(&guts->regs->svr); + + return svr; +} +EXPORT_SYMBOL(fsl_guts_get_svr); + +static int fsl_guts_probe(struct platform_device *pdev) +{ + struct device_node *root, *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct resource *res; + const struct fsl_soc_die_attr *soc_die; + const char *machine; + u32 svr; + + /* Initialize guts */ + guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL); + if (!guts) + return -ENOMEM; + + guts->little_endian = of_property_read_bool(np, "little-endian"); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + guts->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(guts->regs)) + return PTR_ERR(guts->regs); + + /* Register soc device */ + root = of_find_node_by_path("/"); + if (of_property_read_string(root, "model", &machine)) + of_property_read_string_index(root, "compatible", 0, &machine); + of_node_put(root); + if (machine) + soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + + svr = fsl_guts_get_svr(); + soc_die = fsl_soc_die_match(svr, fsl_soc_die); + if (soc_die) { + soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, + "QorIQ %s", soc_die->die); + } else { + soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ"); + } + soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL, + "svr:0x%08x", svr); + soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d", + (svr >> 4) & 0xf, svr & 0xf); + + soc_dev = soc_device_register(&soc_dev_attr); + if (IS_ERR(soc_dev)) + return PTR_ERR(soc_dev); + + pr_info("Machine: %s\n", soc_dev_attr.machine); + pr_info("SoC family: %s\n", soc_dev_attr.family); + pr_info("SoC ID: %s, Revision: %s\n", + soc_dev_attr.soc_id, soc_dev_attr.revision); + return 0; +} + +static int fsl_guts_remove(struct platform_device *dev) +{ + soc_device_unregister(soc_dev); + return 0; +} + +/* + * Table for matching compatible strings, for device tree + * guts node, for Freescale QorIQ SOCs. 
+ */ +static const struct of_device_id fsl_guts_of_match[] = { + { .compatible = "fsl,qoriq-device-config-1.0", }, + { .compatible = "fsl,qoriq-device-config-2.0", }, + { .compatible = "fsl,p1010-guts", }, + { .compatible = "fsl,p1020-guts", }, + { .compatible = "fsl,p1021-guts", }, + { .compatible = "fsl,p1022-guts", }, + { .compatible = "fsl,p1023-guts", }, + { .compatible = "fsl,p2020-guts", }, + { .compatible = "fsl,bsc9131-guts", }, + { .compatible = "fsl,bsc9132-guts", }, + { .compatible = "fsl,mpc8536-guts", }, + { .compatible = "fsl,mpc8544-guts", }, + { .compatible = "fsl,mpc8548-guts", }, + { .compatible = "fsl,mpc8568-guts", }, + { .compatible = "fsl,mpc8569-guts", }, + { .compatible = "fsl,mpc8572-guts", }, + { .compatible = "fsl,ls1021a-dcfg", }, + { .compatible = "fsl,ls1043a-dcfg", }, + { .compatible = "fsl,ls2080a-dcfg", }, + {} +}; +MODULE_DEVICE_TABLE(of, fsl_guts_of_match); + +static struct platform_driver fsl_guts_driver = { + .driver = { + .name = "fsl-guts", + .of_match_table = fsl_guts_of_match, + }, + .probe = fsl_guts_probe, + .remove = fsl_guts_remove, +}; + +static int __init fsl_guts_init(void) +{ + return platform_driver_register(&fsl_guts_driver); +} +core_initcall(fsl_guts_init); + +static void __exit fsl_guts_exit(void) +{ + platform_driver_unregister(&fsl_guts_driver); +} +module_exit(fsl_guts_exit); diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c index ffa48fdbb1a9a0cd91df52f2e01bcbc2e42d61ad..a3d6d7cfa9296aee3af6504be125dd4deb02f5fa 100644 --- a/drivers/soc/fsl/qbman/bman.c +++ b/drivers/soc/fsl/qbman/bman.c @@ -167,12 +167,12 @@ struct bm_portal { /* Cache-inhibited register access. */ static inline u32 bm_in(struct bm_portal *p, u32 offset) { - return __raw_readl(p->addr.ci + offset); + return be32_to_cpu(__raw_readl(p->addr.ci + offset)); } static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) { - __raw_writel(val, p->addr.ci + offset); + __raw_writel(cpu_to_be32(val), p->addr.ci + offset); } /* Cache Enabled Portal Access */ @@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) { - return __raw_readl(p->addr.ce + offset); + return be32_to_cpu(__raw_readl(p->addr.ce + offset)); } struct bman_portal { @@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal) i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); if (i != rcr_ptr2idx(rcr->cursor)) - pr_crit("losing uncommited RCR entries\n"); + pr_crit("losing uncommitted RCR entries\n"); i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); if (i != rcr->ci) diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c index 9deb0524543f01c4b4dd6c4c91b693537e0fd307..a8e8389a6894fbd5c277f9cc87f10de413d210df 100644 --- a/drivers/soc/fsl/qbman/bman_ccsr.c +++ b/drivers/soc/fsl/qbman/bman_ccsr.c @@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev) node->full_name); return -ENXIO; } - bm_ccsr_start = devm_ioremap(dev, res->start, - res->end - res->start + 1); + bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); if (!bm_ccsr_start) return -ENXIO; diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c index 6579cc18811ac1040ef8ebfa073730371b6d625d..8354d4dabdadf50dbf685738a61db4e98f4176f2 100644 --- a/drivers/soc/fsl/qbman/bman_portal.c +++ b/drivers/soc/fsl/qbman/bman_portal.c @@ -53,58 +53,38 @@ static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg) 
return p; } -static void bman_offline_cpu(unsigned int cpu) +static int bman_offline_cpu(unsigned int cpu) { struct bman_portal *p = affine_bportals[cpu]; const struct bm_portal_config *pcfg; if (!p) - return; + return 0; pcfg = bman_get_bm_portal_config(p); if (!pcfg) - return; + return 0; irq_set_affinity(pcfg->irq, cpumask_of(0)); + return 0; } -static void bman_online_cpu(unsigned int cpu) +static int bman_online_cpu(unsigned int cpu) { struct bman_portal *p = affine_bportals[cpu]; const struct bm_portal_config *pcfg; if (!p) - return; + return 0; pcfg = bman_get_bm_portal_config(p); if (!pcfg) - return; + return 0; irq_set_affinity(pcfg->irq, cpumask_of(cpu)); + return 0; } -static int bman_hotplug_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - bman_online_cpu(cpu); - break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - bman_offline_cpu(cpu); - } - - return NOTIFY_OK; -} - -static struct notifier_block bman_hotplug_cpu_notifier = { - .notifier_call = bman_hotplug_cpu_callback, -}; - static int bman_portal_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -146,15 +126,19 @@ static int bman_portal_probe(struct platform_device *pdev) pcfg->irq = irq; va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); - if (!va) + if (!va) { + dev_err(dev, "ioremap::CE failed\n"); goto err_ioremap1; + } pcfg->addr_virt[DPAA_PORTAL_CE] = va; va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), _PAGE_GUARDED | _PAGE_NO_CACHE); - if (!va) + if (!va) { + dev_err(dev, "ioremap::CI failed\n"); goto err_ioremap2; + } pcfg->addr_virt[DPAA_PORTAL_CI] = va; @@ -170,8 +154,10 @@ static int bman_portal_probe(struct platform_device *pdev) spin_unlock(&bman_lock); pcfg->cpu = cpu; - if (!init_pcfg(pcfg)) - goto err_ioremap2; + if (!init_pcfg(pcfg)) { + dev_err(dev, "portal init failed\n"); + goto err_portal_init; + } /* clear irq affinity if assigned cpu is offline */ if (!cpu_online(cpu)) @@ -179,10 +165,11 @@ static int bman_portal_probe(struct platform_device *pdev) return 0; +err_portal_init: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); err_ioremap2: iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); err_ioremap1: - dev_err(dev, "ioremap failed\n"); return -ENXIO; } @@ -210,8 +197,14 @@ static int __init bman_portal_driver_register(struct platform_driver *drv) if (ret < 0) return ret; - register_hotcpu_notifier(&bman_hotplug_cpu_notifier); - + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "soc/qbman_portal:online", + bman_online_cpu, bman_offline_cpu); + if (ret < 0) { + pr_err("bman: failed to register hotplug callbacks.\n"); + platform_driver_unregister(drv); + return ret; + } return 0; } diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h index b63fd72295c6d22a3d2eae0e111111e6e324d657..2eaf3184f61dae754b746392c659c751b88da817 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.h +++ b/drivers/soc/fsl/qbman/dpaa_sys.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 119054bc922bfc0032d0db52476e4b53985e0d5c..6f509f68085e9874a36bdecbc4587182e9e23d51 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */ struct qm_eqcr_entry { u8 _ncw_verb; /* writes to this are non-coherent */ u8 dca; 
- u16 seqnum; - u32 orp; /* 24-bit */ - u32 fqid; /* 24-bit */ - u32 tag; + __be16 seqnum; + u8 __reserved[4]; + __be32 fqid; /* 24-bit */ + __be32 tag; struct qm_fd fd; u8 __reserved3[32]; } __packed; @@ -183,41 +183,22 @@ struct qm_mr { }; /* MC (Management Command) command */ -/* "Query FQ" */ -struct qm_mcc_queryfq { +/* "FQ" command layout */ +struct qm_mcc_fq { u8 _ncw_verb; u8 __reserved1[3]; - u32 fqid; /* 24-bit */ + __be32 fqid; /* 24-bit */ u8 __reserved2[56]; } __packed; -/* "Alter FQ State Commands " */ -struct qm_mcc_alterfq { - u8 _ncw_verb; - u8 __reserved1[3]; - u32 fqid; /* 24-bit */ - u8 __reserved2; - u8 count; /* number of consecutive FQID */ - u8 __reserved3[10]; - u32 context_b; /* frame queue context b */ - u8 __reserved4[40]; -} __packed; -/* "Query CGR" */ -struct qm_mcc_querycgr { +/* "CGR" command layout */ +struct qm_mcc_cgr { u8 _ncw_verb; u8 __reserved1[30]; u8 cgid; u8 __reserved2[32]; }; -struct qm_mcc_querywq { - u8 _ncw_verb; - u8 __reserved; - /* select channel if verb != QUERYWQ_DEDICATED */ - u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ - u8 __reserved2[60]; -} __packed; - #define QM_MCC_VERB_VBIT 0x80 #define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ #define QM_MCC_VERB_INITFQ_PARKED 0x40 @@ -243,12 +224,9 @@ union qm_mc_command { u8 __reserved[63]; }; struct qm_mcc_initfq initfq; - struct qm_mcc_queryfq queryfq; - struct qm_mcc_alterfq alterfq; struct qm_mcc_initcgr initcgr; - struct qm_mcc_querycgr querycgr; - struct qm_mcc_querywq querywq; - struct qm_mcc_queryfq_np queryfq_np; + struct qm_mcc_fq fq; + struct qm_mcc_cgr cgr; }; /* MC (Management Command) result */ @@ -343,12 +321,12 @@ struct qm_portal { /* Cache-inhibited register access. */ static inline u32 qm_in(struct qm_portal *p, u32 offset) { - return __raw_readl(p->addr.ci + offset); + return be32_to_cpu(__raw_readl(p->addr.ci + offset)); } static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) { - __raw_writel(val, p->addr.ci + offset); + __raw_writel(cpu_to_be32(val), p->addr.ci + offset); } /* Cache Enabled Portal Access */ @@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) { - return __raw_readl(p->addr.ce + offset); + return be32_to_cpu(__raw_readl(p->addr.ce + offset)); } /* --- EQCR API --- */ @@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal) DPAA_ASSERT(!eqcr->busy); if (pi != eqcr_ptr2idx(eqcr->cursor)) - pr_crit("losing uncommited EQCR entries\n"); + pr_crit("losing uncommitted EQCR entries\n"); if (ci != eqcr->ci) pr_crit("missing existing EQCR completions\n"); if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) @@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) { DPAA_ASSERT(eqcr->busy); - DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); - DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); + DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK)); DPAA_ASSERT(eqcr->available >= 1); } @@ -962,8 +939,6 @@ struct qman_portal { u32 sdqcr; /* probing time config params for cpu-affine portals */ const struct qm_portal_config *config; - /* needed for providing a non-NULL device to dma_map_***() */ - struct platform_device *pdev; /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ struct qman_cgrs *cgrs; /* linked-list of CSCN handlers. 
*/ @@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal, const struct qman_cgrs *cgrs) { struct qm_portal *p; - char buf[16]; int ret; u32 isdr; @@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal, portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; - sprintf(buf, "qportal-%d", c->channel); - portal->pdev = platform_device_alloc(buf, -1); - if (!portal->pdev) - goto fail_devalloc; - if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) - goto fail_devadd; - ret = platform_device_add(portal->pdev); - if (ret) - goto fail_devadd; isdr = 0xffffffff; qm_out(p, QM_REG_ISDR, isdr); portal->irq_sources = 0; @@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal, /* special handling, drain just in case it's a few FQRNIs */ const union qm_mr_entry *e = qm_mr_current(p); - dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x", - e->verb, e->ern.rc, e->ern.fd.addr_lo); + dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n", + e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd)); goto fail_dqrr_mr_empty; } /* Success */ @@ -1256,10 +1221,6 @@ static int qman_create_portal(struct qman_portal *portal, fail_affinity: free_irq(c->irq, portal); fail_irq: - platform_device_del(portal->pdev); -fail_devadd: - platform_device_put(portal->pdev); -fail_devalloc: kfree(portal->cgrs); fail_cgrs: qm_mc_finish(p); @@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm) qm_dqrr_finish(&qm->p); qm_eqcr_finish(&qm->p); - platform_device_del(qm->pdev); - platform_device_put(qm->pdev); - qm->config = NULL; } @@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work) case QM_MR_VERB_FQRN: case QM_MR_VERB_FQRL: /* Lookup in the retirement table */ - fq = fqid_to_fq(msg->fq.fqid); + fq = fqid_to_fq(qm_fqid_get(&msg->fq)); if (WARN_ON(!fq)) break; fq_state_change(p, fq, msg, verb); @@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work) break; case QM_MR_VERB_FQPN: /* Parked */ - fq = tag_to_fq(msg->fq.contextB); + fq = tag_to_fq(be32_to_cpu(msg->fq.context_b)); fq_state_change(p, fq, msg, verb); if (fq->cb.fqs) fq->cb.fqs(p, fq, msg); @@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work) } } else { /* Its a software ERN */ - fq = tag_to_fq(msg->ern.tag); + fq = tag_to_fq(be32_to_cpu(msg->ern.tag)); fq->cb.ern(p, fq, msg); } num++; @@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* - * VDQCR: don't trust contextB as the FQ may have + * VDQCR: don't trust context_b as the FQ may have * been configured for h/w consumption and we're * draining it post-retirement. 
*/ @@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) clear_vdqcr(p, fq); } else { - /* SDQCR: contextB points to the FQ */ - fq = tag_to_fq(dq->contextB); + /* SDQCR: context_b points to the FQ */ + fq = tag_to_fq(be32_to_cpu(dq->context_b)); /* Now let the callback do its stuff */ res = fq->cb.dqrr(p, fq, dq); /* @@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) return -EINVAL; #endif - if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { + if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) { /* And can't be set at the same time as TDTHRESH */ - if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH) return -EINVAL; } /* Issue an INITFQ_[PARKED|SCHED] management command */ @@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) mcc = qm_mc_start(&p->p); if (opts) mcc->initfq = *opts; - mcc->initfq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); mcc->initfq.count = 0; /* - * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a + * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a * demux pointer. Otherwise, the caller-provided value is allowed to * stand, don't overwrite it. */ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { dma_addr_t phys_fq; - mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; - mcc->initfq.fqd.context_b = fq_to_tag(fq); + mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB); + mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq)); /* * and the physical address - NB, if the user wasn't trying to * set CONTEXTA, clear the stashing settings. */ - if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { - mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + if (!(be16_to_cpu(mcc->initfq.we_mask) & + QM_INITFQ_WE_CONTEXTA)) { + mcc->initfq.we_mask |= + cpu_to_be16(QM_INITFQ_WE_CONTEXTA); memset(&mcc->initfq.fqd.context_a, 0, sizeof(mcc->initfq.fqd.context_a)); } else { - phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), - DMA_TO_DEVICE); + struct qman_portal *p = qman_dma_portal; + + phys_fq = dma_map_single(p->config->dev, fq, + sizeof(*fq), DMA_TO_DEVICE); + if (dma_mapping_error(p->config->dev, phys_fq)) { + dev_err(p->config->dev, "dma_mapping failed\n"); + ret = -EIO; + goto out; + } + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); } } if (flags & QMAN_INITFQ_FLAG_LOCAL) { int wq = 0; - if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { - mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; + if (!(be16_to_cpu(mcc->initfq.we_mask) & + QM_INITFQ_WE_DESTWQ)) { + mcc->initfq.we_mask |= + cpu_to_be16(QM_INITFQ_WE_DESTWQ); wq = 4; } qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); @@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) goto out; } if (opts) { - if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { - if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) { + if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE) fq_set(fq, QMAN_FQ_STATE_CGR_EN); else fq_clear(fq, QMAN_FQ_STATE_CGR_EN); } - if (opts->we_mask & QM_INITFQ_WE_CGID) + if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID) fq->cgr_groupid = opts->fqd.cgid; } fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq) goto out; } mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(p->config->dev, "ALTER_SCHED timeout\n"); @@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) goto out; } mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); @@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) msg.verb = QM_MR_VERB_FQRNI; msg.fq.fqs = mcr->alterfq.fqs; - msg.fq.fqid = fq->fqid; - msg.fq.contextB = fq_to_tag(fq); + qm_fqid_set(&msg.fq, fq->fqid); + msg.fq.context_b = cpu_to_be32(fq_to_tag(fq)); fq->cb.fqs(p, fq, &msg); } } else if (res == QM_MCR_RESULT_PENDING) { @@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq) goto out; } mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) int ret = 0; mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq, int ret = 0; mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fq->fqid; + qm_fqid_set(&mcc->fq, fq->fqid); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr, int ret = 0; mcc = qm_mc_start(&p->p); - mcc->querycgr.cgid = cgr->cgrid; + mcc->cgr.cgid = cgr->cgrid; qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) if (unlikely(!eq)) goto out; - eq->fqid = fq->fqid; - eq->tag = fq_to_tag(fq); + qm_fqid_set(eq, fq->fqid); + eq->tag = cpu_to_be32(fq_to_tag(fq)); eq->fd = *fd; qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); @@ -2282,7 +2252,24 @@ static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags, } #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) -#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n)) + +/* congestion state change notification target update control */ +static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val) +{ + if (qman_ip_rev >= QMAN_REV30) + cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi | + QM_CGR_TARG_UDP_CTRL_WRITE_BIT); + else + cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi)); +} + +static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val) +{ + if (qman_ip_rev >= QMAN_REV30) + cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi); + else + cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi)); +} static u8 qman_cgr_cpus[CGR_NUM]; @@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, struct qm_mcc_initcgr *opts) { struct qm_mcr_querycgr cgr_state; - struct qm_mcc_initcgr local_opts = {}; int ret; struct qman_portal *p; @@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, spin_lock(&p->cgr_lock); if (opts) { + struct qm_mcc_initcgr local_opts = *opts; + 
ret = qman_query_cgr(cgr, &cgr_state); if (ret) goto out; - if (opts) - local_opts = *opts; - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - local_opts.cgr.cscn_targ_upd_ctrl = - QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); - else - /* Overwrite TARG */ - local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | - TARG_MASK(p); - local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; + + qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p), + be32_to_cpu(cgr_state.cgr.cscn_targ)); + local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG); /* send init if flags indicate so */ - if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) + if (flags & QMAN_CGR_FLAG_USE_INIT) ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts); else @@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr) list_add(&cgr->node, &p->cgr_cbs); goto release_lock; } - /* Overwrite TARG */ - local_opts.we_mask = QM_CGR_WE_CSCN_TARG; - if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) - local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); - else - local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & - ~(TARG_MASK(p)); + + local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG); + qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p), + be32_to_cpu(cgr_state.cgr.cscn_targ)); + ret = qm_modify_cgr(cgr, 0, &local_opts); if (ret) /* add back to the list */ @@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, } while (wait && !dqrr); while (dqrr) { - if (dqrr->fqid == fqid && (dqrr->stat & s)) + if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s)) found = 1; qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); qm_dqrr_pvb_update(p); @@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid) dev = p->config->dev; /* Determine the state of the FQID */ mcc = qm_mc_start(&p->p); - mcc->queryfq_np.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ_NP timeout\n"); @@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid) /* Query which channel the FQ is using */ mcc = qm_mc_start(&p->p); - mcc->queryfq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ timeout\n"); @@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid) case QM_MCR_NP_STATE_PARKED: orl_empty = 0; mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); if (!qm_mc_result_timeout(&p->p, &mcr)) { dev_err(dev, "QUERYFQ_NP timeout\n"); @@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid) cpu_relax(); } mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid) case QM_MCR_NP_STATE_RETIRED: /* Send OOS Command */ mcc = qm_mc_start(&p->p); - mcc->alterfq.fqid = fqid; + qm_fqid_set(&mcc->fq, fqid); qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) { ret = -ETIMEDOUT; @@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config( { return portal->config; } +EXPORT_SYMBOL(qman_get_qm_portal_config); struct gen_pool *qm_fqalloc; /* FQID allocator */ struct gen_pool *qm_qpalloc; /* pool-channel allocator */ @@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp) struct qm_mcr_queryfq_np np; err = qman_query_fq_np(&fq, &np); - if (err) + if (err == 
-ERANGE) /* FQID range exceeded, found no problems */ return 0; + else if (WARN_ON(err)) + return err; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { struct qm_fqd fqd; err = qman_query_fq(&fq, &fqd); if (WARN_ON(err)) - return 0; + return err; if (qm_fqd_get_chan(&fqd) == qp) { /* The channel is the FQ's target, clean it */ err = qman_shutdown_fq(fq.fqid); @@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid) * error, looking for non-OOS FQDs whose CGR is the CGR being released */ struct qman_fq fq = { - .fqid = 1 + .fqid = QM_FQID_RANGE_START }; int err; @@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid) struct qm_mcr_queryfq_np np; err = qman_query_fq_np(&fq, &np); - if (err) + if (err == -ERANGE) /* FQID range exceeded, found no problems */ return 0; + else if (WARN_ON(err)) + return err; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { struct qm_fqd fqd; err = qman_query_fq(&fq, &fqd); if (WARN_ON(err)) - return 0; - if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && + return err; + if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE && fqd.cgid == cgrid) { pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", cgrid, fq.fqid); diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index 0cace9e0077e0c685b0c74bd0da57389d2d037e7..f4e6e70de259918c4d154698919b81698dff4bc7 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c @@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node, /* map as cacheable, non-guarded */ void __iomem *tmpp = ioremap_prot(addr, sz, 0); + if (!tmpp) + return -ENOMEM; + memset_io(tmpp, 0, sz); flush_dcache_range((unsigned long)tmpp, (unsigned long)tmpp + sz); diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 148614388fcaba99f03886f10e99d23a3ac4abc1..adbaa30d3c5ad12aadf5bfa3c01829c392657f1a 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -30,6 +30,9 @@ #include "qman_priv.h" +struct qman_portal *qman_dma_portal; +EXPORT_SYMBOL(qman_dma_portal); + /* Enable portal interupts (as opposed to polling mode) */ #define CONFIG_FSL_DPA_PIRQ_SLOW 1 #define CONFIG_FSL_DPA_PIRQ_FAST 1 @@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) /* all assigned portals are initialized now */ qman_init_cgr_all(); } + + if (!qman_dma_portal) + qman_dma_portal = p; + spin_unlock(&qman_lock); dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); @@ -179,7 +186,7 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, qman_set_sdest(pcfg->channel, cpu); } -static void qman_offline_cpu(unsigned int cpu) +static int qman_offline_cpu(unsigned int cpu) { struct qman_portal *p; const struct qm_portal_config *pcfg; @@ -192,9 +199,10 @@ static void qman_offline_cpu(unsigned int cpu) qman_portal_update_sdest(pcfg, 0); } } + return 0; } -static void qman_online_cpu(unsigned int cpu) +static int qman_online_cpu(unsigned int cpu) { struct qman_portal *p; const struct qm_portal_config *pcfg; @@ -207,40 +215,18 @@ static void qman_online_cpu(unsigned int cpu) qman_portal_update_sdest(pcfg, cpu); } } + return 0; } -static int qman_hotplug_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - qman_online_cpu(cpu); - break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - 
qman_offline_cpu(cpu); - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block qman_hotplug_cpu_notifier = { - .notifier_call = qman_hotplug_cpu_callback, -}; - static int qman_portal_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct qm_portal_config *pcfg; struct resource *addr_phys[2]; - const u32 *channel; void __iomem *va; - int irq, len, cpu; + int irq, cpu, err; + u32 val; pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); if (!pcfg) @@ -264,13 +250,13 @@ static int qman_portal_probe(struct platform_device *pdev) return -ENXIO; } - channel = of_get_property(node, "cell-index", &len); - if (!channel || (len != 4)) { + err = of_property_read_u32(node, "cell-index", &val); + if (err) { dev_err(dev, "Can't get %s property 'cell-index'\n", node->full_name); - return -ENXIO; + return err; } - pcfg->channel = *channel; + pcfg->channel = val; pcfg->cpu = -1; irq = platform_get_irq(pdev, 0); if (irq <= 0) { @@ -280,15 +266,19 @@ static int qman_portal_probe(struct platform_device *pdev) pcfg->irq = irq; va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); - if (!va) + if (!va) { + dev_err(dev, "ioremap::CE failed\n"); goto err_ioremap1; + } pcfg->addr_virt[DPAA_PORTAL_CE] = va; va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), _PAGE_GUARDED | _PAGE_NO_CACHE); - if (!va) + if (!va) { + dev_err(dev, "ioremap::CI failed\n"); goto err_ioremap2; + } pcfg->addr_virt[DPAA_PORTAL_CI] = va; @@ -306,8 +296,15 @@ static int qman_portal_probe(struct platform_device *pdev) spin_unlock(&qman_lock); pcfg->cpu = cpu; - if (!init_pcfg(pcfg)) - goto err_ioremap2; + if (dma_set_mask(dev, DMA_BIT_MASK(40))) { + dev_err(dev, "dma_set_mask() failed\n"); + goto err_portal_init; + } + + if (!init_pcfg(pcfg)) { + dev_err(dev, "portal init failed\n"); + goto err_portal_init; + } /* clear irq affinity if assigned cpu is offline */ if (!cpu_online(cpu)) @@ -315,10 +312,11 @@ static int qman_portal_probe(struct platform_device *pdev) return 0; +err_portal_init: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); err_ioremap2: iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); err_ioremap1: - dev_err(dev, "ioremap failed\n"); return -ENXIO; } @@ -346,8 +344,14 @@ static int __init qman_portal_driver_register(struct platform_driver *drv) if (ret < 0) return ret; - register_hotcpu_notifier(&qman_hotplug_cpu_notifier); - + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "soc/qman_portal:online", + qman_online_cpu, qman_offline_cpu); + if (ret < 0) { + pr_err("qman: failed to register hotplug callbacks.\n"); + platform_driver_unregister(drv); + return ret; + } return 0; } diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 5cf821e623a9c6ff6fbec65f6c4842ea8a379768..53685b59718e3e4c483e84a9120f95485367b96b 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -73,29 +73,23 @@ struct qm_mcr_querycgr { struct __qm_mc_cgr cgr; /* CGR fields */ u8 __reserved2[6]; u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ - u32 i_bcnt_lo; /* low 32-bits of 40-bit */ + __be32 i_bcnt_lo; /* low 32-bits of 40-bit */ u8 __reserved3[3]; u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ - u32 a_bcnt_lo; /* low 32-bits of 40-bit */ - u32 cscn_targ_swp[4]; + __be32 a_bcnt_lo; /* low 32-bits of 40-bit */ + __be32 cscn_targ_swp[4]; } __packed; static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) { - return ((u64)q->i_bcnt_hi << 32) | 
(u64)q->i_bcnt_lo; + return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo); } static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) { - return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; + return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo); } /* "Query FQ Non-Programmable Fields" */ -struct qm_mcc_queryfq_np { - u8 _ncw_verb; - u8 __reserved1[3]; - u32 fqid; /* 24-bit */ - u8 __reserved2[56]; -} __packed; struct qm_mcr_queryfq_np { u8 verb; @@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids); #define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) extern struct qman_portal *affine_portals[NR_CPUS]; +extern struct qman_portal *qman_dma_portal; const struct qm_portal_config *qman_get_qm_portal_config( struct qman_portal *portal); diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c index 6880ff17f45e3b102ec250511a4a05133c527132..2895d062cf513bfbca42c2cf85cbb46b420a9b88 100644 --- a/drivers/soc/fsl/qbman/qman_test_api.c +++ b/drivers/soc/fsl/qbman/qman_test_api.c @@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd) { qm_fd_addr_set64(fd, 0xabdeadbeefLLU); qm_fd_set_contig_big(fd, 0x0000ffff); - fd->cmd = 0xfeedf00d; + fd->cmd = cpu_to_be32(0xfeedf00d); } static void fd_inc(struct qm_fd *fd) @@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd) len--; qm_fd_set_param(fd, fmt, off, len); - fd->cmd++; + fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1); } /* The only part of the 'fd' we can't memcmp() is the ppid */ -static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) +static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b) { - int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1; + bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b); - if (!r) { - enum qm_fd_format fmt_a, fmt_b; + neq |= qm_fd_get_format(a) != qm_fd_get_format(b); + neq |= a->cfg != b->cfg; + neq |= a->cmd != b->cmd; - fmt_a = qm_fd_get_format(a); - fmt_b = qm_fd_get_format(b); - r = fmt_a - fmt_b; - } - if (!r) - r = a->cfg - b->cfg; - if (!r) - r = a->cmd - b->cmd; - return r; + return neq; } /* test */ @@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, struct qman_fq *fq, const struct qm_dqrr_entry *dq) { - if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) { + if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) { pr_err("BADNESS: dequeued frame doesn't match;\n"); return qman_cb_dqrr_consume; } fd_inc(&fd_dq); - if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) { sdqcr_complete = 1; wake_up(&waitqueue); } diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c index 43cf66ba42f59a512230eed2e7d2b200717db231..e87b65403b6721b8bc64157feb56a369ac45f506 100644 --- a/drivers/soc/fsl/qbman/qman_test_stash.c +++ b/drivers/soc/fsl/qbman/qman_test_stash.c @@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); /* links together the hp_cpu structs, in first-come first-serve order. 
*/ static LIST_HEAD(hp_cpu_list); -static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); +static DEFINE_SPINLOCK(hp_lock); static unsigned int hp_cpu_list_length; @@ -191,6 +191,9 @@ static void *__frame_ptr; static u32 *frame_ptr; static dma_addr_t frame_dma; +/* needed for dma_map*() */ +static const struct qm_portal_config *pcfg; + /* the main function waits on this */ static DECLARE_WAIT_QUEUE_HEAD(queue); @@ -210,16 +213,14 @@ static int allocate_frame_data(void) { u32 lfsr = HP_FIRST_WORD; int loop; - struct platform_device *pdev = platform_device_alloc("foobar", -1); - if (!pdev) { - pr_crit("platform_device_alloc() failed"); - return -EIO; - } - if (platform_device_add(pdev)) { - pr_crit("platform_device_add() failed"); + if (!qman_dma_portal) { + pr_crit("portal not available\n"); return -EIO; } + + pcfg = qman_get_qm_portal_config(qman_dma_portal); + __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); if (!__frame_ptr) return -ENOMEM; @@ -229,15 +230,22 @@ static int allocate_frame_data(void) frame_ptr[loop] = lfsr; lfsr = do_lfsr(lfsr); } - frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, + + frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS, DMA_BIDIRECTIONAL); - platform_device_del(pdev); - platform_device_put(pdev); + if (dma_mapping_error(pcfg->dev, frame_dma)) { + pr_crit("dma mapping failure\n"); + kfree(__frame_ptr); + return -EIO; + } + return 0; } static void deallocate_frame_data(void) { + dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS, + DMA_BIDIRECTIONAL); kfree(__frame_ptr); } @@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler, int loop; if (qm_fd_addr_get64(fd) != handler->addr) { - pr_crit("bad frame address"); + pr_crit("bad frame address, [%llX != %llX]\n", + qm_fd_addr_get64(fd), handler->addr); return -EIO; } for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { @@ -397,8 +406,9 @@ static int init_handler(void *h) goto failed; } memset(&opts, 0, sizeof(opts)); - opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; - opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; + opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | + QM_INITFQ_WE_CONTEXTA); + opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING); qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL); err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, &opts); diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 2707a827261b10378ef45460082463cf80890dd1..ade168f5328e7fac1e68c4d55004cf94ac842f72 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -717,9 +717,5 @@ static struct platform_driver qe_driver = { .resume = qe_resume, }; -static int __init qe_drv_init(void) -{ - return platform_driver_register(&qe_driver); -} -device_initcall(qe_drv_init); +builtin_platform_driver(qe_driver); #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index 0a4ea809a61b0cddb37fd6fc2b72e6047a343bf4..609bb3424c145d9bc9f3d1c67b124c56d2438bbd 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -23,7 +23,7 @@ config MTK_PMIC_WRAP config MTK_SCPSYS bool "MediaTek SCPSYS Support" depends on ARCH_MEDIATEK || COMPILE_TEST - default ARM64 && ARCH_MEDIATEK + default ARCH_MEDIATEK select REGMAP select MTK_INFRACFG select PM_GENERIC_DOMAINS if PM diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index 
837effe199071bcb319a525e4c9498b4cf2225ba..beb79162369a050ee844023499a9342abf478ec6 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -11,17 +11,16 @@ * GNU General Public License for more details. */ #include -#include +#include #include -#include #include -#include #include #include #include -#include -#include #include +#include + +#include #include #define SPM_VDE_PWR_CON 0x0210 @@ -29,11 +28,17 @@ #define SPM_VEN_PWR_CON 0x0230 #define SPM_ISP_PWR_CON 0x0238 #define SPM_DIS_PWR_CON 0x023c +#define SPM_CONN_PWR_CON 0x0280 #define SPM_VEN2_PWR_CON 0x0298 -#define SPM_AUDIO_PWR_CON 0x029c +#define SPM_AUDIO_PWR_CON 0x029c /* MT8173 */ +#define SPM_BDP_PWR_CON 0x029c /* MT2701 */ +#define SPM_ETH_PWR_CON 0x02a0 +#define SPM_HIF_PWR_CON 0x02a4 +#define SPM_IFR_MSC_PWR_CON 0x02a8 #define SPM_MFG_2D_PWR_CON 0x02c0 #define SPM_MFG_ASYNC_PWR_CON 0x02c4 #define SPM_USB_PWR_CON 0x02cc + #define SPM_PWR_STATUS 0x060c #define SPM_PWR_STATUS_2ND 0x0610 @@ -43,10 +48,15 @@ #define PWR_ON_2ND_BIT BIT(3) #define PWR_CLK_DIS_BIT BIT(4) +#define PWR_STATUS_CONN BIT(1) #define PWR_STATUS_DISP BIT(3) #define PWR_STATUS_MFG BIT(4) #define PWR_STATUS_ISP BIT(5) #define PWR_STATUS_VDEC BIT(7) +#define PWR_STATUS_BDP BIT(14) +#define PWR_STATUS_ETH BIT(15) +#define PWR_STATUS_HIF BIT(16) +#define PWR_STATUS_IFR_MSC BIT(17) #define PWR_STATUS_VENC_LT BIT(20) #define PWR_STATUS_VENC BIT(21) #define PWR_STATUS_MFG_2D BIT(22) @@ -55,12 +65,23 @@ #define PWR_STATUS_USB BIT(25) enum clk_id { - MT8173_CLK_NONE, - MT8173_CLK_MM, - MT8173_CLK_MFG, - MT8173_CLK_VENC, - MT8173_CLK_VENC_LT, - MT8173_CLK_MAX, + CLK_NONE, + CLK_MM, + CLK_MFG, + CLK_VENC, + CLK_VENC_LT, + CLK_ETHIF, + CLK_MAX, +}; + +static const char * const clk_names[] = { + NULL, + "mm", + "mfg", + "venc", + "venc_lt", + "ethif", + NULL, }; #define MAX_CLKS 2 @@ -76,98 +97,6 @@ struct scp_domain_data { bool active_wakeup; }; -static const struct scp_domain_data scp_domain_data[] = { - [MT8173_POWER_DOMAIN_VDEC] = { - .name = "vdec", - .sta_mask = PWR_STATUS_VDEC, - .ctl_offs = SPM_VDE_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(12, 12), - .clk_id = {MT8173_CLK_MM}, - }, - [MT8173_POWER_DOMAIN_VENC] = { - .name = "venc", - .sta_mask = PWR_STATUS_VENC, - .ctl_offs = SPM_VEN_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(15, 12), - .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC}, - }, - [MT8173_POWER_DOMAIN_ISP] = { - .name = "isp", - .sta_mask = PWR_STATUS_ISP, - .ctl_offs = SPM_ISP_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(13, 12), - .clk_id = {MT8173_CLK_MM}, - }, - [MT8173_POWER_DOMAIN_MM] = { - .name = "mm", - .sta_mask = PWR_STATUS_DISP, - .ctl_offs = SPM_DIS_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(12, 12), - .clk_id = {MT8173_CLK_MM}, - .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 | - MT8173_TOP_AXI_PROT_EN_MM_M1, - }, - [MT8173_POWER_DOMAIN_VENC_LT] = { - .name = "venc_lt", - .sta_mask = PWR_STATUS_VENC_LT, - .ctl_offs = SPM_VEN2_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(15, 12), - .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC_LT}, - }, - [MT8173_POWER_DOMAIN_AUDIO] = { - .name = "audio", - .sta_mask = PWR_STATUS_AUDIO, - .ctl_offs = SPM_AUDIO_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(15, 12), - .clk_id = {MT8173_CLK_NONE}, - }, - [MT8173_POWER_DOMAIN_USB] = { - .name = "usb", - .sta_mask = PWR_STATUS_USB, - .ctl_offs = 
SPM_USB_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(15, 12), - .clk_id = {MT8173_CLK_NONE}, - .active_wakeup = true, - }, - [MT8173_POWER_DOMAIN_MFG_ASYNC] = { - .name = "mfg_async", - .sta_mask = PWR_STATUS_MFG_ASYNC, - .ctl_offs = SPM_MFG_ASYNC_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = 0, - .clk_id = {MT8173_CLK_MFG}, - }, - [MT8173_POWER_DOMAIN_MFG_2D] = { - .name = "mfg_2d", - .sta_mask = PWR_STATUS_MFG_2D, - .ctl_offs = SPM_MFG_2D_PWR_CON, - .sram_pdn_bits = GENMASK(11, 8), - .sram_pdn_ack_bits = GENMASK(13, 12), - .clk_id = {MT8173_CLK_NONE}, - }, - [MT8173_POWER_DOMAIN_MFG] = { - .name = "mfg", - .sta_mask = PWR_STATUS_MFG, - .ctl_offs = SPM_MFG_PWR_CON, - .sram_pdn_bits = GENMASK(13, 8), - .sram_pdn_ack_bits = GENMASK(21, 16), - .clk_id = {MT8173_CLK_NONE}, - .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S | - MT8173_TOP_AXI_PROT_EN_MFG_M0 | - MT8173_TOP_AXI_PROT_EN_MFG_M1 | - MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT, - }, -}; - -#define NUM_DOMAINS ARRAY_SIZE(scp_domain_data) - struct scp; struct scp_domain { @@ -179,7 +108,7 @@ struct scp_domain { }; struct scp { - struct scp_domain domains[NUM_DOMAINS]; + struct scp_domain *domains; struct genpd_onecell_data pd_data; struct device *dev; void __iomem *base; @@ -408,57 +337,55 @@ static bool scpsys_active_wakeup(struct device *dev) return scpd->data->active_wakeup; } -static int scpsys_probe(struct platform_device *pdev) +static void init_clks(struct platform_device *pdev, struct clk **clk) +{ + int i; + + for (i = CLK_NONE + 1; i < CLK_MAX; i++) + clk[i] = devm_clk_get(&pdev->dev, clk_names[i]); +} + +static struct scp *init_scp(struct platform_device *pdev, + const struct scp_domain_data *scp_domain_data, int num) { struct genpd_onecell_data *pd_data; struct resource *res; - int i, j, ret; + int i, j; struct scp *scp; - struct clk *clk[MT8173_CLK_MAX]; + struct clk *clk[CLK_MAX]; scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL); if (!scp) - return -ENOMEM; + return ERR_PTR(-ENOMEM); scp->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); scp->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(scp->base)) - return PTR_ERR(scp->base); + return ERR_CAST(scp->base); + + scp->domains = devm_kzalloc(&pdev->dev, + sizeof(*scp->domains) * num, GFP_KERNEL); + if (!scp->domains) + return ERR_PTR(-ENOMEM); pd_data = &scp->pd_data; pd_data->domains = devm_kzalloc(&pdev->dev, - sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL); + sizeof(*pd_data->domains) * num, GFP_KERNEL); if (!pd_data->domains) - return -ENOMEM; - - clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm"); - if (IS_ERR(clk[MT8173_CLK_MM])) - return PTR_ERR(clk[MT8173_CLK_MM]); - - clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg"); - if (IS_ERR(clk[MT8173_CLK_MFG])) - return PTR_ERR(clk[MT8173_CLK_MFG]); - - clk[MT8173_CLK_VENC] = devm_clk_get(&pdev->dev, "venc"); - if (IS_ERR(clk[MT8173_CLK_VENC])) - return PTR_ERR(clk[MT8173_CLK_VENC]); - - clk[MT8173_CLK_VENC_LT] = devm_clk_get(&pdev->dev, "venc_lt"); - if (IS_ERR(clk[MT8173_CLK_VENC_LT])) - return PTR_ERR(clk[MT8173_CLK_VENC_LT]); + return ERR_PTR(-ENOMEM); scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "infracfg"); if (IS_ERR(scp->infracfg)) { dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n", PTR_ERR(scp->infracfg)); - return PTR_ERR(scp->infracfg); + return ERR_CAST(scp->infracfg); } - for (i = 0; i < NUM_DOMAINS; i++) { + for (i = 0; i < num; i++) { struct scp_domain *scpd = &scp->domains[i]; const 
struct scp_domain_data *data = &scp_domain_data[i]; @@ -467,13 +394,15 @@ static int scpsys_probe(struct platform_device *pdev) if (PTR_ERR(scpd->supply) == -ENODEV) scpd->supply = NULL; else - return PTR_ERR(scpd->supply); + return ERR_CAST(scpd->supply); } } - pd_data->num_domains = NUM_DOMAINS; + pd_data->num_domains = num; + + init_clks(pdev, clk); - for (i = 0; i < NUM_DOMAINS; i++) { + for (i = 0; i < num; i++) { struct scp_domain *scpd = &scp->domains[i]; struct generic_pm_domain *genpd = &scpd->genpd; const struct scp_domain_data *data = &scp_domain_data[i]; @@ -482,13 +411,37 @@ static int scpsys_probe(struct platform_device *pdev) scpd->scp = scp; scpd->data = data; - for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) - scpd->clk[j] = clk[data->clk_id[j]]; + + for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) { + struct clk *c = clk[data->clk_id[j]]; + + if (IS_ERR(c)) { + dev_err(&pdev->dev, "%s: clk unavailable\n", + data->name); + return ERR_CAST(c); + } + + scpd->clk[j] = c; + } genpd->name = data->name; genpd->power_off = scpsys_power_off; genpd->power_on = scpsys_power_on; genpd->dev_ops.active_wakeup = scpsys_active_wakeup; + } + + return scp; +} + +static void mtk_register_power_domains(struct platform_device *pdev, + struct scp *scp, int num) +{ + struct genpd_onecell_data *pd_data; + int i, ret; + + for (i = 0; i < num; i++) { + struct scp_domain *scpd = &scp->domains[i]; + struct generic_pm_domain *genpd = &scpd->genpd; /* * Initially turn on all domains to make the domains usable @@ -507,6 +460,222 @@ static int scpsys_probe(struct platform_device *pdev) * valid. */ + pd_data = &scp->pd_data; + + ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data); + if (ret) + dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret); +} + +/* + * MT2701 power domain support + */ + +static const struct scp_domain_data scp_domain_data_mt2701[] = { + [MT2701_POWER_DOMAIN_CONN] = { + .name = "conn", + .sta_mask = PWR_STATUS_CONN, + .ctl_offs = SPM_CONN_PWR_CON, + .bus_prot_mask = 0x0104, + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, + [MT2701_POWER_DOMAIN_DISP] = { + .name = "disp", + .sta_mask = PWR_STATUS_DISP, + .ctl_offs = SPM_DIS_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .clk_id = {CLK_MM}, + .bus_prot_mask = 0x0002, + .active_wakeup = true, + }, + [MT2701_POWER_DOMAIN_MFG] = { + .name = "mfg", + .sta_mask = PWR_STATUS_MFG, + .ctl_offs = SPM_MFG_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .clk_id = {CLK_MFG}, + .active_wakeup = true, + }, + [MT2701_POWER_DOMAIN_VDEC] = { + .name = "vdec", + .sta_mask = PWR_STATUS_VDEC, + .ctl_offs = SPM_VDE_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .clk_id = {CLK_MM}, + .active_wakeup = true, + }, + [MT2701_POWER_DOMAIN_ISP] = { + .name = "isp", + .sta_mask = PWR_STATUS_ISP, + .ctl_offs = SPM_ISP_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + .clk_id = {CLK_MM}, + .active_wakeup = true, + }, + [MT2701_POWER_DOMAIN_BDP] = { + .name = "bdp", + .sta_mask = PWR_STATUS_BDP, + .ctl_offs = SPM_BDP_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, + [MT2701_POWER_DOMAIN_ETH] = { + .name = "eth", + .sta_mask = PWR_STATUS_ETH, + .ctl_offs = SPM_ETH_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_ETHIF}, + .active_wakeup = true, + }, + [MT2701_POWER_DOMAIN_HIF] = { + .name = "hif", + .sta_mask = PWR_STATUS_HIF, + 
.ctl_offs = SPM_HIF_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_ETHIF}, + .active_wakeup = true, + }, + [MT2701_POWER_DOMAIN_IFR_MSC] = { + .name = "ifr_msc", + .sta_mask = PWR_STATUS_IFR_MSC, + .ctl_offs = SPM_IFR_MSC_PWR_CON, + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, +}; + +#define NUM_DOMAINS_MT2701 ARRAY_SIZE(scp_domain_data_mt2701) + +static int __init scpsys_probe_mt2701(struct platform_device *pdev) +{ + struct scp *scp; + + scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701); + if (IS_ERR(scp)) + return PTR_ERR(scp); + + mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT2701); + + return 0; +} + +/* + * MT8173 power domain support + */ + +static const struct scp_domain_data scp_domain_data_mt8173[] = { + [MT8173_POWER_DOMAIN_VDEC] = { + .name = "vdec", + .sta_mask = PWR_STATUS_VDEC, + .ctl_offs = SPM_VDE_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .clk_id = {CLK_MM}, + }, + [MT8173_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = PWR_STATUS_VENC, + .ctl_offs = SPM_VEN_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_MM, CLK_VENC}, + }, + [MT8173_POWER_DOMAIN_ISP] = { + .name = "isp", + .sta_mask = PWR_STATUS_ISP, + .ctl_offs = SPM_ISP_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + .clk_id = {CLK_MM}, + }, + [MT8173_POWER_DOMAIN_MM] = { + .name = "mm", + .sta_mask = PWR_STATUS_DISP, + .ctl_offs = SPM_DIS_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .clk_id = {CLK_MM}, + .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 | + MT8173_TOP_AXI_PROT_EN_MM_M1, + }, + [MT8173_POWER_DOMAIN_VENC_LT] = { + .name = "venc_lt", + .sta_mask = PWR_STATUS_VENC_LT, + .ctl_offs = SPM_VEN2_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_MM, CLK_VENC_LT}, + }, + [MT8173_POWER_DOMAIN_AUDIO] = { + .name = "audio", + .sta_mask = PWR_STATUS_AUDIO, + .ctl_offs = SPM_AUDIO_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_NONE}, + }, + [MT8173_POWER_DOMAIN_USB] = { + .name = "usb", + .sta_mask = PWR_STATUS_USB, + .ctl_offs = SPM_USB_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + .clk_id = {CLK_NONE}, + .active_wakeup = true, + }, + [MT8173_POWER_DOMAIN_MFG_ASYNC] = { + .name = "mfg_async", + .sta_mask = PWR_STATUS_MFG_ASYNC, + .ctl_offs = SPM_MFG_ASYNC_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = 0, + .clk_id = {CLK_MFG}, + }, + [MT8173_POWER_DOMAIN_MFG_2D] = { + .name = "mfg_2d", + .sta_mask = PWR_STATUS_MFG_2D, + .ctl_offs = SPM_MFG_2D_PWR_CON, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + .clk_id = {CLK_NONE}, + }, + [MT8173_POWER_DOMAIN_MFG] = { + .name = "mfg", + .sta_mask = PWR_STATUS_MFG, + .ctl_offs = SPM_MFG_PWR_CON, + .sram_pdn_bits = GENMASK(13, 8), + .sram_pdn_ack_bits = GENMASK(21, 16), + .clk_id = {CLK_NONE}, + .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S | + MT8173_TOP_AXI_PROT_EN_MFG_M0 | + MT8173_TOP_AXI_PROT_EN_MFG_M1 | + MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT, + }, +}; + +#define NUM_DOMAINS_MT8173 ARRAY_SIZE(scp_domain_data_mt8173) + +static int __init scpsys_probe_mt8173(struct platform_device *pdev) +{ + struct scp *scp; + struct genpd_onecell_data *pd_data; + int ret; + + scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173); + if 
(IS_ERR(scp)) + return PTR_ERR(scp); + + mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT8173); + + pd_data = &scp->pd_data; + ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC], pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]); if (ret && IS_ENABLED(CONFIG_PM)) @@ -517,21 +686,39 @@ static int scpsys_probe(struct platform_device *pdev) if (ret && IS_ENABLED(CONFIG_PM)) dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret); - ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data); - if (ret) - dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret); - return 0; } +/* + * scpsys driver init + */ + static const struct of_device_id of_scpsys_match_tbl[] = { { + .compatible = "mediatek,mt2701-scpsys", + .data = scpsys_probe_mt2701, + }, { .compatible = "mediatek,mt8173-scpsys", + .data = scpsys_probe_mt8173, }, { /* sentinel */ } }; +static int scpsys_probe(struct platform_device *pdev) +{ + int (*probe)(struct platform_device *); + const struct of_device_id *of_id; + + of_id = of_match_node(of_scpsys_match_tbl, pdev->dev.of_node); + if (!of_id || !of_id->data) + return -EINVAL; + + probe = of_id->data; + + return probe(pdev); +} + static struct platform_driver scpsys_drv = { .probe = scpsys_probe, .driver = { diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile index 623039c3514cdc347d77f4d6ab7a3d5d0af469c5..d9115cb5ed9dae787c0bc22d43e9425f3ceef78a 100644 --- a/drivers/soc/renesas/Makefile +++ b/drivers/soc/renesas/Makefile @@ -1,3 +1,12 @@ +obj-$(CONFIG_SOC_BUS) += renesas-soc.o + +obj-$(CONFIG_ARCH_RCAR_GEN1) += rcar-rst.o +obj-$(CONFIG_ARCH_RCAR_GEN2) += rcar-rst.o +obj-$(CONFIG_ARCH_R8A7795) += rcar-rst.o +obj-$(CONFIG_ARCH_R8A7796) += rcar-rst.o + +obj-$(CONFIG_ARCH_R8A7743) += rcar-sysc.o r8a7743-sysc.o +obj-$(CONFIG_ARCH_R8A7745) += rcar-sysc.o r8a7745-sysc.o obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c new file mode 100644 index 0000000000000000000000000000000000000000..9583a327d90c71600d28fcb100231b5f23a8b2b3 --- /dev/null +++ b/drivers/soc/renesas/r8a7743-sysc.c @@ -0,0 +1,32 @@ +/* + * Renesas RZ/G1M System Controller + * + * Copyright (C) 2016 Cogent Embedded Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; of the License. 
+ */
+
+#include
+#include
+
+#include
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7743_areas[] __initconst = {
+	{ "always-on", 0, 0, R8A7743_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+	{ "ca15-scu", 0x180, 0, R8A7743_PD_CA15_SCU, R8A7743_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca15-cpu0", 0x40, 0, R8A7743_PD_CA15_CPU0, R8A7743_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "ca15-cpu1", 0x40, 1, R8A7743_PD_CA15_CPU1, R8A7743_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "sgx", 0xc0, 0, R8A7743_PD_SGX, R8A7743_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7743_sysc_info __initconst = {
+	.areas = r8a7743_areas,
+	.num_areas = ARRAY_SIZE(r8a7743_areas),
+};
diff --git a/drivers/soc/renesas/r8a7745-sysc.c b/drivers/soc/renesas/r8a7745-sysc.c
new file mode 100644
index 0000000000000000000000000000000000000000..d17887c08aa1915ab5233aa9af41183ca85a4818
--- /dev/null
+++ b/drivers/soc/renesas/r8a7745-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1E System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include
+#include
+
+#include
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7745_areas[] __initconst = {
+	{ "always-on", 0, 0, R8A7745_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+	{ "ca7-scu", 0x100, 0, R8A7745_PD_CA7_SCU, R8A7745_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca7-cpu0", 0x1c0, 0, R8A7745_PD_CA7_CPU0, R8A7745_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "ca7-cpu1", 0x1c0, 1, R8A7745_PD_CA7_CPU1, R8A7745_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "sgx", 0xc0, 0, R8A7745_PD_SGX, R8A7745_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7745_sysc_info __initconst = {
+	.areas = r8a7745_areas,
+	.num_areas = ARRAY_SIZE(r8a7745_areas),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
new file mode 100644
index 0000000000000000000000000000000000000000..a6d1c26d31675cf36ebc311a17a993704e0af688
--- /dev/null
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -0,0 +1,92 @@
+/*
+ * R-Car Gen1 RESET/WDT, R-Car Gen2, Gen3, and RZ/G RST Driver
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */ + +#include +#include +#include +#include + +struct rst_config { + unsigned int modemr; /* Mode Monitoring Register Offset */ +}; + +static const struct rst_config rcar_rst_gen1 __initconst = { + .modemr = 0x20, +}; + +static const struct rst_config rcar_rst_gen2 __initconst = { + .modemr = 0x60, +}; + +static const struct of_device_id rcar_rst_matches[] __initconst = { + /* RZ/G is handled like R-Car Gen2 */ + { .compatible = "renesas,r8a7743-rst", .data = &rcar_rst_gen2 }, + { .compatible = "renesas,r8a7745-rst", .data = &rcar_rst_gen2 }, + /* R-Car Gen1 */ + { .compatible = "renesas,r8a7778-reset-wdt", .data = &rcar_rst_gen1 }, + { .compatible = "renesas,r8a7779-reset-wdt", .data = &rcar_rst_gen1 }, + /* R-Car Gen2 */ + { .compatible = "renesas,r8a7790-rst", .data = &rcar_rst_gen2 }, + { .compatible = "renesas,r8a7791-rst", .data = &rcar_rst_gen2 }, + { .compatible = "renesas,r8a7792-rst", .data = &rcar_rst_gen2 }, + { .compatible = "renesas,r8a7793-rst", .data = &rcar_rst_gen2 }, + { .compatible = "renesas,r8a7794-rst", .data = &rcar_rst_gen2 }, + /* R-Car Gen3 is handled like R-Car Gen2 */ + { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 }, + { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 }, + { /* sentinel */ } +}; + +static void __iomem *rcar_rst_base __initdata; +static u32 saved_mode __initdata; + +static int __init rcar_rst_init(void) +{ + const struct of_device_id *match; + const struct rst_config *cfg; + struct device_node *np; + void __iomem *base; + int error = 0; + + np = of_find_matching_node_and_match(NULL, rcar_rst_matches, &match); + if (!np) + return -ENODEV; + + base = of_iomap(np, 0); + if (!base) { + pr_warn("%s: Cannot map regs\n", np->full_name); + error = -ENOMEM; + goto out_put; + } + + rcar_rst_base = base; + cfg = match->data; + saved_mode = ioread32(base + cfg->modemr); + + pr_debug("%s: MODE = 0x%08x\n", np->full_name, saved_mode); + +out_put: + of_node_put(np); + return error; +} + +int __init rcar_rst_read_mode_pins(u32 *mode) +{ + int error; + + if (!rcar_rst_base) { + error = rcar_rst_init(); + if (error) + return error; + } + + *mode = saved_mode; + return 0; +} diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index 65c8e1eb90c09bb352acea559e619d0e87cbf5a9..225c35c79d9abad07308d2226f99448ba0d93531 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c @@ -275,6 +275,12 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) } static const struct of_device_id rcar_sysc_matches[] = { +#ifdef CONFIG_ARCH_R8A7743 + { .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info }, +#endif +#ifdef CONFIG_ARCH_R8A7745 + { .compatible = "renesas,r8a7745-sysc", .data = &r8a7745_sysc_info }, +#endif #ifdef CONFIG_ARCH_R8A7779 { .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info }, #endif diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index 77dbe861473f64351b5fd9dbde6adf9eca3b3c2f..f6e842e2976ebe326ca2439dfb58a4b14f6464bc 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h @@ -50,6 +50,8 @@ struct rcar_sysc_info { unsigned int num_areas; }; +extern const struct rcar_sysc_info r8a7743_sysc_info; +extern const struct rcar_sysc_info r8a7745_sysc_info; extern const struct rcar_sysc_info r8a7779_sysc_info; extern const struct rcar_sysc_info r8a7790_sysc_info; extern const struct rcar_sysc_info r8a7791_sysc_info; diff --git a/drivers/soc/renesas/renesas-soc.c 
b/drivers/soc/renesas/renesas-soc.c new file mode 100644 index 0000000000000000000000000000000000000000..330960312296f603075fbcd0296f2693df8889aa --- /dev/null +++ b/drivers/soc/renesas/renesas-soc.c @@ -0,0 +1,257 @@ +/* + * Renesas SoC Identification + * + * Copyright (C) 2014-2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + + +struct renesas_family { + const char name[16]; + u32 reg; /* CCCR or PRR, if not in DT */ +}; + +static const struct renesas_family fam_rcar_gen1 __initconst __maybe_unused = { + .name = "R-Car Gen1", + .reg = 0xff000044, /* PRR (Product Register) */ +}; + +static const struct renesas_family fam_rcar_gen2 __initconst __maybe_unused = { + .name = "R-Car Gen2", + .reg = 0xff000044, /* PRR (Product Register) */ +}; + +static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = { + .name = "R-Car Gen3", + .reg = 0xfff00044, /* PRR (Product Register) */ +}; + +static const struct renesas_family fam_rmobile __initconst __maybe_unused = { + .name = "R-Mobile", + .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */ +}; + +static const struct renesas_family fam_rza __initconst __maybe_unused = { + .name = "RZ/A", +}; + +static const struct renesas_family fam_rzg __initconst __maybe_unused = { + .name = "RZ/G", + .reg = 0xff000044, /* PRR (Product Register) */ +}; + +static const struct renesas_family fam_shmobile __initconst __maybe_unused = { + .name = "SH-Mobile", + .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */ +}; + + +struct renesas_soc { + const struct renesas_family *family; + u8 id; +}; + +static const struct renesas_soc soc_rz_a1h __initconst __maybe_unused = { + .family = &fam_rza, +}; + +static const struct renesas_soc soc_rmobile_ape6 __initconst __maybe_unused = { + .family = &fam_rmobile, + .id = 0x3f, +}; + +static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = { + .family = &fam_rmobile, + .id = 0x40, +}; + +static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = { + .family = &fam_rzg, + .id = 0x47, +}; + +static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = { + .family = &fam_rzg, + .id = 0x4c, +}; + +static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = { + .family = &fam_rcar_gen1, +}; + +static const struct renesas_soc soc_rcar_h1 __initconst __maybe_unused = { + .family = &fam_rcar_gen1, + .id = 0x3b, +}; + +static const struct renesas_soc soc_rcar_h2 __initconst __maybe_unused = { + .family = &fam_rcar_gen2, + .id = 0x45, +}; + +static const struct renesas_soc soc_rcar_m2_w __initconst __maybe_unused = { + .family = &fam_rcar_gen2, + .id = 0x47, +}; + +static const struct renesas_soc soc_rcar_v2h __initconst __maybe_unused = { + .family = &fam_rcar_gen2, + .id = 0x4a, +}; + +static const struct renesas_soc soc_rcar_m2_n __initconst __maybe_unused = { + .family = &fam_rcar_gen2, + .id = 0x4b, +}; + +static const struct renesas_soc soc_rcar_e2 __initconst __maybe_unused = { + .family = &fam_rcar_gen2, + .id = 0x4c, +}; + +static const struct renesas_soc 
soc_rcar_h3 __initconst __maybe_unused = { + .family = &fam_rcar_gen3, + .id = 0x4f, +}; + +static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = { + .family = &fam_rcar_gen3, + .id = 0x52, +}; + +static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = { + .family = &fam_shmobile, + .id = 0x37, +}; + + +static const struct of_device_id renesas_socs[] __initconst = { +#ifdef CONFIG_ARCH_R7S72100 + { .compatible = "renesas,r7s72100", .data = &soc_rz_a1h }, +#endif +#ifdef CONFIG_ARCH_R8A73A4 + { .compatible = "renesas,r8a73a4", .data = &soc_rmobile_ape6 }, +#endif +#ifdef CONFIG_ARCH_R8A7740 + { .compatible = "renesas,r8a7740", .data = &soc_rmobile_a1 }, +#endif +#ifdef CONFIG_ARCH_R8A7743 + { .compatible = "renesas,r8a7743", .data = &soc_rz_g1m }, +#endif +#ifdef CONFIG_ARCH_R8A7745 + { .compatible = "renesas,r8a7745", .data = &soc_rz_g1e }, +#endif +#ifdef CONFIG_ARCH_R8A7778 + { .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a }, +#endif +#ifdef CONFIG_ARCH_R8A7779 + { .compatible = "renesas,r8a7779", .data = &soc_rcar_h1 }, +#endif +#ifdef CONFIG_ARCH_R8A7790 + { .compatible = "renesas,r8a7790", .data = &soc_rcar_h2 }, +#endif +#ifdef CONFIG_ARCH_R8A7791 + { .compatible = "renesas,r8a7791", .data = &soc_rcar_m2_w }, +#endif +#ifdef CONFIG_ARCH_R8A7792 + { .compatible = "renesas,r8a7792", .data = &soc_rcar_v2h }, +#endif +#ifdef CONFIG_ARCH_R8A7793 + { .compatible = "renesas,r8a7793", .data = &soc_rcar_m2_n }, +#endif +#ifdef CONFIG_ARCH_R8A7794 + { .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 }, +#endif +#ifdef CONFIG_ARCH_R8A7795 + { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 }, +#endif +#ifdef CONFIG_ARCH_R8A7796 + { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w }, +#endif +#ifdef CONFIG_ARCH_SH73A0 + { .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 }, +#endif + { /* sentinel */ } +}; + +static int __init renesas_soc_init(void) +{ + struct soc_device_attribute *soc_dev_attr; + const struct renesas_family *family; + const struct of_device_id *match; + const struct renesas_soc *soc; + void __iomem *chipid = NULL; + struct soc_device *soc_dev; + struct device_node *np; + unsigned int product; + + match = of_match_node(renesas_socs, of_root); + if (!match) + return -ENODEV; + + soc = match->data; + family = soc->family; + + /* Try PRR first, then hardcoded fallback */ + np = of_find_compatible_node(NULL, NULL, "renesas,prr"); + if (np) { + chipid = of_iomap(np, 0); + of_node_put(np); + } else if (soc->id) { + chipid = ioremap(family->reg, 4); + } + if (chipid) { + product = readl(chipid); + iounmap(chipid); + if (soc->id && ((product >> 8) & 0xff) != soc->id) { + pr_warn("SoC mismatch (product = 0x%x)\n", product); + return -ENODEV; + } + } + + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return -ENOMEM; + + np = of_find_node_by_path("/"); + of_property_read_string(np, "model", &soc_dev_attr->machine); + of_node_put(np); + + soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL); + soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1, + GFP_KERNEL); + if (chipid) + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u", + ((product >> 4) & 0x0f) + 1, + product & 0xf); + + pr_info("Detected Renesas %s %s %s\n", soc_dev_attr->family, + soc_dev_attr->soc_id, soc_dev_attr->revision ?: ""); + + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR(soc_dev)) { + kfree(soc_dev_attr->revision); + kfree_const(soc_dev_attr->soc_id); + 
kfree_const(soc_dev_attr->family); + kfree(soc_dev_attr); + return PTR_ERR(soc_dev); + } + + return 0; +} +core_initcall(renesas_soc_init); diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c index 7acd1517dd372cf2922096be88a475f23a4de856..1c78c42416c69b36e26b3572b8258b753f10ad6e 100644 --- a/drivers/soc/rockchip/pm_domains.c +++ b/drivers/soc/rockchip/pm_domains.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -105,12 +106,24 @@ static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd) return (val & pd_info->idle_mask) == pd_info->idle_mask; } +static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu) +{ + unsigned int val; + + regmap_read(pmu->regmap, pmu->info->ack_offset, &val); + return val; +} + static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd, bool idle) { const struct rockchip_domain_info *pd_info = pd->info; + struct generic_pm_domain *genpd = &pd->genpd; struct rockchip_pmu *pmu = pd->pmu; + unsigned int target_ack; unsigned int val; + bool is_idle; + int ret; if (pd_info->req_mask == 0) return 0; @@ -120,12 +133,26 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd, dsb(sy); - do { - regmap_read(pmu->regmap, pmu->info->ack_offset, &val); - } while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0)); + /* Wait util idle_ack = 1 */ + target_ack = idle ? pd_info->ack_mask : 0; + ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val, + (val & pd_info->ack_mask) == target_ack, + 0, 10000); + if (ret) { + dev_err(pmu->dev, + "failed to get ack on domain '%s', val=0x%x\n", + genpd->name, val); + return ret; + } - while (rockchip_pmu_domain_is_idle(pd) != idle) - cpu_relax(); + ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd, + is_idle, is_idle == idle, 0, 10000); + if (ret) { + dev_err(pmu->dev, + "failed to set idle on domain '%s', val=%d\n", + genpd->name, is_idle); + return ret; + } return 0; } @@ -198,6 +225,8 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd, bool on) { struct rockchip_pmu *pmu = pd->pmu; + struct generic_pm_domain *genpd = &pd->genpd; + bool is_on; if (pd->info->pwr_mask == 0) return; @@ -207,8 +236,13 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd, dsb(sy); - while (rockchip_pmu_domain_is_on(pd) != on) - cpu_relax(); + if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on, + is_on == on, 0, 10000)) { + dev_err(pmu->dev, + "failed to set domain '%s', val=%d\n", + genpd->name, is_on); + return; + } } static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on) @@ -445,7 +479,16 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd) { - int i; + int i, ret; + + /* + * We're in the error cleanup already, so we only complain, + * but won't emit another error on top of the original one. + */ + ret = pm_genpd_remove(&pd->genpd); + if (ret < 0) + dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n", + pd->genpd.name, ret); for (i = 0; i < pd->num_clks; i++) { clk_unprepare(pd->clks[i]); @@ -597,10 +640,12 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev) * Configure power up and down transition delays for CORE * and GPU domains. 
*/ - rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset, - pmu_info->core_power_transition_time); - rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset, - pmu_info->gpu_power_transition_time); + if (pmu_info->core_power_transition_time) + rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset, + pmu_info->core_power_transition_time); + if (pmu_info->gpu_pwrcnt_offset) + rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset, + pmu_info->gpu_power_transition_time); error = -ENODEV; @@ -627,7 +672,11 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev) goto err_out; } - of_genpd_add_provider_onecell(np, &pmu->genpd_data); + error = of_genpd_add_provider_onecell(np, &pmu->genpd_data); + if (error) { + dev_err(dev, "failed to add provider: %d\n", error); + goto err_out; + } return 0; @@ -722,11 +771,7 @@ static const struct rockchip_pmu_info rk3399_pmu = { .idle_offset = 0x64, .ack_offset = 0x68, - .core_pwrcnt_offset = 0x9c, - .gpu_pwrcnt_offset = 0xa4, - - .core_power_transition_time = 24, - .gpu_power_transition_time = 24, + /* ARM Trusted Firmware manages power transition times */ .num_domains = ARRAY_SIZE(rk3399_pm_domains), .domain_info = rk3399_pm_domains, diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 03089ad2fc6574be9af2b236d7c5c3761c971b7e..e5e124c07066b8a0e202cfe2fd17fb71f979e71e 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -77,5 +77,19 @@ config ARCH_TEGRA_210_SOC controllers, such as GPIO, I2C, SPI, SDHCI, PCIe, SATA and XHCI, to name only a few. +config ARCH_TEGRA_186_SOC + bool "NVIDIA Tegra186 SoC" + select MAILBOX + select TEGRA_BPMP + select TEGRA_HSP_MBOX + select TEGRA_IVC + help + Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a + combination of Denver and Cortex-A57 CPU cores and a GPU based on + the Pascal architecture. It contains an ADSP with a Cortex-A9 CPU + used for audio processing, hardware video encoders/decoders with + multi-format support, ISP for image capture processing and BPMP for + power management. 
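For reference, the Rockchip pm_domains hunk above replaces the open-coded ack/idle polling loops with readx_poll_timeout_atomic() from <linux/iopoll.h>. The following is only a minimal sketch of that pattern under stated assumptions; struct my_device, my_read_status() and my_wait_for_ack() are hypothetical names for illustration and are not part of the patch.

#include <linux/iopoll.h>

struct my_device;					/* hypothetical device context */
unsigned int my_read_status(struct my_device *dev);	/* hypothetical status reader */

static int my_wait_for_ack(struct my_device *dev, unsigned int mask)
{
	unsigned int val;

	/*
	 * Re-read my_read_status(dev) into val until every bit in mask is
	 * set; the atomic variant busy-waits (0 us delay between reads) and
	 * returns -ETIMEDOUT once 10000 us have elapsed, 0 on success.
	 */
	return readx_poll_timeout_atomic(my_read_status, dev, val,
					 (val & mask) == mask, 0, 10000);
}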
+ endif endif diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 7792ed88d80b4ecbb0ca7e3cdb2353ab0eaa4629..e233dd5dcab3d711f9c5573ffdb918436f4853df 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -45,29 +45,31 @@ #include #define PMC_CNTRL 0x0 -#define PMC_CNTRL_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */ -#define PMC_CNTRL_SYSCLK_OE (1 << 11) /* system clock enable */ -#define PMC_CNTRL_SIDE_EFFECT_LP0 (1 << 14) /* LP0 when CPU pwr gated */ -#define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */ -#define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */ -#define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */ -#define PMC_CNTRL_MAIN_RST (1 << 4) +#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */ +#define PMC_CNTRL_CPU_PWRREQ_OE BIT(16) /* CPU pwr req enable */ +#define PMC_CNTRL_CPU_PWRREQ_POLARITY BIT(15) /* CPU pwr req polarity */ +#define PMC_CNTRL_SIDE_EFFECT_LP0 BIT(14) /* LP0 when CPU pwr gated */ +#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */ +#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */ +#define PMC_CNTRL_MAIN_RST BIT(4) #define DPD_SAMPLE 0x020 -#define DPD_SAMPLE_ENABLE (1 << 0) +#define DPD_SAMPLE_ENABLE BIT(0) #define DPD_SAMPLE_DISABLE (0 << 0) #define PWRGATE_TOGGLE 0x30 -#define PWRGATE_TOGGLE_START (1 << 8) +#define PWRGATE_TOGGLE_START BIT(8) #define REMOVE_CLAMPING 0x34 #define PWRGATE_STATUS 0x38 +#define PMC_PWR_DET 0x48 + #define PMC_SCRATCH0 0x50 -#define PMC_SCRATCH0_MODE_RECOVERY (1 << 31) -#define PMC_SCRATCH0_MODE_BOOTLOADER (1 << 30) -#define PMC_SCRATCH0_MODE_RCM (1 << 1) +#define PMC_SCRATCH0_MODE_RECOVERY BIT(31) +#define PMC_SCRATCH0_MODE_BOOTLOADER BIT(30) +#define PMC_SCRATCH0_MODE_RCM BIT(1) #define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \ PMC_SCRATCH0_MODE_BOOTLOADER | \ PMC_SCRATCH0_MODE_RCM) @@ -75,11 +77,13 @@ #define PMC_CPUPWRGOOD_TIMER 0xc8 #define PMC_CPUPWROFF_TIMER 0xcc +#define PMC_PWR_DET_VALUE 0xe4 + #define PMC_SCRATCH41 0x140 #define PMC_SENSOR_CTRL 0x1b0 -#define PMC_SENSOR_CTRL_SCRATCH_WRITE (1 << 2) -#define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1) +#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2) +#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1) #define PMC_RST_STATUS 0x1b4 #define PMC_RST_STATUS_POR 0 @@ -90,10 +94,10 @@ #define PMC_RST_STATUS_AOTAG 5 #define IO_DPD_REQ 0x1b8 -#define IO_DPD_REQ_CODE_IDLE (0 << 30) -#define IO_DPD_REQ_CODE_OFF (1 << 30) -#define IO_DPD_REQ_CODE_ON (2 << 30) -#define IO_DPD_REQ_CODE_MASK (3 << 30) +#define IO_DPD_REQ_CODE_IDLE (0U << 30) +#define IO_DPD_REQ_CODE_OFF (1U << 30) +#define IO_DPD_REQ_CODE_ON (2U << 30) +#define IO_DPD_REQ_CODE_MASK (3U << 30) #define IO_DPD_STATUS 0x1bc #define IO_DPD2_REQ 0x1c0 @@ -101,16 +105,16 @@ #define SEL_DPD_TIM 0x1c8 #define PMC_SCRATCH54 0x258 -#define PMC_SCRATCH54_DATA_SHIFT 8 -#define PMC_SCRATCH54_ADDR_SHIFT 0 +#define PMC_SCRATCH54_DATA_SHIFT 8 +#define PMC_SCRATCH54_ADDR_SHIFT 0 #define PMC_SCRATCH55 0x25c -#define PMC_SCRATCH55_RESET_TEGRA (1 << 31) -#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27 -#define PMC_SCRATCH55_PINMUX_SHIFT 24 -#define PMC_SCRATCH55_16BITOP (1 << 15) -#define PMC_SCRATCH55_CHECKSUM_SHIFT 16 -#define PMC_SCRATCH55_I2CSLV1_SHIFT 0 +#define PMC_SCRATCH55_RESET_TEGRA BIT(31) +#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27 +#define PMC_SCRATCH55_PINMUX_SHIFT 24 +#define PMC_SCRATCH55_16BITOP BIT(15) +#define PMC_SCRATCH55_CHECKSUM_SHIFT 16 +#define PMC_SCRATCH55_I2CSLV1_SHIFT 0 #define GPU_RG_CNTRL 0x2d4 @@ -124,6 
+128,12 @@ struct tegra_powergate { unsigned int num_resets; }; +struct tegra_io_pad_soc { + enum tegra_io_pad id; + unsigned int dpd; + unsigned int voltage; +}; + struct tegra_pmc_soc { unsigned int num_powergates; const char *const *powergates; @@ -132,6 +142,9 @@ struct tegra_pmc_soc { bool has_tsense_reset; bool has_gpu_clamps; + + const struct tegra_io_pad_soc *io_pads; + unsigned int num_io_pads; }; /** @@ -238,8 +251,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name) return i; } - dev_err(pmc->dev, "powergate %s not found\n", name); - return -ENODEV; } @@ -456,13 +467,12 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg) static int tegra_genpd_power_on(struct generic_pm_domain *domain) { struct tegra_powergate *pg = to_powergate(domain); - struct tegra_pmc *pmc = pg->pmc; int err; err = tegra_powergate_power_up(pg, true); if (err) - dev_err(pmc->dev, "failed to turn on PM domain %s: %d\n", - pg->genpd.name, err); + pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name, + err); return err; } @@ -470,13 +480,12 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain) static int tegra_genpd_power_off(struct generic_pm_domain *domain) { struct tegra_powergate *pg = to_powergate(domain); - struct tegra_pmc *pmc = pg->pmc; int err; err = tegra_powergate_power_down(pg); if (err) - dev_err(pmc->dev, "failed to turn off PM domain %s: %d\n", - pg->genpd.name, err); + pr_err("failed to turn off PM domain %s: %d\n", + pg->genpd.name, err); return err; } @@ -801,8 +810,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) id = tegra_powergate_lookup(pmc, np->name); if (id < 0) { - dev_err(pmc->dev, "powergate lookup failed for %s: %d\n", - np->name, id); + pr_err("powergate lookup failed for %s: %d\n", np->name, id); goto free_mem; } @@ -822,20 +830,22 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) err = tegra_powergate_of_get_clks(pg, np); if (err < 0) { - dev_err(pmc->dev, "failed to get clocks for %s: %d\n", - np->name, err); + pr_err("failed to get clocks for %s: %d\n", np->name, err); goto set_available; } err = tegra_powergate_of_get_resets(pg, np, off); if (err < 0) { - dev_err(pmc->dev, "failed to get resets for %s: %d\n", - np->name, err); + pr_err("failed to get resets for %s: %d\n", np->name, err); goto remove_clks; } - if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) - goto power_on_cleanup; + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { + if (off) + WARN_ON(tegra_powergate_power_up(pg, true)); + + goto remove_resets; + } /* * FIXME: If XHCI is enabled for Tegra, then power-up the XUSB @@ -846,25 +856,33 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) * to be unused. 
*/ if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) && - (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) - goto power_on_cleanup; + (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) { + if (off) + WARN_ON(tegra_powergate_power_up(pg, true)); + + goto remove_resets; + } - pm_genpd_init(&pg->genpd, NULL, off); + err = pm_genpd_init(&pg->genpd, NULL, off); + if (err < 0) { + pr_err("failed to initialise PM domain %s: %d\n", np->name, + err); + goto remove_resets; + } err = of_genpd_add_provider_simple(np, &pg->genpd); if (err < 0) { - dev_err(pmc->dev, "failed to add genpd provider for %s: %d\n", - np->name, err); - goto remove_resets; + pr_err("failed to add PM domain provider for %s: %d\n", + np->name, err); + goto remove_genpd; } - dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name); + pr_debug("added PM domain %s\n", pg->genpd.name); return; -power_on_cleanup: - if (off) - WARN_ON(tegra_powergate_power_up(pg, true)); +remove_genpd: + pm_genpd_remove(&pg->genpd); remove_resets: while (pg->num_resets--) @@ -908,21 +926,36 @@ static void tegra_powergate_init(struct tegra_pmc *pmc, of_node_put(np); } -static int tegra_io_rail_prepare(unsigned int id, unsigned long *request, - unsigned long *status, unsigned int *bit) +static const struct tegra_io_pad_soc * +tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id) { + unsigned int i; + + for (i = 0; i < pmc->soc->num_io_pads; i++) + if (pmc->soc->io_pads[i].id == id) + return &pmc->soc->io_pads[i]; + + return NULL; +} + +static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request, + unsigned long *status, u32 *mask) +{ + const struct tegra_io_pad_soc *pad; unsigned long rate, value; - *bit = id % 32; + pad = tegra_io_pad_find(pmc, id); + if (!pad) { + pr_err("invalid I/O pad ID %u\n", id); + return -ENOENT; + } - /* - * There are two sets of 30 bits to select IO rails, but bits 30 and - * 31 are control bits rather than IO rail selection bits. - */ - if (id > 63 || *bit == 30 || *bit == 31) - return -EINVAL; + if (pad->dpd == UINT_MAX) + return -ENOTSUPP; - if (id < 32) { + *mask = BIT(pad->dpd % 32); + + if (pad->dpd < 32) { *status = IO_DPD_STATUS; *request = IO_DPD_REQ; } else { @@ -931,6 +964,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request, } rate = clk_get_rate(pmc->clk); + if (!rate) { + pr_err("failed to get clock rate\n"); + return -ENODEV; + } tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE); @@ -942,10 +979,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request, return 0; } -static int tegra_io_rail_poll(unsigned long offset, unsigned long mask, - unsigned long val, unsigned long timeout) +static int tegra_io_pad_poll(unsigned long offset, u32 mask, + u32 val, unsigned long timeout) { - unsigned long value; + u32 value; timeout = jiffies + msecs_to_jiffies(timeout); @@ -960,67 +997,164 @@ static int tegra_io_rail_poll(unsigned long offset, unsigned long mask, return -ETIMEDOUT; } -static void tegra_io_rail_unprepare(void) +static void tegra_io_pad_unprepare(void) { tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE); } -int tegra_io_rail_power_on(unsigned int id) +/** + * tegra_io_pad_power_enable() - enable power to I/O pad + * @id: Tegra I/O pad ID for which to enable power + * + * Returns: 0 on success or a negative error code on failure. 
+ */ +int tegra_io_pad_power_enable(enum tegra_io_pad id) { unsigned long request, status; - unsigned int bit; + u32 mask; int err; mutex_lock(&pmc->powergates_lock); - err = tegra_io_rail_prepare(id, &request, &status, &bit); - if (err) - goto error; + err = tegra_io_pad_prepare(id, &request, &status, &mask); + if (err < 0) { + pr_err("failed to prepare I/O pad: %d\n", err); + goto unlock; + } - tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request); + tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request); - err = tegra_io_rail_poll(status, BIT(bit), 0, 250); - if (err) { - pr_info("tegra_io_rail_poll() failed: %d\n", err); - goto error; + err = tegra_io_pad_poll(status, mask, 0, 250); + if (err < 0) { + pr_err("failed to enable I/O pad: %d\n", err); + goto unlock; } - tegra_io_rail_unprepare(); + tegra_io_pad_unprepare(); -error: +unlock: mutex_unlock(&pmc->powergates_lock); - return err; } -EXPORT_SYMBOL(tegra_io_rail_power_on); +EXPORT_SYMBOL(tegra_io_pad_power_enable); -int tegra_io_rail_power_off(unsigned int id) +/** + * tegra_io_pad_power_disable() - disable power to I/O pad + * @id: Tegra I/O pad ID for which to disable power + * + * Returns: 0 on success or a negative error code on failure. + */ +int tegra_io_pad_power_disable(enum tegra_io_pad id) { unsigned long request, status; - unsigned int bit; + u32 mask; int err; mutex_lock(&pmc->powergates_lock); - err = tegra_io_rail_prepare(id, &request, &status, &bit); - if (err) { - pr_info("tegra_io_rail_prepare() failed: %d\n", err); - goto error; + err = tegra_io_pad_prepare(id, &request, &status, &mask); + if (err < 0) { + pr_err("failed to prepare I/O pad: %d\n", err); + goto unlock; } - tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request); + tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request); - err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250); - if (err) - goto error; + err = tegra_io_pad_poll(status, mask, mask, 250); + if (err < 0) { + pr_err("failed to disable I/O pad: %d\n", err); + goto unlock; + } - tegra_io_rail_unprepare(); + tegra_io_pad_unprepare(); -error: +unlock: mutex_unlock(&pmc->powergates_lock); - return err; } +EXPORT_SYMBOL(tegra_io_pad_power_disable); + +int tegra_io_pad_set_voltage(enum tegra_io_pad id, + enum tegra_io_pad_voltage voltage) +{ + const struct tegra_io_pad_soc *pad; + u32 value; + + pad = tegra_io_pad_find(pmc, id); + if (!pad) + return -ENOENT; + + if (pad->voltage == UINT_MAX) + return -ENOTSUPP; + + mutex_lock(&pmc->powergates_lock); + + /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */ + value = tegra_pmc_readl(PMC_PWR_DET); + value |= BIT(pad->voltage); + tegra_pmc_writel(value, PMC_PWR_DET); + + /* update I/O voltage */ + value = tegra_pmc_readl(PMC_PWR_DET_VALUE); + + if (voltage == TEGRA_IO_PAD_1800000UV) + value &= ~BIT(pad->voltage); + else + value |= BIT(pad->voltage); + + tegra_pmc_writel(value, PMC_PWR_DET_VALUE); + + mutex_unlock(&pmc->powergates_lock); + + usleep_range(100, 250); + + return 0; +} +EXPORT_SYMBOL(tegra_io_pad_set_voltage); + +int tegra_io_pad_get_voltage(enum tegra_io_pad id) +{ + const struct tegra_io_pad_soc *pad; + u32 value; + + pad = tegra_io_pad_find(pmc, id); + if (!pad) + return -ENOENT; + + if (pad->voltage == UINT_MAX) + return -ENOTSUPP; + + value = tegra_pmc_readl(PMC_PWR_DET_VALUE); + + if ((value & BIT(pad->voltage)) == 0) + return TEGRA_IO_PAD_1800000UV; + + return TEGRA_IO_PAD_3300000UV; +} +EXPORT_SYMBOL(tegra_io_pad_get_voltage); + +/** + * tegra_io_rail_power_on() - enable power to I/O rail + * @id: Tegra I/O pad ID for which 
to enable power + * + * See also: tegra_io_pad_power_enable() + */ +int tegra_io_rail_power_on(unsigned int id) +{ + return tegra_io_pad_power_enable(id); +} +EXPORT_SYMBOL(tegra_io_rail_power_on); + +/** + * tegra_io_rail_power_off() - disable power to I/O rail + * @id: Tegra I/O pad ID for which to disable power + * + * See also: tegra_io_pad_power_disable() + */ +int tegra_io_rail_power_off(unsigned int id) +{ + return tegra_io_pad_power_disable(id); +} EXPORT_SYMBOL(tegra_io_rail_power_off); #ifdef CONFIG_PM_SLEEP @@ -1454,6 +1588,39 @@ static const u8 tegra124_cpu_powergates[] = { TEGRA_POWERGATE_CPU3, }; +static const struct tegra_io_pad_soc tegra124_io_pads[] = { + { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_BB, .dpd = 15, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_COMP, .dpd = 22, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_HV, .dpd = 38, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_NAND, .dpd = 13, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 35, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SYS_DDC, .dpd = 58, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX }, +}; + static const struct tegra_pmc_soc tegra124_pmc_soc = { .num_powergates = ARRAY_SIZE(tegra124_powergates), .powergates = tegra124_powergates, @@ -1461,6 +1628,8 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = { .cpu_powergates = tegra124_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, + .num_io_pads = ARRAY_SIZE(tegra124_io_pads), + .io_pads = tegra124_io_pads, }; static const char * const tegra210_powergates[] = { @@ -1497,6 +1666,47 @@ static const u8 tegra210_cpu_powergates[] = { TEGRA_POWERGATE_CPU3, }; +static const struct tegra_io_pad_soc tegra210_io_pads[] = { + { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = 5 }, + { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 18 }, + { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = 10 }, + { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIC, .dpd = 42, .voltage = UINT_MAX 
}, + { .id = TEGRA_IO_PAD_CSID, .dpd = 43, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIF, .dpd = 45, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = 19 }, + { .id = TEGRA_IO_PAD_DEBUG_NONAO, .dpd = 26, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DMIC, .dpd = 50, .voltage = 20 }, + { .id = TEGRA_IO_PAD_DP, .dpd = 51, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_EMMC, .dpd = 35, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_EMMC2, .dpd = 37, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_GPIO, .dpd = 27, .voltage = 21 }, + { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = UINT_MAX, .voltage = 11 }, + { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = 12 }, + { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = 13 }, + { .id = TEGRA_IO_PAD_SPI, .dpd = 46, .voltage = 22 }, + { .id = TEGRA_IO_PAD_SPI_HV, .dpd = 47, .voltage = 23 }, + { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = 2 }, + { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_USB3, .dpd = 18, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX }, +}; + static const struct tegra_pmc_soc tegra210_pmc_soc = { .num_powergates = ARRAY_SIZE(tegra210_powergates), .powergates = tegra210_powergates, @@ -1504,6 +1714,8 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .cpu_powergates = tegra210_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, + .num_io_pads = ARRAY_SIZE(tegra210_io_pads), + .io_pads = tegra210_io_pads, }; static const struct of_device_id tegra_pmc_match[] = { diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index b73e3534f67b2778c2047d583320dba08a20b61c..eacad57f2977650087ba2527ddf6b48979653766 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -1228,7 +1228,7 @@ static int knav_setup_queue_range(struct knav_device *kdev, range->num_irqs++; - if (oirq.args_count == 3) + if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) range->irqs[i].cpu_map = (oirq.args[2] & 0x0000ff00) >> 8; } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index b7995474148c7eb91b2c2f87459b0c9bd446d3f2..ec4aa252d6e8c1c761a47246851ad2645382516e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -67,6 +67,13 @@ config SPI_ATH79 This enables support for the SPI controller present on the Atheros AR71XX/AR724X/AR913X SoCs. +config SPI_ARMADA_3700 + tristate "Marvell Armada 3700 SPI Controller" + depends on (ARCH_MVEBU && OF) || COMPILE_TEST + help + This enables support for the SPI controller present on the + Marvell Armada 3700 SoCs. 
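The Tegra PMC hunks further up export tegra_io_pad_power_enable() and tegra_io_pad_set_voltage() as the replacement for the old I/O rail interface. Below is only a minimal consumer-side sketch: it assumes the declarations live in <soc/tegra/pmc.h> (the header is not shown in this patch) and uses the SDMMC1 pad purely as an example; example_sdmmc1_to_1v8() is a hypothetical caller, not part of the patch.

#include <soc/tegra/pmc.h>	/* assumed location of the tegra_io_pad_*() declarations */

static int example_sdmmc1_to_1v8(void)
{
	int err;

	/* take the pad out of deep power down before touching its voltage */
	err = tegra_io_pad_power_enable(TEGRA_IO_PAD_SDMMC1);
	if (err < 0)
		return err;

	/*
	 * Switch the pad to 1.8 V signalling; pads whose .voltage entry is
	 * UINT_MAX in the tables above return -ENOTSUPP here.
	 */
	return tegra_io_pad_set_voltage(TEGRA_IO_PAD_SDMMC1,
					TEGRA_IO_PAD_1800000UV);
}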
+ config SPI_ATMEL tristate "Atmel SPI Controller" depends on HAS_DMA @@ -264,6 +271,12 @@ config SPI_FALCON has only been tested with m25p80 type chips. The hardware has no support for other types of SPI peripherals. +config SPI_FSL_LPSPI + tristate "Freescale i.MX LPSPI controller" + depends on ARCH_MXC || COMPILE_TEST + help + This enables Freescale i.MX LPSPI controllers in master mode. + config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GPIOLIB || COMPILE_TEST @@ -373,7 +386,6 @@ config SPI_FSL_DSPI config SPI_FSL_ESPI tristate "Freescale eSPI controller" depends on FSL_SOC - select SPI_FSL_LIB help This enables using the Freescale eSPI controllers in master mode. From MPC8536, 85xx platform uses the controller, and all P10xx, @@ -451,7 +463,8 @@ config SPI_ORION tristate "Orion SPI master" depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST help - This enables using the SPI master controller on the Orion chips. + This enables using the SPI master controller on the Orion + and MVEBU chips. config SPI_PIC32 tristate "Microchip PIC32 series SPI" @@ -553,7 +566,7 @@ config SPI_S3C24XX_FIQ config SPI_S3C64XX tristate "Samsung S3C64XX series type SPI" - depends on (PLAT_SAMSUNG || ARCH_EXYNOS) + depends on (PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST) help SPI driver for Samsung S3C64XX and newer SoCs. diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index aa939d95552134fc05b15f7ee0b142fdb4ba4253..7a6b64662c82c71106f3af94a61190ba1f98a218 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o # SPI master controller drivers (bus) obj-$(CONFIG_SPI_ALTERA) += spi-altera.o +obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o obj-$(CONFIG_SPI_ATH79) += spi-ath79.o obj-$(CONFIG_SPI_AU1550) += spi-au1550.o @@ -43,6 +44,7 @@ obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o +obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c new file mode 100644 index 0000000000000000000000000000000000000000..e89da0af45d2518ef26670f7bbd50b875692872b --- /dev/null +++ b/drivers/spi/spi-armada-3700.c @@ -0,0 +1,923 @@ +/* + * Marvell Armada-3700 SPI controller driver + * + * Copyright (C) 2016 Marvell Ltd. + * + * Author: Wilson Ding + * Author: Romain Perier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "armada_3700_spi" + +#define A3700_SPI_TIMEOUT 10 + +/* SPI Register Offest */ +#define A3700_SPI_IF_CTRL_REG 0x00 +#define A3700_SPI_IF_CFG_REG 0x04 +#define A3700_SPI_DATA_OUT_REG 0x08 +#define A3700_SPI_DATA_IN_REG 0x0C +#define A3700_SPI_IF_INST_REG 0x10 +#define A3700_SPI_IF_ADDR_REG 0x14 +#define A3700_SPI_IF_RMODE_REG 0x18 +#define A3700_SPI_IF_HDR_CNT_REG 0x1C +#define A3700_SPI_IF_DIN_CNT_REG 0x20 +#define A3700_SPI_IF_TIME_REG 0x24 +#define A3700_SPI_INT_STAT_REG 0x28 +#define A3700_SPI_INT_MASK_REG 0x2C + +/* A3700_SPI_IF_CTRL_REG */ +#define A3700_SPI_EN BIT(16) +#define A3700_SPI_ADDR_NOT_CONFIG BIT(12) +#define A3700_SPI_WFIFO_OVERFLOW BIT(11) +#define A3700_SPI_WFIFO_UNDERFLOW BIT(10) +#define A3700_SPI_RFIFO_OVERFLOW BIT(9) +#define A3700_SPI_RFIFO_UNDERFLOW BIT(8) +#define A3700_SPI_WFIFO_FULL BIT(7) +#define A3700_SPI_WFIFO_EMPTY BIT(6) +#define A3700_SPI_RFIFO_FULL BIT(5) +#define A3700_SPI_RFIFO_EMPTY BIT(4) +#define A3700_SPI_WFIFO_RDY BIT(3) +#define A3700_SPI_RFIFO_RDY BIT(2) +#define A3700_SPI_XFER_RDY BIT(1) +#define A3700_SPI_XFER_DONE BIT(0) + +/* A3700_SPI_IF_CFG_REG */ +#define A3700_SPI_WFIFO_THRS BIT(28) +#define A3700_SPI_RFIFO_THRS BIT(24) +#define A3700_SPI_AUTO_CS BIT(20) +#define A3700_SPI_DMA_RD_EN BIT(18) +#define A3700_SPI_FIFO_MODE BIT(17) +#define A3700_SPI_SRST BIT(16) +#define A3700_SPI_XFER_START BIT(15) +#define A3700_SPI_XFER_STOP BIT(14) +#define A3700_SPI_INST_PIN BIT(13) +#define A3700_SPI_ADDR_PIN BIT(12) +#define A3700_SPI_DATA_PIN1 BIT(11) +#define A3700_SPI_DATA_PIN0 BIT(10) +#define A3700_SPI_FIFO_FLUSH BIT(9) +#define A3700_SPI_RW_EN BIT(8) +#define A3700_SPI_CLK_POL BIT(7) +#define A3700_SPI_CLK_PHA BIT(6) +#define A3700_SPI_BYTE_LEN BIT(5) +#define A3700_SPI_CLK_PRESCALE BIT(0) +#define A3700_SPI_CLK_PRESCALE_MASK (0x1f) + +#define A3700_SPI_WFIFO_THRS_BIT 28 +#define A3700_SPI_RFIFO_THRS_BIT 24 +#define A3700_SPI_FIFO_THRS_MASK 0x7 + +#define A3700_SPI_DATA_PIN_MASK 0x3 + +/* A3700_SPI_IF_HDR_CNT_REG */ +#define A3700_SPI_DUMMY_CNT_BIT 12 +#define A3700_SPI_DUMMY_CNT_MASK 0x7 +#define A3700_SPI_RMODE_CNT_BIT 8 +#define A3700_SPI_RMODE_CNT_MASK 0x3 +#define A3700_SPI_ADDR_CNT_BIT 4 +#define A3700_SPI_ADDR_CNT_MASK 0x7 +#define A3700_SPI_INSTR_CNT_BIT 0 +#define A3700_SPI_INSTR_CNT_MASK 0x3 + +/* A3700_SPI_IF_TIME_REG */ +#define A3700_SPI_CLK_CAPT_EDGE BIT(7) + +/* Flags and macros for struct a3700_spi */ +#define A3700_INSTR_CNT 1 +#define A3700_ADDR_CNT 3 +#define A3700_DUMMY_CNT 1 + +struct a3700_spi { + struct spi_master *master; + void __iomem *base; + struct clk *clk; + unsigned int irq; + unsigned int flags; + bool xmit_data; + const u8 *tx_buf; + u8 *rx_buf; + size_t buf_len; + u8 byte_len; + u32 wait_mask; + struct completion done; + u32 addr_cnt; + u32 instr_cnt; + size_t hdr_cnt; +}; + +static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset) +{ + return readl(a3700_spi->base + offset); +} + +static void spireg_write(struct a3700_spi *a3700_spi, u32 offset, u32 data) +{ + writel(data, a3700_spi->base + offset); +} + +static void a3700_spi_auto_cs_unset(struct a3700_spi *a3700_spi) +{ + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val &= ~A3700_SPI_AUTO_CS; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); +} + +static void a3700_spi_activate_cs(struct a3700_spi *a3700_spi, unsigned int cs) +{ + u32 val; + + val = 
spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); + val |= (A3700_SPI_EN << cs); + spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val); +} + +static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi, + unsigned int cs) +{ + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); + val &= ~(A3700_SPI_EN << cs); + spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val); +} + +static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, + unsigned int pin_mode) +{ + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val &= ~(A3700_SPI_INST_PIN | A3700_SPI_ADDR_PIN); + val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1); + + switch (pin_mode) { + case 1: + break; + case 2: + val |= A3700_SPI_DATA_PIN0; + break; + case 4: + val |= A3700_SPI_DATA_PIN1; + break; + default: + dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode); + return -EINVAL; + } + + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + + return 0; +} + +static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi) +{ + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val |= A3700_SPI_FIFO_MODE; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); +} + +static void a3700_spi_mode_set(struct a3700_spi *a3700_spi, + unsigned int mode_bits) +{ + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + + if (mode_bits & SPI_CPOL) + val |= A3700_SPI_CLK_POL; + else + val &= ~A3700_SPI_CLK_POL; + + if (mode_bits & SPI_CPHA) + val |= A3700_SPI_CLK_PHA; + else + val &= ~A3700_SPI_CLK_PHA; + + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); +} + +static void a3700_spi_clock_set(struct a3700_spi *a3700_spi, + unsigned int speed_hz, u16 mode) +{ + u32 val; + u32 prescale; + + prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz); + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val = val & ~A3700_SPI_CLK_PRESCALE_MASK; + + val = val | (prescale & A3700_SPI_CLK_PRESCALE_MASK); + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + + if (prescale <= 2) { + val = spireg_read(a3700_spi, A3700_SPI_IF_TIME_REG); + val |= A3700_SPI_CLK_CAPT_EDGE; + spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val); + } + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA); + + if (mode & SPI_CPOL) + val |= A3700_SPI_CLK_POL; + + if (mode & SPI_CPHA) + val |= A3700_SPI_CLK_PHA; + + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); +} + +static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len) +{ + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + if (len == 4) + val |= A3700_SPI_BYTE_LEN; + else + val &= ~A3700_SPI_BYTE_LEN; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + + a3700_spi->byte_len = len; +} + +static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi) +{ + int timeout = A3700_SPI_TIMEOUT; + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val |= A3700_SPI_FIFO_FLUSH; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + + while (--timeout) { + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + if (!(val & A3700_SPI_FIFO_FLUSH)) + return 0; + udelay(1); + } + + return -ETIMEDOUT; +} + +static int a3700_spi_init(struct a3700_spi *a3700_spi) +{ + struct spi_master *master = a3700_spi->master; + u32 val; + int i, ret = 0; + + /* Reset SPI unit */ + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val |= A3700_SPI_SRST; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + + udelay(A3700_SPI_TIMEOUT); + + val = 
spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val &= ~A3700_SPI_SRST; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + + /* Disable AUTO_CS and deactivate all chip-selects */ + a3700_spi_auto_cs_unset(a3700_spi); + for (i = 0; i < master->num_chipselect; i++) + a3700_spi_deactivate_cs(a3700_spi, i); + + /* Enable FIFO mode */ + a3700_spi_fifo_mode_set(a3700_spi); + + /* Set SPI mode */ + a3700_spi_mode_set(a3700_spi, master->mode_bits); + + /* Reset counters */ + spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0); + spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, 0); + + /* Mask the interrupts and clear cause bits */ + spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); + spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U); + + return ret; +} + +static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct a3700_spi *a3700_spi; + u32 cause; + + a3700_spi = spi_master_get_devdata(master); + + /* Get interrupt causes */ + cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG); + + if (!cause || !(a3700_spi->wait_mask & cause)) + return IRQ_NONE; + + /* mask and acknowledge the SPI interrupts */ + spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); + spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause); + + /* Wake up the transfer */ + if (a3700_spi->wait_mask & cause) + complete(&a3700_spi->done); + + return IRQ_HANDLED; +} + +static bool a3700_spi_wait_completion(struct spi_device *spi) +{ + struct a3700_spi *a3700_spi; + unsigned int timeout; + unsigned int ctrl_reg; + unsigned long timeout_jiffies; + + a3700_spi = spi_master_get_devdata(spi->master); + + /* SPI interrupt is edge-triggered, which means an interrupt will + * be generated only when detecting a specific status bit changed + * from '0' to '1'. So when we start waiting for a interrupt, we + * need to check status bit in control reg first, if it is already 1, + * then we do not need to wait for interrupt + */ + ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); + if (a3700_spi->wait_mask & ctrl_reg) + return true; + + reinit_completion(&a3700_spi->done); + + spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, + a3700_spi->wait_mask); + + timeout_jiffies = msecs_to_jiffies(A3700_SPI_TIMEOUT); + timeout = wait_for_completion_timeout(&a3700_spi->done, + timeout_jiffies); + + a3700_spi->wait_mask = 0; + + if (timeout) + return true; + + /* there might be the case that right after we checked the + * status bits in this routine and before start to wait for + * interrupt by wait_for_completion_timeout, the interrupt + * happens, to avoid missing it we need to double check + * status bits in control reg, if it is already 1, then + * consider that we have the interrupt successfully and + * return true. 
+ */ + ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); + if (a3700_spi->wait_mask & ctrl_reg) + return true; + + spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); + + return true; +} + +static bool a3700_spi_transfer_wait(struct spi_device *spi, + unsigned int bit_mask) +{ + struct a3700_spi *a3700_spi; + + a3700_spi = spi_master_get_devdata(spi->master); + a3700_spi->wait_mask = bit_mask; + + return a3700_spi_wait_completion(spi); +} + +static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi, + unsigned int bytes) +{ + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_RFIFO_THRS_BIT); + val |= (bytes - 1) << A3700_SPI_RFIFO_THRS_BIT; + val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_WFIFO_THRS_BIT); + val |= (7 - bytes) << A3700_SPI_WFIFO_THRS_BIT; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); +} + +static void a3700_spi_transfer_setup(struct spi_device *spi, + struct spi_transfer *xfer) +{ + struct a3700_spi *a3700_spi; + unsigned int byte_len; + + a3700_spi = spi_master_get_devdata(spi->master); + + a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode); + + byte_len = xfer->bits_per_word >> 3; + + a3700_spi_fifo_thres_set(a3700_spi, byte_len); +} + +static void a3700_spi_set_cs(struct spi_device *spi, bool enable) +{ + struct a3700_spi *a3700_spi = spi_master_get_devdata(spi->master); + + if (!enable) + a3700_spi_activate_cs(a3700_spi, spi->chip_select); + else + a3700_spi_deactivate_cs(a3700_spi, spi->chip_select); +} + +static void a3700_spi_header_set(struct a3700_spi *a3700_spi) +{ + u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0; + u32 val = 0; + + /* Clear the header registers */ + spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0); + spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0); + spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0); + + /* Set header counters */ + if (a3700_spi->tx_buf) { + if (a3700_spi->buf_len <= a3700_spi->instr_cnt) { + instr_cnt = a3700_spi->buf_len; + } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt + + a3700_spi->addr_cnt)) { + instr_cnt = a3700_spi->instr_cnt; + addr_cnt = a3700_spi->buf_len - instr_cnt; + } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) { + instr_cnt = a3700_spi->instr_cnt; + addr_cnt = a3700_spi->addr_cnt; + /* Need to handle the normal write case with 1 byte + * data + */ + if (!a3700_spi->tx_buf[instr_cnt + addr_cnt]) + dummy_cnt = a3700_spi->buf_len - instr_cnt - + addr_cnt; + } + val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK) + << A3700_SPI_INSTR_CNT_BIT); + val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK) + << A3700_SPI_ADDR_CNT_BIT); + val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK) + << A3700_SPI_DUMMY_CNT_BIT); + } + spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val); + + /* Update the buffer length to be transferred */ + a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt); + + /* Set Instruction */ + val = 0; + while (instr_cnt--) { + val = (val << 8) | a3700_spi->tx_buf[0]; + a3700_spi->tx_buf++; + } + spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val); + + /* Set Address */ + val = 0; + while (addr_cnt--) { + val = (val << 8) | a3700_spi->tx_buf[0]; + a3700_spi->tx_buf++; + } + spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val); +} + +static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) +{ + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); + return (val & A3700_SPI_WFIFO_FULL); +} + +static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) +{ + u32 val; + int i = 0; + + while 
(!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { + val = 0; + if (a3700_spi->buf_len >= 4) { + val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf); + spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); + + a3700_spi->buf_len -= 4; + a3700_spi->tx_buf += 4; + } else { + /* + * If the remained buffer length is less than 4-bytes, + * we should pad the write buffer with all ones. So that + * it avoids overwrite the unexpected bytes following + * the last one. + */ + val = GENMASK(31, 0); + while (a3700_spi->buf_len) { + val &= ~(0xff << (8 * i)); + val |= *a3700_spi->tx_buf++ << (8 * i); + i++; + a3700_spi->buf_len--; + + spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, + val); + } + break; + } + } + + return 0; +} + +static int a3700_is_rfifo_empty(struct a3700_spi *a3700_spi) +{ + u32 val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG); + + return (val & A3700_SPI_RFIFO_EMPTY); +} + +static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi) +{ + u32 val; + + while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) { + val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG); + if (a3700_spi->buf_len >= 4) { + u32 data = le32_to_cpu(val); + memcpy(a3700_spi->rx_buf, &data, 4); + + a3700_spi->buf_len -= 4; + a3700_spi->rx_buf += 4; + } else { + /* + * When remain bytes is not larger than 4, we should + * avoid memory overwriting and just write the left rx + * buffer bytes. + */ + while (a3700_spi->buf_len) { + *a3700_spi->rx_buf = val & 0xff; + val >>= 8; + + a3700_spi->buf_len--; + a3700_spi->rx_buf++; + } + } + } + + return 0; +} + +static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi) +{ + int timeout = A3700_SPI_TIMEOUT; + u32 val; + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val |= A3700_SPI_XFER_STOP; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + + while (--timeout) { + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + if (!(val & A3700_SPI_XFER_START)) + break; + udelay(1); + } + + a3700_spi_fifo_flush(a3700_spi); + + val &= ~A3700_SPI_XFER_STOP; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); +} + +static int a3700_spi_prepare_message(struct spi_master *master, + struct spi_message *message) +{ + struct a3700_spi *a3700_spi = spi_master_get_devdata(master); + struct spi_device *spi = message->spi; + int ret; + + ret = clk_enable(a3700_spi->clk); + if (ret) { + dev_err(&spi->dev, "failed to enable clk with error %d\n", ret); + return ret; + } + + /* Flush the FIFOs */ + ret = a3700_spi_fifo_flush(a3700_spi); + if (ret) + return ret; + + a3700_spi_bytelen_set(a3700_spi, 4); + + return 0; +} + +static int a3700_spi_transfer_one(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + struct a3700_spi *a3700_spi = spi_master_get_devdata(master); + int ret = 0, timeout = A3700_SPI_TIMEOUT; + unsigned int nbits = 0; + u32 val; + + a3700_spi_transfer_setup(spi, xfer); + + a3700_spi->tx_buf = xfer->tx_buf; + a3700_spi->rx_buf = xfer->rx_buf; + a3700_spi->buf_len = xfer->len; + + /* SPI transfer headers */ + a3700_spi_header_set(a3700_spi); + + if (xfer->tx_buf) + nbits = xfer->tx_nbits; + else if (xfer->rx_buf) + nbits = xfer->rx_nbits; + + a3700_spi_pin_mode_set(a3700_spi, nbits); + + if (xfer->rx_buf) { + /* Set read data length */ + spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, + a3700_spi->buf_len); + /* Start READ transfer */ + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val &= ~A3700_SPI_RW_EN; + val |= A3700_SPI_XFER_START; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + } else if 
(xfer->tx_buf) { + /* Start Write transfer */ + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val |= (A3700_SPI_XFER_START | A3700_SPI_RW_EN); + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + + /* + * If there are data to be written to the SPI device, xmit_data + * flag is set true; otherwise the instruction in SPI_INSTR does + * not require data to be written to the SPI device, then + * xmit_data flag is set false. + */ + a3700_spi->xmit_data = (a3700_spi->buf_len != 0); + } + + while (a3700_spi->buf_len) { + if (a3700_spi->tx_buf) { + /* Wait wfifo ready */ + if (!a3700_spi_transfer_wait(spi, + A3700_SPI_WFIFO_RDY)) { + dev_err(&spi->dev, + "wait wfifo ready timed out\n"); + ret = -ETIMEDOUT; + goto error; + } + /* Fill up the wfifo */ + ret = a3700_spi_fifo_write(a3700_spi); + if (ret) + goto error; + } else if (a3700_spi->rx_buf) { + /* Wait rfifo ready */ + if (!a3700_spi_transfer_wait(spi, + A3700_SPI_RFIFO_RDY)) { + dev_err(&spi->dev, + "wait rfifo ready timed out\n"); + ret = -ETIMEDOUT; + goto error; + } + /* Drain out the rfifo */ + ret = a3700_spi_fifo_read(a3700_spi); + if (ret) + goto error; + } + } + + /* + * Stop a write transfer in fifo mode: + * - wait all the bytes in wfifo to be shifted out + * - set XFER_STOP bit + * - wait XFER_START bit clear + * - clear XFER_STOP bit + * Stop a read transfer in fifo mode: + * - the hardware is to reset the XFER_START bit + * after the number of bytes indicated in DIN_CNT + * register + * - just wait XFER_START bit clear + */ + if (a3700_spi->tx_buf) { + if (a3700_spi->xmit_data) { + /* + * If there are data written to the SPI device, wait + * until SPI_WFIFO_EMPTY is 1 to wait for all data to + * transfer out of write FIFO. + */ + if (!a3700_spi_transfer_wait(spi, + A3700_SPI_WFIFO_EMPTY)) { + dev_err(&spi->dev, "wait wfifo empty timed out\n"); + return -ETIMEDOUT; + } + } else { + /* + * If the instruction in SPI_INSTR does not require data + * to be written to the SPI device, wait until SPI_RDY + * is 1 for the SPI interface to be in idle. 
+ */ + if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) { + dev_err(&spi->dev, "wait xfer ready timed out\n"); + return -ETIMEDOUT; + } + } + + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + val |= A3700_SPI_XFER_STOP; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + } + + while (--timeout) { + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); + if (!(val & A3700_SPI_XFER_START)) + break; + udelay(1); + } + + if (timeout == 0) { + dev_err(&spi->dev, "wait transfer start clear timed out\n"); + ret = -ETIMEDOUT; + goto error; + } + + val &= ~A3700_SPI_XFER_STOP; + spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); + goto out; + +error: + a3700_spi_transfer_abort_fifo(a3700_spi); +out: + spi_finalize_current_transfer(master); + + return ret; +} + +static int a3700_spi_unprepare_message(struct spi_master *master, + struct spi_message *message) +{ + struct a3700_spi *a3700_spi = spi_master_get_devdata(master); + + clk_disable(a3700_spi->clk); + + return 0; +} + +static const struct of_device_id a3700_spi_dt_ids[] = { + { .compatible = "marvell,armada-3700-spi", .data = NULL }, + {}, +}; + +MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids); + +static int a3700_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *of_node = dev->of_node; + struct resource *res; + struct spi_master *master; + struct a3700_spi *spi; + u32 num_cs = 0; + int ret = 0; + + master = spi_alloc_master(dev, sizeof(*spi)); + if (!master) { + dev_err(dev, "master allocation failed\n"); + ret = -ENOMEM; + goto out; + } + + if (of_property_read_u32(of_node, "num-cs", &num_cs)) { + dev_err(dev, "could not find num-cs\n"); + ret = -ENXIO; + goto error; + } + + master->bus_num = pdev->id; + master->dev.of_node = of_node; + master->mode_bits = SPI_MODE_3; + master->num_chipselect = num_cs; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32); + master->prepare_message = a3700_spi_prepare_message; + master->transfer_one = a3700_spi_transfer_one; + master->unprepare_message = a3700_spi_unprepare_message; + master->set_cs = a3700_spi_set_cs; + master->flags = SPI_MASTER_HALF_DUPLEX; + master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL | + SPI_RX_QUAD | SPI_TX_QUAD); + + platform_set_drvdata(pdev, master); + + spi = spi_master_get_devdata(master); + memset(spi, 0, sizeof(struct a3700_spi)); + + spi->master = master; + spi->instr_cnt = A3700_INSTR_CNT; + spi->addr_cnt = A3700_ADDR_CNT; + spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT + + A3700_DUMMY_CNT; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + spi->base = devm_ioremap_resource(dev, res); + if (IS_ERR(spi->base)) { + ret = PTR_ERR(spi->base); + goto error; + } + + spi->irq = platform_get_irq(pdev, 0); + if (spi->irq < 0) { + dev_err(dev, "could not get irq: %d\n", spi->irq); + ret = -ENXIO; + goto error; + } + + init_completion(&spi->done); + + spi->clk = devm_clk_get(dev, NULL); + if (IS_ERR(spi->clk)) { + dev_err(dev, "could not find clk: %ld\n", PTR_ERR(spi->clk)); + goto error; + } + + ret = clk_prepare(spi->clk); + if (ret) { + dev_err(dev, "could not prepare clk: %d\n", ret); + goto error; + } + + ret = a3700_spi_init(spi); + if (ret) + goto error_clk; + + ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0, + dev_name(dev), master); + if (ret) { + dev_err(dev, "could not request IRQ: %d\n", ret); + goto error_clk; + } + + ret = devm_spi_register_master(dev, master); + if (ret) { + dev_err(dev, "Failed to register master\n"); + goto error_clk; + } + + return 0; + +error_clk: + 
clk_disable_unprepare(spi->clk); +error: + spi_master_put(master); +out: + return ret; +} + +static int a3700_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct a3700_spi *spi = spi_master_get_devdata(master); + + clk_unprepare(spi->clk); + spi_master_put(master); + + return 0; +} + +static struct platform_driver a3700_spi_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(a3700_spi_dt_ids), + }, + .probe = a3700_spi_probe, + .remove = a3700_spi_remove, +}; + +module_platform_driver(a3700_spi_driver); + +MODULE_DESCRIPTION("Armada-3700 SPI driver"); +MODULE_AUTHOR("Wilson Ding "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 6165bf21d42750af1f765f2e79bc9a9a51ea734c..f369174fbd886fbbc09a13af5f9dbce66d498635 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -304,6 +304,7 @@ static const struct of_device_id ath79_spi_of_match[] = { { .compatible = "qca,ar7100-spi", }, { }, }; +MODULE_DEVICE_TABLE(of, ath79_spi_of_match); static struct platform_driver ath79_spi_driver = { .probe = ath79_spi_probe, diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 8feac599e9ab4bfe886b8da89960f04ed32c1aba..0e7712bac3b6e1411e49890a5668c068371631f0 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -24,6 +24,7 @@ #include #include +#include #include #include @@ -264,17 +265,6 @@ #define AUTOSUSPEND_TIMEOUT 2000 -struct atmel_spi_dma { - struct dma_chan *chan_rx; - struct dma_chan *chan_tx; - struct scatterlist sgrx; - struct scatterlist sgtx; - struct dma_async_tx_descriptor *data_desc_rx; - struct dma_async_tx_descriptor *data_desc_tx; - - struct at_dma_slave dma_slave; -}; - struct atmel_spi_caps { bool is_spi2; bool has_wdrbt; @@ -295,6 +285,7 @@ struct atmel_spi { int irq; struct clk *clk; struct platform_device *pdev; + unsigned long spi_clk; struct spi_transfer *current_transfer; int current_remaining_bytes; @@ -302,17 +293,11 @@ struct atmel_spi { struct completion xfer_completion; - /* scratch buffer */ - void *buffer; - dma_addr_t buffer_dma; - struct atmel_spi_caps caps; bool use_dma; bool use_pdc; bool use_cs_gpios; - /* dmaengine data */ - struct atmel_spi_dma dma; bool keep_cs; bool cs_active; @@ -326,7 +311,7 @@ struct atmel_spi_device { u32 csr; }; -#define BUFFER_SIZE PAGE_SIZE +#define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */ #define INVALID_DMA_ADDRESS 0xffffffff /* @@ -456,10 +441,20 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as, return as->use_dma && xfer->len >= DMA_MIN_BYTES; } +static bool atmel_spi_can_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + struct atmel_spi *as = spi_master_get_devdata(master); + + return atmel_spi_use_dma(as, xfer); +} + static int atmel_spi_dma_slave_config(struct atmel_spi *as, struct dma_slave_config *slave_config, u8 bits_per_word) { + struct spi_master *master = platform_get_drvdata(as->pdev); int err = 0; if (bits_per_word > 8) { @@ -491,7 +486,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as, * path works the same whether FIFOs are available (and enabled) or not. 
*/ slave_config->direction = DMA_MEM_TO_DEV; - if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) { + if (dmaengine_slave_config(master->dma_tx, slave_config)) { dev_err(&as->pdev->dev, "failed to configure tx dma channel\n"); err = -EINVAL; @@ -506,7 +501,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as, * enabled) or not. */ slave_config->direction = DMA_DEV_TO_MEM; - if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) { + if (dmaengine_slave_config(master->dma_rx, slave_config)) { dev_err(&as->pdev->dev, "failed to configure rx dma channel\n"); err = -EINVAL; @@ -515,7 +510,8 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as, return err; } -static int atmel_spi_configure_dma(struct atmel_spi *as) +static int atmel_spi_configure_dma(struct spi_master *master, + struct atmel_spi *as) { struct dma_slave_config slave_config; struct device *dev = &as->pdev->dev; @@ -525,26 +521,26 @@ static int atmel_spi_configure_dma(struct atmel_spi *as) dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx"); - if (IS_ERR(as->dma.chan_tx)) { - err = PTR_ERR(as->dma.chan_tx); + master->dma_tx = dma_request_slave_channel_reason(dev, "tx"); + if (IS_ERR(master->dma_tx)) { + err = PTR_ERR(master->dma_tx); if (err == -EPROBE_DEFER) { dev_warn(dev, "no DMA channel available at the moment\n"); - return err; + goto error_clear; } dev_err(dev, "DMA TX channel not available, SPI unable to use DMA\n"); err = -EBUSY; - goto error; + goto error_clear; } /* * No reason to check EPROBE_DEFER here since we have already requested * tx channel. If it fails here, it's for another reason. */ - as->dma.chan_rx = dma_request_slave_channel(dev, "rx"); + master->dma_rx = dma_request_slave_channel(dev, "rx"); - if (!as->dma.chan_rx) { + if (!master->dma_rx) { dev_err(dev, "DMA RX channel not available, SPI unable to use DMA\n"); err = -EBUSY; @@ -557,31 +553,38 @@ static int atmel_spi_configure_dma(struct atmel_spi *as) dev_info(&as->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n", - dma_chan_name(as->dma.chan_tx), - dma_chan_name(as->dma.chan_rx)); + dma_chan_name(master->dma_tx), + dma_chan_name(master->dma_rx)); + return 0; error: - if (as->dma.chan_rx) - dma_release_channel(as->dma.chan_rx); - if (!IS_ERR(as->dma.chan_tx)) - dma_release_channel(as->dma.chan_tx); + if (master->dma_rx) + dma_release_channel(master->dma_rx); + if (!IS_ERR(master->dma_tx)) + dma_release_channel(master->dma_tx); +error_clear: + master->dma_tx = master->dma_rx = NULL; return err; } -static void atmel_spi_stop_dma(struct atmel_spi *as) +static void atmel_spi_stop_dma(struct spi_master *master) { - if (as->dma.chan_rx) - dmaengine_terminate_all(as->dma.chan_rx); - if (as->dma.chan_tx) - dmaengine_terminate_all(as->dma.chan_tx); + if (master->dma_rx) + dmaengine_terminate_all(master->dma_rx); + if (master->dma_tx) + dmaengine_terminate_all(master->dma_tx); } -static void atmel_spi_release_dma(struct atmel_spi *as) +static void atmel_spi_release_dma(struct spi_master *master) { - if (as->dma.chan_rx) - dma_release_channel(as->dma.chan_rx); - if (as->dma.chan_tx) - dma_release_channel(as->dma.chan_tx); + if (master->dma_rx) { + dma_release_channel(master->dma_rx); + master->dma_rx = NULL; + } + if (master->dma_tx) { + dma_release_channel(master->dma_tx); + master->dma_tx = NULL; + } } /* This function is called by the DMA driver from tasklet context */ @@ -611,14 +614,10 @@ static void atmel_spi_next_xfer_single(struct spi_master *master, 
cpu_relax(); } - if (xfer->tx_buf) { - if (xfer->bits_per_word > 8) - spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos)); - else - spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos)); - } else { - spi_writel(as, TDR, 0); - } + if (xfer->bits_per_word > 8) + spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos)); + else + spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos)); dev_dbg(master->dev.parent, " start pio xfer %p: len %u tx %p rx %p bitpw %d\n", @@ -665,17 +664,12 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master, /* Fill TX FIFO */ while (num_data >= 2) { - if (xfer->tx_buf) { - if (xfer->bits_per_word > 8) { - td0 = *words++; - td1 = *words++; - } else { - td0 = *bytes++; - td1 = *bytes++; - } + if (xfer->bits_per_word > 8) { + td0 = *words++; + td1 = *words++; } else { - td0 = 0; - td1 = 0; + td0 = *bytes++; + td1 = *bytes++; } spi_writel(as, TDR, (td1 << 16) | td0); @@ -683,14 +677,10 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master, } if (num_data) { - if (xfer->tx_buf) { - if (xfer->bits_per_word > 8) - td0 = *words++; - else - td0 = *bytes++; - } else { - td0 = 0; - } + if (xfer->bits_per_word > 8) + td0 = *words++; + else + td0 = *bytes++; spi_writew(as, TDR, td0); num_data--; @@ -730,13 +720,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, u32 *plen) { struct atmel_spi *as = spi_master_get_devdata(master); - struct dma_chan *rxchan = as->dma.chan_rx; - struct dma_chan *txchan = as->dma.chan_tx; + struct dma_chan *rxchan = master->dma_rx; + struct dma_chan *txchan = master->dma_tx; struct dma_async_tx_descriptor *rxdesc; struct dma_async_tx_descriptor *txdesc; struct dma_slave_config slave_config; dma_cookie_t cookie; - u32 len = *plen; dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n"); @@ -747,44 +736,22 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, /* release lock for DMA operations */ atmel_spi_unlock(as); - /* prepare the RX dma transfer */ - sg_init_table(&as->dma.sgrx, 1); - if (xfer->rx_buf) { - as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen; - } else { - as->dma.sgrx.dma_address = as->buffer_dma; - if (len > BUFFER_SIZE) - len = BUFFER_SIZE; - } - - /* prepare the TX dma transfer */ - sg_init_table(&as->dma.sgtx, 1); - if (xfer->tx_buf) { - as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen; - } else { - as->dma.sgtx.dma_address = as->buffer_dma; - if (len > BUFFER_SIZE) - len = BUFFER_SIZE; - memset(as->buffer, 0, len); - } - - sg_dma_len(&as->dma.sgtx) = len; - sg_dma_len(&as->dma.sgrx) = len; - - *plen = len; + *plen = xfer->len; if (atmel_spi_dma_slave_config(as, &slave_config, xfer->bits_per_word)) goto err_exit; /* Send both scatterlists */ - rxdesc = dmaengine_prep_slave_sg(rxchan, &as->dma.sgrx, 1, + rxdesc = dmaengine_prep_slave_sg(rxchan, + xfer->rx_sg.sgl, xfer->rx_sg.nents, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!rxdesc) goto err_dma; - txdesc = dmaengine_prep_slave_sg(txchan, &as->dma.sgtx, 1, + txdesc = dmaengine_prep_slave_sg(txchan, + xfer->tx_sg.sgl, xfer->tx_sg.nents, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) @@ -818,7 +785,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, err_dma: spi_writel(as, IDR, SPI_BIT(OVRES)); - atmel_spi_stop_dma(as); + atmel_spi_stop_dma(master); err_exit: atmel_spi_lock(as); return -ENOMEM; @@ -830,30 +797,10 @@ static void atmel_spi_next_xfer_data(struct spi_master *master, dma_addr_t *rx_dma, u32 *plen) { - struct atmel_spi *as = 
spi_master_get_devdata(master); - u32 len = *plen; - - /* use scratch buffer only when rx or tx data is unspecified */ - if (xfer->rx_buf) - *rx_dma = xfer->rx_dma + xfer->len - *plen; - else { - *rx_dma = as->buffer_dma; - if (len > BUFFER_SIZE) - len = BUFFER_SIZE; - } - - if (xfer->tx_buf) - *tx_dma = xfer->tx_dma + xfer->len - *plen; - else { - *tx_dma = as->buffer_dma; - if (len > BUFFER_SIZE) - len = BUFFER_SIZE; - memset(as->buffer, 0, len); - dma_sync_single_for_device(&as->pdev->dev, - as->buffer_dma, len, DMA_TO_DEVICE); - } - - *plen = len; + *rx_dma = xfer->rx_dma + xfer->len - *plen; + *tx_dma = xfer->tx_dma + xfer->len - *plen; + if (*plen > master->max_dma_len) + *plen = master->max_dma_len; } static int atmel_spi_set_xfer_speed(struct atmel_spi *as, @@ -864,7 +811,7 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as, unsigned long bus_hz; /* v1 chips start out at half the peripheral bus speed. */ - bus_hz = clk_get_rate(as->clk); + bus_hz = as->spi_clk; if (!atmel_spi_is_v2(as)) bus_hz /= 2; @@ -1025,16 +972,12 @@ atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer) u16 *rxp16; unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; - if (xfer->rx_buf) { - if (xfer->bits_per_word > 8) { - rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos); - *rxp16 = spi_readl(as, RDR); - } else { - rxp = ((u8 *)xfer->rx_buf) + xfer_pos; - *rxp = spi_readl(as, RDR); - } + if (xfer->bits_per_word > 8) { + rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos); + *rxp16 = spi_readl(as, RDR); } else { - spi_readl(as, RDR); + rxp = ((u8 *)xfer->rx_buf) + xfer_pos; + *rxp = spi_readl(as, RDR); } if (xfer->bits_per_word > 8) { if (as->current_remaining_bytes > 2) @@ -1073,12 +1016,10 @@ atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer) /* Read data */ while (num_data) { rd = spi_readl(as, RDR); - if (xfer->rx_buf) { - if (xfer->bits_per_word > 8) - *words++ = rd; - else - *bytes++ = rd; - } + if (xfer->bits_per_word > 8) + *words++ = rd; + else + *bytes++ = rd; num_data--; } } @@ -1204,7 +1145,6 @@ static int atmel_spi_setup(struct spi_device *spi) u32 csr; unsigned int bits = spi->bits_per_word; unsigned int npcs_pin; - int ret; as = spi_master_get_devdata(spi->master); @@ -1247,16 +1187,9 @@ static int atmel_spi_setup(struct spi_device *spi) if (!asd) return -ENOMEM; - if (as->use_cs_gpios) { - ret = gpio_request(npcs_pin, dev_name(&spi->dev)); - if (ret) { - kfree(asd); - return ret; - } - + if (as->use_cs_gpios) gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH)); - } asd->npcs_pin = npcs_pin; spi->controller_state = asd; @@ -1307,7 +1240,7 @@ static int atmel_spi_one_transfer(struct spi_master *master, * better fault reporting. 
*/ if ((!msg->is_dma_mapped) - && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) { + && as->use_pdc) { if (atmel_spi_dma_map_xfer(as, xfer) < 0) return -ENOMEM; } @@ -1380,11 +1313,11 @@ static int atmel_spi_one_transfer(struct spi_master *master, spi_readl(as, SR); } else if (atmel_spi_use_dma(as, xfer)) { - atmel_spi_stop_dma(as); + atmel_spi_stop_dma(master); } if (!msg->is_dma_mapped - && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) + && as->use_pdc) atmel_spi_dma_unmap_xfer(master, xfer); return 0; @@ -1395,7 +1328,7 @@ static int atmel_spi_one_transfer(struct spi_master *master, } if (!msg->is_dma_mapped - && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) + && as->use_pdc) atmel_spi_dma_unmap_xfer(master, xfer); if (xfer->delay_usecs) @@ -1471,13 +1404,11 @@ static int atmel_spi_transfer_one_message(struct spi_master *master, static void atmel_spi_cleanup(struct spi_device *spi) { struct atmel_spi_device *asd = spi->controller_state; - unsigned gpio = (unsigned long) spi->controller_data; if (!asd) return; spi->controller_state = NULL; - gpio_free(gpio); kfree(asd); } @@ -1499,6 +1430,39 @@ static void atmel_get_caps(struct atmel_spi *as) } /*-------------------------------------------------------------------------*/ +static int atmel_spi_gpio_cs(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct atmel_spi *as = spi_master_get_devdata(master); + struct device_node *np = master->dev.of_node; + int i; + int ret = 0; + int nb = 0; + + if (!as->use_cs_gpios) + return 0; + + if (!np) + return 0; + + nb = of_gpio_named_count(np, "cs-gpios"); + for (i = 0; i < nb; i++) { + int cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) + return cs_gpio; + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + return ret; + } + } + + return 0; +} static int atmel_spi_probe(struct platform_device *pdev) { @@ -1537,29 +1501,23 @@ static int atmel_spi_probe(struct platform_device *pdev) master->bus_num = pdev->id; master->num_chipselect = master->dev.of_node ? 0 : 4; master->setup = atmel_spi_setup; + master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX); master->transfer_one_message = atmel_spi_transfer_one_message; master->cleanup = atmel_spi_cleanup; master->auto_runtime_pm = true; + master->max_dma_len = SPI_MAX_DMA_XFER; + master->can_dma = atmel_spi_can_dma; platform_set_drvdata(pdev, master); as = spi_master_get_devdata(master); - /* - * Scratch buffer is used for throwaway rx and tx data. - * It's coherent to minimize dcache pollution. 
- */ - as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, - &as->buffer_dma, GFP_KERNEL); - if (!as->buffer) - goto out_free; - spin_lock_init(&as->lock); as->pdev = pdev; as->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(as->regs)) { ret = PTR_ERR(as->regs); - goto out_free_buffer; + goto out_unmap_regs; } as->phybase = regs->start; as->irq = irq; @@ -1577,14 +1535,19 @@ static int atmel_spi_probe(struct platform_device *pdev) master->num_chipselect = 4; } + ret = atmel_spi_gpio_cs(pdev); + if (ret) + goto out_unmap_regs; + as->use_dma = false; as->use_pdc = false; if (as->caps.has_dma_support) { - ret = atmel_spi_configure_dma(as); - if (ret == 0) + ret = atmel_spi_configure_dma(master, as); + if (ret == 0) { as->use_dma = true; - else if (ret == -EPROBE_DEFER) + } else if (ret == -EPROBE_DEFER) { return ret; + } } else { as->use_pdc = true; } @@ -1606,6 +1569,9 @@ static int atmel_spi_probe(struct platform_device *pdev) ret = clk_prepare_enable(clk); if (ret) goto out_free_irq; + + as->spi_clk = clk_get_rate(clk); + spi_writel(as, CR, SPI_BIT(SWRST)); spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ if (as->caps.has_wdrbt) { @@ -1626,10 +1592,6 @@ static int atmel_spi_probe(struct platform_device *pdev) spi_writel(as, CR, SPI_BIT(FIFOEN)); } - /* go! */ - dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", - (unsigned long)regs->start, irq); - pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_active(&pdev->dev); @@ -1639,6 +1601,10 @@ static int atmel_spi_probe(struct platform_device *pdev) if (ret) goto out_free_dma; + /* go! */ + dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", + (unsigned long)regs->start, irq); + return 0; out_free_dma: @@ -1646,16 +1612,13 @@ static int atmel_spi_probe(struct platform_device *pdev) pm_runtime_set_suspended(&pdev->dev); if (as->use_dma) - atmel_spi_release_dma(as); + atmel_spi_release_dma(master); spi_writel(as, CR, SPI_BIT(SWRST)); spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ clk_disable_unprepare(clk); out_free_irq: out_unmap_regs: -out_free_buffer: - dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, - as->buffer_dma); out_free: spi_master_put(master); return ret; @@ -1671,8 +1634,8 @@ static int atmel_spi_remove(struct platform_device *pdev) /* reset the hardware and block queue progress */ spin_lock_irq(&as->lock); if (as->use_dma) { - atmel_spi_stop_dma(as); - atmel_spi_release_dma(as); + atmel_spi_stop_dma(master); + atmel_spi_release_dma(master); } spi_writel(as, CR, SPI_BIT(SWRST)); @@ -1680,9 +1643,6 @@ static int atmel_spi_remove(struct platform_device *pdev) spi_readl(as, SR); spin_unlock_irq(&as->lock); - dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, - as->buffer_dma); - clk_disable_unprepare(as->clk); pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 2b1456e5e2219c19cf5ab83c2b2cd61d987e118f..319225d7e761b0066a062b017c6e6f2104447859 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -574,6 +574,7 @@ static const struct of_device_id spi_engine_match_table[] = { { .compatible = "adi,axi-spi-engine-1.00.a" }, { }, }; +MODULE_DEVICE_TABLE(of, spi_engine_match_table); static struct platform_driver spi_engine_driver = { .probe = spi_engine_probe, diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 
27960e46135de0d79ce0ee61946a0095ccf082f1..b715a26a91484fb695088459d9249b367b334fe1 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -502,6 +502,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) master->handle_err = dw_spi_handle_err; master->max_speed_hz = dws->max_freq; master->dev.of_node = dev->of_node; + master->flags = SPI_MASTER_GPIO_SS; /* Basic HW init */ spi_hw_init(dev, dws); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index a67b0ff6a362380e4f62382c12c557f98a127947..14c8e7ce1913b0992ef8d9765c983b86b19aa573 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -15,6 +15,8 @@ #include #include +#include +#include #include #include #include @@ -40,6 +42,7 @@ #define TRAN_STATE_WORD_ODD_NUM 0x04 #define DSPI_FIFO_SIZE 4 +#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) #define SPI_MCR 0x00 #define SPI_MCR_MASTER (1 << 31) @@ -72,6 +75,11 @@ #define SPI_SR_TCFQF 0x80000000 #define SPI_SR_CLEAR 0xdaad0000 +#define SPI_RSER_TFFFE BIT(25) +#define SPI_RSER_TFFFD BIT(24) +#define SPI_RSER_RFDFE BIT(17) +#define SPI_RSER_RFDFD BIT(16) + #define SPI_RSER 0x30 #define SPI_RSER_EOQFE 0x10000000 #define SPI_RSER_TCFQE 0x80000000 @@ -109,6 +117,8 @@ #define SPI_TCR_TCNT_MAX 0x10000 +#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000) + struct chip_data { u32 mcr_val; u32 ctar_val; @@ -118,6 +128,7 @@ struct chip_data { enum dspi_trans_mode { DSPI_EOQ_MODE = 0, DSPI_TCFQ_MODE, + DSPI_DMA_MODE, }; struct fsl_dspi_devtype_data { @@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data { }; static const struct fsl_dspi_devtype_data vf610_data = { - .trans_mode = DSPI_EOQ_MODE, + .trans_mode = DSPI_DMA_MODE, .max_clock_factor = 2, }; @@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_data ls2085a_data = { .max_clock_factor = 8, }; +struct fsl_dspi_dma { + /* Length of transfer in words of DSPI_FIFO_SIZE */ + u32 curr_xfer_len; + + u32 *tx_dma_buf; + struct dma_chan *chan_tx; + dma_addr_t tx_dma_phys; + struct completion cmd_tx_complete; + struct dma_async_tx_descriptor *tx_desc; + + u32 *rx_dma_buf; + struct dma_chan *chan_rx; + dma_addr_t rx_dma_phys; + struct completion cmd_rx_complete; + struct dma_async_tx_descriptor *rx_desc; +}; + struct fsl_dspi { struct spi_master *master; struct platform_device *pdev; @@ -166,8 +194,11 @@ struct fsl_dspi { u32 waitflags; u32 spi_tcnt; + struct fsl_dspi_dma *dma; }; +static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word); + static inline int is_double_byte_mode(struct fsl_dspi *dspi) { unsigned int val; @@ -177,6 +208,255 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi) return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1; } +static void dspi_tx_dma_callback(void *arg) +{ + struct fsl_dspi *dspi = arg; + struct fsl_dspi_dma *dma = dspi->dma; + + complete(&dma->cmd_tx_complete); +} + +static void dspi_rx_dma_callback(void *arg) +{ + struct fsl_dspi *dspi = arg; + struct fsl_dspi_dma *dma = dspi->dma; + int rx_word; + int i; + u16 d; + + rx_word = is_double_byte_mode(dspi); + + if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) { + for (i = 0; i < dma->curr_xfer_len; i++) { + d = dspi->dma->rx_dma_buf[i]; + rx_word ? 
(*(u16 *)dspi->rx = d) : + (*(u8 *)dspi->rx = d); + dspi->rx += rx_word + 1; + } + } + + complete(&dma->cmd_rx_complete); +} + +static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) +{ + struct fsl_dspi_dma *dma = dspi->dma; + struct device *dev = &dspi->pdev->dev; + int time_left; + int tx_word; + int i; + + tx_word = is_double_byte_mode(dspi); + + for (i = 0; i < dma->curr_xfer_len; i++) { + dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word); + if ((dspi->cs_change) && (!dspi->len)) + dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT; + } + + dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx, + dma->tx_dma_phys, + dma->curr_xfer_len * + DMA_SLAVE_BUSWIDTH_4_BYTES, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!dma->tx_desc) { + dev_err(dev, "Not able to get desc for DMA xfer\n"); + return -EIO; + } + + dma->tx_desc->callback = dspi_tx_dma_callback; + dma->tx_desc->callback_param = dspi; + if (dma_submit_error(dmaengine_submit(dma->tx_desc))) { + dev_err(dev, "DMA submit failed\n"); + return -EINVAL; + } + + dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx, + dma->rx_dma_phys, + dma->curr_xfer_len * + DMA_SLAVE_BUSWIDTH_4_BYTES, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!dma->rx_desc) { + dev_err(dev, "Not able to get desc for DMA xfer\n"); + return -EIO; + } + + dma->rx_desc->callback = dspi_rx_dma_callback; + dma->rx_desc->callback_param = dspi; + if (dma_submit_error(dmaengine_submit(dma->rx_desc))) { + dev_err(dev, "DMA submit failed\n"); + return -EINVAL; + } + + reinit_completion(&dspi->dma->cmd_rx_complete); + reinit_completion(&dspi->dma->cmd_tx_complete); + + dma_async_issue_pending(dma->chan_rx); + dma_async_issue_pending(dma->chan_tx); + + time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete, + DMA_COMPLETION_TIMEOUT); + if (time_left == 0) { + dev_err(dev, "DMA tx timeout\n"); + dmaengine_terminate_all(dma->chan_tx); + dmaengine_terminate_all(dma->chan_rx); + return -ETIMEDOUT; + } + + time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete, + DMA_COMPLETION_TIMEOUT); + if (time_left == 0) { + dev_err(dev, "DMA rx timeout\n"); + dmaengine_terminate_all(dma->chan_tx); + dmaengine_terminate_all(dma->chan_rx); + return -ETIMEDOUT; + } + + return 0; +} + +static int dspi_dma_xfer(struct fsl_dspi *dspi) +{ + struct fsl_dspi_dma *dma = dspi->dma; + struct device *dev = &dspi->pdev->dev; + int curr_remaining_bytes; + int bytes_per_buffer; + int word = 1; + int ret = 0; + + if (is_double_byte_mode(dspi)) + word = 2; + curr_remaining_bytes = dspi->len; + bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE; + while (curr_remaining_bytes) { + /* Check if current transfer fits the DMA buffer */ + dma->curr_xfer_len = curr_remaining_bytes / word; + if (dma->curr_xfer_len > bytes_per_buffer) + dma->curr_xfer_len = bytes_per_buffer; + + ret = dspi_next_xfer_dma_submit(dspi); + if (ret) { + dev_err(dev, "DMA transfer failed\n"); + goto exit; + + } else { + curr_remaining_bytes -= dma->curr_xfer_len * word; + if (curr_remaining_bytes < 0) + curr_remaining_bytes = 0; + } + } + +exit: + return ret; +} + +static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) +{ + struct fsl_dspi_dma *dma; + struct dma_slave_config cfg; + struct device *dev = &dspi->pdev->dev; + int ret; + + dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); + if (!dma) + return -ENOMEM; + + dma->chan_rx = dma_request_slave_channel(dev, "rx"); + if (!dma->chan_rx) { + dev_err(dev, "rx dma channel not available\n"); + ret = 
-ENODEV; + return ret; + } + + dma->chan_tx = dma_request_slave_channel(dev, "tx"); + if (!dma->chan_tx) { + dev_err(dev, "tx dma channel not available\n"); + ret = -ENODEV; + goto err_tx_channel; + } + + dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE, + &dma->tx_dma_phys, GFP_KERNEL); + if (!dma->tx_dma_buf) { + ret = -ENOMEM; + goto err_tx_dma_buf; + } + + dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE, + &dma->rx_dma_phys, GFP_KERNEL); + if (!dma->rx_dma_buf) { + ret = -ENOMEM; + goto err_rx_dma_buf; + } + + cfg.src_addr = phy_addr + SPI_POPR; + cfg.dst_addr = phy_addr + SPI_PUSHR; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; + + cfg.direction = DMA_DEV_TO_MEM; + ret = dmaengine_slave_config(dma->chan_rx, &cfg); + if (ret) { + dev_err(dev, "can't configure rx dma channel\n"); + ret = -EINVAL; + goto err_slave_config; + } + + cfg.direction = DMA_MEM_TO_DEV; + ret = dmaengine_slave_config(dma->chan_tx, &cfg); + if (ret) { + dev_err(dev, "can't configure tx dma channel\n"); + ret = -EINVAL; + goto err_slave_config; + } + + dspi->dma = dma; + init_completion(&dma->cmd_tx_complete); + init_completion(&dma->cmd_rx_complete); + + return 0; + +err_slave_config: + dma_free_coherent(dev, DSPI_DMA_BUFSIZE, + dma->rx_dma_buf, dma->rx_dma_phys); +err_rx_dma_buf: + dma_free_coherent(dev, DSPI_DMA_BUFSIZE, + dma->tx_dma_buf, dma->tx_dma_phys); +err_tx_dma_buf: + dma_release_channel(dma->chan_tx); +err_tx_channel: + dma_release_channel(dma->chan_rx); + + devm_kfree(dev, dma); + dspi->dma = NULL; + + return ret; +} + +static void dspi_release_dma(struct fsl_dspi *dspi) +{ + struct fsl_dspi_dma *dma = dspi->dma; + struct device *dev = &dspi->pdev->dev; + + if (dma) { + if (dma->chan_tx) { + dma_unmap_single(dev, dma->tx_dma_phys, + DSPI_DMA_BUFSIZE, DMA_TO_DEVICE); + dma_release_channel(dma->chan_tx); + } + + if (dma->chan_rx) { + dma_unmap_single(dev, dma->rx_dma_phys, + DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE); + dma_release_channel(dma->chan_rx); + } + } +} + static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, unsigned long clkrate) { @@ -425,6 +705,12 @@ static int dspi_transfer_one_message(struct spi_master *master, regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE); dspi_tcfq_write(dspi); break; + case DSPI_DMA_MODE: + regmap_write(dspi->regmap, SPI_RSER, + SPI_RSER_TFFFE | SPI_RSER_TFFFD | + SPI_RSER_RFDFE | SPI_RSER_RFDFD); + status = dspi_dma_xfer(dspi); + break; default: dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n", trans_mode); @@ -432,9 +718,13 @@ static int dspi_transfer_one_message(struct spi_master *master, goto out; } - if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) - dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); - dspi->waitflags = 0; + if (trans_mode != DSPI_DMA_MODE) { + if (wait_event_interruptible(dspi->waitq, + dspi->waitflags)) + dev_err(&dspi->pdev->dev, + "wait transfer complete fail!\n"); + dspi->waitflags = 0; + } if (transfer->delay_usecs) udelay(transfer->delay_usecs); @@ -740,6 +1030,13 @@ static int dspi_probe(struct platform_device *pdev) if (ret) goto out_master_put; + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { + if (dspi_request_dma(dspi, res->start)) { + dev_err(&pdev->dev, "can't get dma channels\n"); + goto out_clk_put; + } + } + master->max_speed_hz = clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor; @@ -768,6 +1065,7 @@ static int dspi_remove(struct platform_device 
*pdev) struct fsl_dspi *dspi = spi_master_get_devdata(master); /* Disconnect from the SPI framework */ + dspi_release_dma(dspi); clk_disable_unprepare(dspi->clk); spi_unregister_master(dspi->master); diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index 2c175b9495f7ee102a0d70a96f42fa730c019247..1d332e23f6ede6874e3a7c76329a9ab0ff67a751 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -23,8 +23,6 @@ #include #include -#include "spi-fsl-lib.h" - /* eSPI Controller registers */ #define ESPI_SPMODE 0x00 /* eSPI mode register */ #define ESPI_SPIE 0x04 /* eSPI event register */ @@ -54,8 +52,11 @@ #define CSMODE_AFT(x) ((x) << 8) #define CSMODE_CG(x) ((x) << 3) +#define FSL_ESPI_FIFO_SIZE 32 +#define FSL_ESPI_RXTHR 15 + /* Default mode/csmode for eSPI controller */ -#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) +#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR)) #define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ | CSMODE_AFT(0) | CSMODE_CG(1)) @@ -90,219 +91,342 @@ #define AUTOSUSPEND_TIMEOUT 2000 -static inline u32 fsl_espi_read_reg(struct mpc8xxx_spi *mspi, int offset) +struct fsl_espi { + struct device *dev; + void __iomem *reg_base; + + struct list_head *m_transfers; + struct spi_transfer *tx_t; + unsigned int tx_pos; + bool tx_done; + struct spi_transfer *rx_t; + unsigned int rx_pos; + bool rx_done; + + bool swab; + unsigned int rxskip; + + spinlock_t lock; + + u32 spibrg; /* SPIBRG input clock */ + + struct completion done; +}; + +struct fsl_espi_cs { + u32 hw_mode; +}; + +static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset) { - return ioread32be(mspi->reg_base + offset); + return ioread32be(espi->reg_base + offset); } -static inline u8 fsl_espi_read_reg8(struct mpc8xxx_spi *mspi, int offset) +static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset) { - return ioread8(mspi->reg_base + offset); + return ioread16be(espi->reg_base + offset); } -static inline void fsl_espi_write_reg(struct mpc8xxx_spi *mspi, int offset, - u32 val) +static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset) { - iowrite32be(val, mspi->reg_base + offset); + return ioread8(espi->reg_base + offset); } -static inline void fsl_espi_write_reg8(struct mpc8xxx_spi *mspi, int offset, - u8 val) +static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset, + u32 val) { - iowrite8(val, mspi->reg_base + offset); + iowrite32be(val, espi->reg_base + offset); } -static void fsl_espi_copy_to_buf(struct spi_message *m, - struct mpc8xxx_spi *mspi) +static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset, + u16 val) { - struct spi_transfer *t; - u8 *buf = mspi->local_buf; - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->tx_buf) - memcpy(buf, t->tx_buf, t->len); - else - memset(buf, 0, t->len); - buf += t->len; - } + iowrite16be(val, espi->reg_base + offset); } -static void fsl_espi_copy_from_buf(struct spi_message *m, - struct mpc8xxx_spi *mspi) +static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset, + u8 val) { - struct spi_transfer *t; - u8 *buf = mspi->local_buf; - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->rx_buf) - memcpy(t->rx_buf, buf, t->len); - buf += t->len; - } + iowrite8(val, espi->reg_base + offset); } static int fsl_espi_check_message(struct spi_message *m) { - struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master); + struct fsl_espi *espi = spi_master_get_devdata(m->spi->master); struct 
spi_transfer *t, *first; if (m->frame_length > SPCOM_TRANLEN_MAX) { - dev_err(mspi->dev, "message too long, size is %u bytes\n", + dev_err(espi->dev, "message too long, size is %u bytes\n", m->frame_length); return -EMSGSIZE; } first = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); + list_for_each_entry(t, &m->transfers, transfer_list) { if (first->bits_per_word != t->bits_per_word || first->speed_hz != t->speed_hz) { - dev_err(mspi->dev, "bits_per_word/speed_hz should be the same for all transfers\n"); + dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n"); return -EINVAL; } } + /* ESPI supports MSB-first transfers for word size 8 / 16 only */ + if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 && + first->bits_per_word != 16) { + dev_err(espi->dev, + "MSB-first transfer not supported for wordsize %u\n", + first->bits_per_word); + return -EINVAL; + } + return 0; } -static void fsl_espi_change_mode(struct spi_device *spi) +static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m) { - struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); - struct spi_mpc8xxx_cs *cs = spi->controller_state; - u32 tmp; - unsigned long flags; - - /* Turn off IRQs locally to minimize time that SPI is disabled. */ - local_irq_save(flags); - - /* Turn off SPI unit prior changing mode */ - tmp = fsl_espi_read_reg(mspi, ESPI_SPMODE); - fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp & ~SPMODE_ENABLE); - fsl_espi_write_reg(mspi, ESPI_SPMODEx(spi->chip_select), - cs->hw_mode); - fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp); - - local_irq_restore(flags); + struct spi_transfer *t; + unsigned int i = 0, rxskip = 0; + + /* + * prerequisites for ESPI rxskip mode: + * - message has two transfers + * - first transfer is a write and second is a read + * + * In addition the current low-level transfer mechanism requires + * that the rxskip bytes fit into the TX FIFO. Else the transfer + * would hang because after the first FSL_ESPI_FIFO_SIZE bytes + * the TX FIFO isn't re-filled. + */ + list_for_each_entry(t, &m->transfers, transfer_list) { + if (i == 0) { + if (!t->tx_buf || t->rx_buf || + t->len > FSL_ESPI_FIFO_SIZE) + return 0; + rxskip = t->len; + } else if (i == 1) { + if (t->tx_buf || !t->rx_buf) + return 0; + } + i++; + } + + return i == 2 ? rxskip : 0; } -static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) +static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events) { - u32 data; - u16 data_h; - u16 data_l; - const u32 *tx = mpc8xxx_spi->tx; + u32 tx_fifo_avail; + unsigned int tx_left; + const void *tx_buf; + + /* if events is zero transfer has not started and tx fifo is empty */ + tx_fifo_avail = events ? 
SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE; +start: + tx_left = espi->tx_t->len - espi->tx_pos; + tx_buf = espi->tx_t->tx_buf; + while (tx_fifo_avail >= min(4U, tx_left) && tx_left) { + if (tx_left >= 4) { + if (!tx_buf) + fsl_espi_write_reg(espi, ESPI_SPITF, 0); + else if (espi->swab) + fsl_espi_write_reg(espi, ESPI_SPITF, + swahb32p(tx_buf + espi->tx_pos)); + else + fsl_espi_write_reg(espi, ESPI_SPITF, + *(u32 *)(tx_buf + espi->tx_pos)); + espi->tx_pos += 4; + tx_left -= 4; + tx_fifo_avail -= 4; + } else if (tx_left >= 2 && tx_buf && espi->swab) { + fsl_espi_write_reg16(espi, ESPI_SPITF, + swab16p(tx_buf + espi->tx_pos)); + espi->tx_pos += 2; + tx_left -= 2; + tx_fifo_avail -= 2; + } else { + if (!tx_buf) + fsl_espi_write_reg8(espi, ESPI_SPITF, 0); + else + fsl_espi_write_reg8(espi, ESPI_SPITF, + *(u8 *)(tx_buf + espi->tx_pos)); + espi->tx_pos += 1; + tx_left -= 1; + tx_fifo_avail -= 1; + } + } - if (!tx) - return 0; + if (!tx_left) { + /* Last transfer finished, in rxskip mode only one is needed */ + if (list_is_last(&espi->tx_t->transfer_list, + espi->m_transfers) || espi->rxskip) { + espi->tx_done = true; + return; + } + espi->tx_t = list_next_entry(espi->tx_t, transfer_list); + espi->tx_pos = 0; + /* continue with next transfer if tx fifo is not full */ + if (tx_fifo_avail) + goto start; + } +} - data = *tx++ << mpc8xxx_spi->tx_shift; - data_l = data & 0xffff; - data_h = (data >> 16) & 0xffff; - swab16s(&data_l); - swab16s(&data_h); - data = data_h | data_l; +static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events) +{ + u32 rx_fifo_avail = SPIE_RXCNT(events); + unsigned int rx_left; + void *rx_buf; + +start: + rx_left = espi->rx_t->len - espi->rx_pos; + rx_buf = espi->rx_t->rx_buf; + while (rx_fifo_avail >= min(4U, rx_left) && rx_left) { + if (rx_left >= 4) { + u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF); + + if (rx_buf && espi->swab) + *(u32 *)(rx_buf + espi->rx_pos) = swahb32(val); + else if (rx_buf) + *(u32 *)(rx_buf + espi->rx_pos) = val; + espi->rx_pos += 4; + rx_left -= 4; + rx_fifo_avail -= 4; + } else if (rx_left >= 2 && rx_buf && espi->swab) { + u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF); + + *(u16 *)(rx_buf + espi->rx_pos) = swab16(val); + espi->rx_pos += 2; + rx_left -= 2; + rx_fifo_avail -= 2; + } else { + u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF); + + if (rx_buf) + *(u8 *)(rx_buf + espi->rx_pos) = val; + espi->rx_pos += 1; + rx_left -= 1; + rx_fifo_avail -= 1; + } + } - mpc8xxx_spi->tx = tx; - return data; + if (!rx_left) { + if (list_is_last(&espi->rx_t->transfer_list, + espi->m_transfers)) { + espi->rx_done = true; + return; + } + espi->rx_t = list_next_entry(espi->rx_t, transfer_list); + espi->rx_pos = 0; + /* continue with next transfer if rx fifo is not empty */ + if (rx_fifo_avail) + goto start; + } } static void fsl_espi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_espi *espi = spi_master_get_devdata(spi->master); int bits_per_word = t ? t->bits_per_word : spi->bits_per_word; - u32 hz = t ? 
t->speed_hz : spi->max_speed_hz; - u8 pm; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - cs->rx_shift = 0; - cs->tx_shift = 0; - cs->get_rx = mpc8xxx_spi_rx_buf_u32; - cs->get_tx = mpc8xxx_spi_tx_buf_u32; - if (bits_per_word <= 8) { - cs->rx_shift = 8 - bits_per_word; - } else { - cs->rx_shift = 16 - bits_per_word; - if (spi->mode & SPI_LSB_FIRST) - cs->get_tx = fsl_espi_tx_buf_lsb; - } - - mpc8xxx_spi->rx_shift = cs->rx_shift; - mpc8xxx_spi->tx_shift = cs->tx_shift; - mpc8xxx_spi->get_rx = cs->get_rx; - mpc8xxx_spi->get_tx = cs->get_tx; + u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz; + struct fsl_espi_cs *cs = spi_get_ctldata(spi); + u32 hw_mode_old = cs->hw_mode; /* mask out bits we are going to set */ cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); cs->hw_mode |= CSMODE_LEN(bits_per_word - 1); - if ((mpc8xxx_spi->spibrg / hz) > 64) { + pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1; + + if (pm > 15) { cs->hw_mode |= CSMODE_DIV16; - pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4); - - WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. " - "Will use %d Hz instead.\n", dev_name(&spi->dev), - hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1))); - if (pm > 33) - pm = 33; - } else { - pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4); + pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1; } - if (pm) - pm--; - if (pm < 2) - pm = 2; cs->hw_mode |= CSMODE_PM(pm); - fsl_espi_change_mode(spi); + /* don't write the mode register if the mode doesn't change */ + if (cs->hw_mode != hw_mode_old) + fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select), + cs->hw_mode); } static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) { - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - u32 word; + struct fsl_espi *espi = spi_master_get_devdata(spi->master); + unsigned int rx_len = t->len; + u32 mask, spcom; int ret; - mpc8xxx_spi->len = t->len; - mpc8xxx_spi->count = roundup(t->len, 4) / 4; - - mpc8xxx_spi->tx = t->tx_buf; - mpc8xxx_spi->rx = t->rx_buf; - - reinit_completion(&mpc8xxx_spi->done); + reinit_completion(&espi->done); /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, - (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); + spcom = SPCOM_CS(spi->chip_select); + spcom |= SPCOM_TRANLEN(t->len - 1); + + /* configure RXSKIP mode */ + if (espi->rxskip) { + spcom |= SPCOM_RXSKIP(espi->rxskip); + rx_len = t->len - espi->rxskip; + if (t->rx_nbits == SPI_NBITS_DUAL) + spcom |= SPCOM_DO; + } + + fsl_espi_write_reg(espi, ESPI_SPCOM, spcom); - /* enable rx ints */ - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, SPIM_RNE); + /* enable interrupts */ + mask = SPIM_DON; + if (rx_len > FSL_ESPI_FIFO_SIZE) + mask |= SPIM_RXT; + fsl_espi_write_reg(espi, ESPI_SPIM, mask); - /* transmit word */ - word = mpc8xxx_spi->get_tx(mpc8xxx_spi); - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPITF, word); + /* Prevent filling the fifo from getting interrupted */ + spin_lock_irq(&espi->lock); + fsl_espi_fill_tx_fifo(espi, 0); + spin_unlock_irq(&espi->lock); /* Won't hang up forever, SPI bus sometimes got lost interrupts... 
*/ - ret = wait_for_completion_timeout(&mpc8xxx_spi->done, 2 * HZ); + ret = wait_for_completion_timeout(&espi->done, 2 * HZ); if (ret == 0) - dev_err(mpc8xxx_spi->dev, - "Transaction hanging up (left %d bytes)\n", - mpc8xxx_spi->count); + dev_err(espi->dev, "Transfer timed out!\n"); /* disable rx ints */ - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0); + fsl_espi_write_reg(espi, ESPI_SPIM, 0); - return mpc8xxx_spi->count > 0 ? -EMSGSIZE : 0; + return ret == 0 ? -ETIMEDOUT : 0; } static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans) { - struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master); + struct fsl_espi *espi = spi_master_get_devdata(m->spi->master); struct spi_device *spi = m->spi; int ret; - fsl_espi_copy_to_buf(m, mspi); + /* In case of LSB-first and bits_per_word > 8 byte-swap all words */ + espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8; + + espi->m_transfers = &m->transfers; + espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer, + transfer_list); + espi->tx_pos = 0; + espi->tx_done = false; + espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer, + transfer_list); + espi->rx_pos = 0; + espi->rx_done = false; + + espi->rxskip = fsl_espi_check_rxskip_mode(m); + if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) { + dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n"); + return -EINVAL; + } + + /* In RXSKIP mode skip first transfer for reads */ + if (espi->rxskip) + espi->rx_t = list_next_entry(espi->rx_t, transfer_list); + fsl_espi_setup_transfer(spi, trans); ret = fsl_espi_bufs(spi, trans); @@ -310,19 +434,13 @@ static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans) if (trans->delay_usecs) udelay(trans->delay_usecs); - fsl_espi_setup_transfer(spi, NULL); - - if (!ret) - fsl_espi_copy_from_buf(m, mspi); - return ret; } static int fsl_espi_do_one_msg(struct spi_master *master, struct spi_message *m) { - struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master); - unsigned int delay_usecs = 0; + unsigned int delay_usecs = 0, rx_nbits = 0; struct spi_transfer *t, trans = {}; int ret; @@ -333,6 +451,8 @@ static int fsl_espi_do_one_msg(struct spi_master *master, list_for_each_entry(t, &m->transfers, transfer_list) { if (t->delay_usecs > delay_usecs) delay_usecs = t->delay_usecs; + if (t->rx_nbits > rx_nbits) + rx_nbits = t->rx_nbits; } t = list_first_entry(&m->transfers, struct spi_transfer, @@ -342,8 +462,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master, trans.speed_hz = t->speed_hz; trans.bits_per_word = t->bits_per_word; trans.delay_usecs = delay_usecs; - trans.tx_buf = mspi->local_buf; - trans.rx_buf = mspi->local_buf; + trans.rx_nbits = rx_nbits; if (trans.len) ret = fsl_espi_trans(m, &trans); @@ -360,12 +479,9 @@ static int fsl_espi_do_one_msg(struct spi_master *master, static int fsl_espi_setup(struct spi_device *spi) { - struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi *espi; u32 loop_mode; - struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); - - if (!spi->max_speed_hz) - return -EINVAL; + struct fsl_espi_cs *cs = spi_get_ctldata(spi); if (!cs) { cs = kzalloc(sizeof(*cs), GFP_KERNEL); @@ -374,12 +490,11 @@ static int fsl_espi_setup(struct spi_device *spi) spi_set_ctldata(spi, cs); } - mpc8xxx_spi = spi_master_get_devdata(spi->master); + espi = spi_master_get_devdata(spi->master); - pm_runtime_get_sync(mpc8xxx_spi->dev); + pm_runtime_get_sync(espi->dev); - cs->hw_mode = fsl_espi_read_reg(mpc8xxx_spi, - ESPI_SPMODEx(spi->chip_select)); + 
cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select)); /* mask out bits we are going to set */ cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH | CSMODE_REV); @@ -392,115 +507,74 @@ static int fsl_espi_setup(struct spi_device *spi) cs->hw_mode |= CSMODE_REV; /* Handle the loop mode */ - loop_mode = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE); + loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE); loop_mode &= ~SPMODE_LOOP; if (spi->mode & SPI_LOOP) loop_mode |= SPMODE_LOOP; - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, loop_mode); + fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode); fsl_espi_setup_transfer(spi, NULL); - pm_runtime_mark_last_busy(mpc8xxx_spi->dev); - pm_runtime_put_autosuspend(mpc8xxx_spi->dev); + pm_runtime_mark_last_busy(espi->dev); + pm_runtime_put_autosuspend(espi->dev); return 0; } static void fsl_espi_cleanup(struct spi_device *spi) { - struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); + struct fsl_espi_cs *cs = spi_get_ctldata(spi); kfree(cs); spi_set_ctldata(spi, NULL); } -static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events) { - /* We need handle RX first */ - if (events & SPIE_RNE) { - u32 rx_data, tmp; - u8 rx_data_8; - int rx_nr_bytes = 4; - int ret; - - /* Spin until RX is done */ - if (SPIE_RXCNT(events) < min(4, mspi->len)) { - ret = spin_event_timeout( - !(SPIE_RXCNT(events = - fsl_espi_read_reg(mspi, ESPI_SPIE)) < - min(4, mspi->len)), - 10000, 0); /* 10 msec */ - if (!ret) - dev_err(mspi->dev, - "tired waiting for SPIE_RXCNT\n"); - } + if (!espi->rx_done) + fsl_espi_read_rx_fifo(espi, events); - if (mspi->len >= 4) { - rx_data = fsl_espi_read_reg(mspi, ESPI_SPIRF); - } else if (mspi->len <= 0) { - dev_err(mspi->dev, - "unexpected RX(SPIE_RNE) interrupt occurred,\n" - "(local rxlen %d bytes, reg rxlen %d bytes)\n", - min(4, mspi->len), SPIE_RXCNT(events)); - rx_nr_bytes = 0; - } else { - rx_nr_bytes = mspi->len; - tmp = mspi->len; - rx_data = 0; - while (tmp--) { - rx_data_8 = fsl_espi_read_reg8(mspi, - ESPI_SPIRF); - rx_data |= (rx_data_8 << (tmp * 8)); - } - - rx_data <<= (4 - mspi->len) * 8; - } + if (!espi->tx_done) + fsl_espi_fill_tx_fifo(espi, events); - mspi->len -= rx_nr_bytes; + if (!espi->tx_done || !espi->rx_done) + return; - if (rx_nr_bytes && mspi->rx) - mspi->get_rx(rx_data, mspi); - } + /* we're done, but check for errors before returning */ + events = fsl_espi_read_reg(espi, ESPI_SPIE); - if (!(events & SPIE_TNF)) { - int ret; - - /* spin until TX is done */ - ret = spin_event_timeout(((events = fsl_espi_read_reg( - mspi, ESPI_SPIE)) & SPIE_TNF), 1000, 0); - if (!ret) { - dev_err(mspi->dev, "tired waiting for SPIE_TNF\n"); - complete(&mspi->done); - return; - } - } + if (!(events & SPIE_DON)) + dev_err(espi->dev, + "Transfer done but SPIE_DON isn't set!\n"); - mspi->count -= 1; - if (mspi->count) { - u32 word = mspi->get_tx(mspi); + if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) + dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n"); - fsl_espi_write_reg(mspi, ESPI_SPITF, word); - } else { - complete(&mspi->done); - } + complete(&espi->done); } static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) { - struct mpc8xxx_spi *mspi = context_data; + struct fsl_espi *espi = context_data; u32 events; + spin_lock(&espi->lock); + /* Get interrupt events(tx/rx) */ - events = fsl_espi_read_reg(mspi, ESPI_SPIE); - if (!events) + events = fsl_espi_read_reg(espi, ESPI_SPIE); + if (!events) { + 
spin_unlock(&espi->lock); return IRQ_NONE; + } - dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); + dev_vdbg(espi->dev, "%s: events %x\n", __func__, events); - fsl_espi_cpu_irq(mspi, events); + fsl_espi_cpu_irq(espi, events); /* Clear the events */ - fsl_espi_write_reg(mspi, ESPI_SPIE, events); + fsl_espi_write_reg(espi, ESPI_SPIE, events); + + spin_unlock(&espi->lock); return IRQ_HANDLED; } @@ -509,12 +583,12 @@ static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) static int fsl_espi_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master); + struct fsl_espi *espi = spi_master_get_devdata(master); u32 regval; - regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE); + regval = fsl_espi_read_reg(espi, ESPI_SPMODE); regval &= ~SPMODE_ENABLE; - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval); + fsl_espi_write_reg(espi, ESPI_SPMODE, regval); return 0; } @@ -522,12 +596,12 @@ static int fsl_espi_runtime_suspend(struct device *dev) static int fsl_espi_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master); + struct fsl_espi *espi = spi_master_get_devdata(master); u32 regval; - regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE); + regval = fsl_espi_read_reg(espi, ESPI_SPMODE); regval |= SPMODE_ENABLE; - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval); + fsl_espi_write_reg(espi, ESPI_SPMODE, regval); return 0; } @@ -538,96 +612,105 @@ static size_t fsl_espi_max_message_size(struct spi_device *spi) return SPCOM_TRANLEN_MAX; } +static void fsl_espi_init_regs(struct device *dev, bool initial) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct fsl_espi *espi = spi_master_get_devdata(master); + struct device_node *nc; + u32 csmode, cs, prop; + int ret; + + /* SPI controller initializations */ + fsl_espi_write_reg(espi, ESPI_SPMODE, 0); + fsl_espi_write_reg(espi, ESPI_SPIM, 0); + fsl_espi_write_reg(espi, ESPI_SPCOM, 0); + fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff); + + /* Init eSPI CS mode register */ + for_each_available_child_of_node(master->dev.of_node, nc) { + /* get chip select */ + ret = of_property_read_u32(nc, "reg", &cs); + if (ret || cs >= master->num_chipselect) + continue; + + csmode = CSMODE_INIT_VAL; + + /* check if CSBEF is set in device tree */ + ret = of_property_read_u32(nc, "fsl,csbef", &prop); + if (!ret) { + csmode &= ~(CSMODE_BEF(0xf)); + csmode |= CSMODE_BEF(prop); + } + + /* check if CSAFT is set in device tree */ + ret = of_property_read_u32(nc, "fsl,csaft", &prop); + if (!ret) { + csmode &= ~(CSMODE_AFT(0xf)); + csmode |= CSMODE_AFT(prop); + } + + fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode); + + if (initial) + dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode); + } + + /* Enable SPI interface */ + fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE); +} + static int fsl_espi_probe(struct device *dev, struct resource *mem, - unsigned int irq) + unsigned int irq, unsigned int num_cs) { - struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); struct spi_master *master; - struct mpc8xxx_spi *mpc8xxx_spi; - struct device_node *nc; - const __be32 *prop; - u32 regval, csmode; - int i, len, ret; + struct fsl_espi *espi; + int ret; - master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); + master = spi_alloc_master(dev, sizeof(struct fsl_espi)); if (!master) return -ENOMEM; dev_set_drvdata(dev, master); - 
mpc8xxx_spi_probe(dev, mem, irq); - + master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | + SPI_LSB_FIRST | SPI_LOOP; + master->dev.of_node = dev->of_node; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); master->setup = fsl_espi_setup; master->cleanup = fsl_espi_cleanup; master->transfer_one_message = fsl_espi_do_one_msg; master->auto_runtime_pm = true; master->max_message_size = fsl_espi_max_message_size; + master->num_chipselect = num_cs; - mpc8xxx_spi = spi_master_get_devdata(master); + espi = spi_master_get_devdata(master); + spin_lock_init(&espi->lock); - mpc8xxx_spi->local_buf = - devm_kmalloc(dev, SPCOM_TRANLEN_MAX, GFP_KERNEL); - if (!mpc8xxx_spi->local_buf) { - ret = -ENOMEM; + espi->dev = dev; + espi->spibrg = fsl_get_sys_freq(); + if (espi->spibrg == -1) { + dev_err(dev, "Can't get sys frequency!\n"); + ret = -EINVAL; goto err_probe; } + /* determined by clock divider fields DIV16/PM in register SPMODEx */ + master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16); + master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4); - mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem); - if (IS_ERR(mpc8xxx_spi->reg_base)) { - ret = PTR_ERR(mpc8xxx_spi->reg_base); + init_completion(&espi->done); + + espi->reg_base = devm_ioremap_resource(dev, mem); + if (IS_ERR(espi->reg_base)) { + ret = PTR_ERR(espi->reg_base); goto err_probe; } /* Register for SPI Interrupt */ - ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_espi_irq, - 0, "fsl_espi", mpc8xxx_spi); + ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi); if (ret) goto err_probe; - if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { - mpc8xxx_spi->rx_shift = 16; - mpc8xxx_spi->tx_shift = 24; - } - - /* SPI controller initializations */ - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0); - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0); - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0); - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff); - - /* Init eSPI CS mode register */ - for_each_available_child_of_node(master->dev.of_node, nc) { - /* get chip select */ - prop = of_get_property(nc, "reg", &len); - if (!prop || len < sizeof(*prop)) - continue; - i = be32_to_cpup(prop); - if (i < 0 || i >= pdata->max_chipselect) - continue; - - csmode = CSMODE_INIT_VAL; - /* check if CSBEF is set in device tree */ - prop = of_get_property(nc, "fsl,csbef", &len); - if (prop && len >= sizeof(*prop)) { - csmode &= ~(CSMODE_BEF(0xf)); - csmode |= CSMODE_BEF(be32_to_cpup(prop)); - } - /* check if CSAFT is set in device tree */ - prop = of_get_property(nc, "fsl,csaft", &len); - if (prop && len >= sizeof(*prop)) { - csmode &= ~(CSMODE_AFT(0xf)); - csmode |= CSMODE_AFT(be32_to_cpup(prop)); - } - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i), csmode); - - dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode); - } - - /* Enable SPI interface */ - regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; - - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval); + fsl_espi_init_regs(dev, true); pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); @@ -639,8 +722,7 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem, if (ret < 0) goto err_pm; - dev_info(dev, "at 0x%p (irq = %d)\n", mpc8xxx_spi->reg_base, - mpc8xxx_spi->irq); + dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); @@ -659,20 +741,16 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem, static int 
of_fsl_espi_get_chipselects(struct device *dev) { struct device_node *np = dev->of_node; - struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); - const u32 *prop; - int len; + u32 num_cs; + int ret; - prop = of_get_property(np, "fsl,espi-num-chipselects", &len); - if (!prop || len < sizeof(*prop)) { + ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs); + if (ret) { dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); - return -EINVAL; + return 0; } - pdata->max_chipselect = *prop; - pdata->cs_control = NULL; - - return 0; + return num_cs; } static int of_fsl_espi_probe(struct platform_device *ofdev) @@ -680,16 +758,17 @@ static int of_fsl_espi_probe(struct platform_device *ofdev) struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; struct resource mem; - unsigned int irq; + unsigned int irq, num_cs; int ret; - ret = of_mpc8xxx_spi_probe(ofdev); - if (ret) - return ret; + if (of_property_read_bool(np, "mode")) { + dev_err(dev, "mode property is not supported on ESPI!\n"); + return -EINVAL; + } - ret = of_fsl_espi_get_chipselects(dev); - if (ret) - return ret; + num_cs = of_fsl_espi_get_chipselects(dev); + if (!num_cs) + return -EINVAL; ret = of_address_to_resource(np, 0, &mem); if (ret) @@ -699,7 +778,7 @@ static int of_fsl_espi_probe(struct platform_device *ofdev) if (!irq) return -EINVAL; - return fsl_espi_probe(dev, &mem, irq); + return fsl_espi_probe(dev, &mem, irq, num_cs); } static int of_fsl_espi_remove(struct platform_device *dev) @@ -721,38 +800,15 @@ static int of_fsl_espi_suspend(struct device *dev) return ret; } - ret = pm_runtime_force_suspend(dev); - if (ret < 0) - return ret; - - return 0; + return pm_runtime_force_suspend(dev); } static int of_fsl_espi_resume(struct device *dev) { - struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); struct spi_master *master = dev_get_drvdata(dev); - struct mpc8xxx_spi *mpc8xxx_spi; - u32 regval; - int i, ret; - - mpc8xxx_spi = spi_master_get_devdata(master); - - /* SPI controller initializations */ - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0); - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0); - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0); - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff); - - /* Init eSPI CS mode register */ - for (i = 0; i < pdata->max_chipselect; i++) - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i), - CSMODE_INIT_VAL); - - /* Enable SPI interface */ - regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; + int ret; - fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval); + fsl_espi_init_regs(dev, false); ret = pm_runtime_force_resume(dev); if (ret < 0) diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h index 2925c8089fd93e91c5ea44461644d66ff2388703..f303f306b38e60c43ab73db676f5911b0394bae8 100644 --- a/drivers/spi/spi-fsl-lib.h +++ b/drivers/spi/spi-fsl-lib.h @@ -28,10 +28,6 @@ struct mpc8xxx_spi { /* rx & tx bufs from the spi_transfer */ const void *tx; void *rx; -#if IS_ENABLED(CONFIG_SPI_FSL_ESPI) - int len; - u8 *local_buf; -#endif int subblock; struct spi_pram __iomem *pram; diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c new file mode 100644 index 0000000000000000000000000000000000000000..52551f6d0c7ddf71aaf03ecc18c5caf2f804108b --- /dev/null +++ b/drivers/spi/spi-fsl-lpspi.c @@ -0,0 +1,525 @@ +/* + * Freescale i.MX7ULP LPSPI driver + * + * Copyright 2016 Freescale Semiconductor, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "fsl_lpspi" + +/* i.MX7ULP LPSPI registers */ +#define IMX7ULP_VERID 0x0 +#define IMX7ULP_PARAM 0x4 +#define IMX7ULP_CR 0x10 +#define IMX7ULP_SR 0x14 +#define IMX7ULP_IER 0x18 +#define IMX7ULP_DER 0x1c +#define IMX7ULP_CFGR0 0x20 +#define IMX7ULP_CFGR1 0x24 +#define IMX7ULP_DMR0 0x30 +#define IMX7ULP_DMR1 0x34 +#define IMX7ULP_CCR 0x40 +#define IMX7ULP_FCR 0x58 +#define IMX7ULP_FSR 0x5c +#define IMX7ULP_TCR 0x60 +#define IMX7ULP_TDR 0x64 +#define IMX7ULP_RSR 0x70 +#define IMX7ULP_RDR 0x74 + +/* General control register field define */ +#define CR_RRF BIT(9) +#define CR_RTF BIT(8) +#define CR_RST BIT(1) +#define CR_MEN BIT(0) +#define SR_TCF BIT(10) +#define SR_RDF BIT(1) +#define SR_TDF BIT(0) +#define IER_TCIE BIT(10) +#define IER_RDIE BIT(1) +#define IER_TDIE BIT(0) +#define CFGR1_PCSCFG BIT(27) +#define CFGR1_PCSPOL BIT(8) +#define CFGR1_NOSTALL BIT(3) +#define CFGR1_MASTER BIT(0) +#define RSR_RXEMPTY BIT(1) +#define TCR_CPOL BIT(31) +#define TCR_CPHA BIT(30) +#define TCR_CONT BIT(21) +#define TCR_CONTC BIT(20) +#define TCR_RXMSK BIT(19) +#define TCR_TXMSK BIT(18) + +static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128}; + +struct lpspi_config { + u8 bpw; + u8 chip_select; + u8 prescale; + u16 mode; + u32 speed_hz; +}; + +struct fsl_lpspi_data { + struct device *dev; + void __iomem *base; + struct clk *clk; + + void *rx_buf; + const void *tx_buf; + void (*tx)(struct fsl_lpspi_data *); + void (*rx)(struct fsl_lpspi_data *); + + u32 remain; + u8 txfifosize; + u8 rxfifosize; + + struct lpspi_config config; + struct completion xfer_done; +}; + +static const struct of_device_id fsl_lpspi_dt_ids[] = { + { .compatible = "fsl,imx7ulp-spi", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids); + +#define LPSPI_BUF_RX(type) \ +static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi) \ +{ \ + unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR); \ + \ + if (fsl_lpspi->rx_buf) { \ + *(type *)fsl_lpspi->rx_buf = val; \ + fsl_lpspi->rx_buf += sizeof(type); \ + } \ +} + +#define LPSPI_BUF_TX(type) \ +static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi) \ +{ \ + type val = 0; \ + \ + if (fsl_lpspi->tx_buf) { \ + val = *(type *)fsl_lpspi->tx_buf; \ + fsl_lpspi->tx_buf += sizeof(type); \ + } \ + \ + fsl_lpspi->remain -= sizeof(type); \ + writel(val, fsl_lpspi->base + IMX7ULP_TDR); \ +} + +LPSPI_BUF_RX(u8) +LPSPI_BUF_TX(u8) +LPSPI_BUF_RX(u16) +LPSPI_BUF_TX(u16) +LPSPI_BUF_RX(u32) +LPSPI_BUF_TX(u32) + +static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi, + unsigned int enable) +{ + writel(enable, fsl_lpspi->base + IMX7ULP_IER); +} + +static int lpspi_prepare_xfer_hardware(struct spi_master *master) +{ + struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master); + + return clk_prepare_enable(fsl_lpspi->clk); +} + +static int 
lpspi_unprepare_xfer_hardware(struct spi_master *master) +{ + struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master); + + clk_disable_unprepare(fsl_lpspi->clk); + + return 0; +} + +static int fsl_lpspi_txfifo_empty(struct fsl_lpspi_data *fsl_lpspi) +{ + u32 txcnt; + unsigned long orig_jiffies = jiffies; + + do { + txcnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff; + + if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { + dev_dbg(fsl_lpspi->dev, "txfifo empty timeout\n"); + return -ETIMEDOUT; + } + cond_resched(); + + } while (txcnt); + + return 0; +} + +static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi) +{ + u8 txfifo_cnt; + + txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff; + + while (txfifo_cnt < fsl_lpspi->txfifosize) { + if (!fsl_lpspi->remain) + break; + fsl_lpspi->tx(fsl_lpspi); + txfifo_cnt++; + } + + if (!fsl_lpspi->remain && (txfifo_cnt < fsl_lpspi->txfifosize)) + writel(0, fsl_lpspi->base + IMX7ULP_TDR); + else + fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE); +} + +static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi) +{ + while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY)) + fsl_lpspi->rx(fsl_lpspi); +} + +static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi, + bool is_first_xfer) +{ + u32 temp = 0; + + temp |= fsl_lpspi->config.bpw - 1; + temp |= fsl_lpspi->config.prescale << 27; + temp |= (fsl_lpspi->config.mode & 0x3) << 30; + temp |= (fsl_lpspi->config.chip_select & 0x3) << 24; + + /* + * Set TCR_CONT will keep SS asserted after current transfer. + * For the first transfer, clear TCR_CONTC to assert SS. + * For subsequent transfer, set TCR_CONTC to keep SS asserted. + */ + temp |= TCR_CONT; + if (is_first_xfer) + temp &= ~TCR_CONTC; + else + temp |= TCR_CONTC; + + writel(temp, fsl_lpspi->base + IMX7ULP_TCR); + + dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp); +} + +static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi) +{ + u32 temp; + + temp = fsl_lpspi->txfifosize >> 1 | (fsl_lpspi->rxfifosize >> 1) << 16; + + writel(temp, fsl_lpspi->base + IMX7ULP_FCR); + + dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp); +} + +static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) +{ + struct lpspi_config config = fsl_lpspi->config; + unsigned int perclk_rate, scldiv; + u8 prescale; + + perclk_rate = clk_get_rate(fsl_lpspi->clk); + for (prescale = 0; prescale < 8; prescale++) { + scldiv = perclk_rate / + (clkdivs[prescale] * config.speed_hz) - 2; + if (scldiv < 256) { + fsl_lpspi->config.prescale = prescale; + break; + } + } + + if (prescale == 8 && scldiv >= 256) + return -EINVAL; + + writel(scldiv, fsl_lpspi->base + IMX7ULP_CCR); + + dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale =%d, scldiv=%d\n", + perclk_rate, config.speed_hz, prescale, scldiv); + + return 0; +} + +static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi) +{ + u32 temp; + int ret; + + temp = CR_RST; + writel(temp, fsl_lpspi->base + IMX7ULP_CR); + writel(0, fsl_lpspi->base + IMX7ULP_CR); + + ret = fsl_lpspi_set_bitrate(fsl_lpspi); + if (ret) + return ret; + + fsl_lpspi_set_watermark(fsl_lpspi); + + temp = CFGR1_PCSCFG | CFGR1_MASTER | CFGR1_NOSTALL; + if (fsl_lpspi->config.mode & SPI_CS_HIGH) + temp |= CFGR1_PCSPOL; + writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1); + + temp = readl(fsl_lpspi->base + IMX7ULP_CR); + temp |= CR_RRF | CR_RTF | CR_MEN; + writel(temp, fsl_lpspi->base + IMX7ULP_CR); + + return 0; +} + +static void fsl_lpspi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + 
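/*
 * Illustrative sketch, not taken from the driver above: a userspace model of
 * the fsl_lpspi_set_bitrate() selection.  The driver picks the smallest
 * prescaler (1, 2, 4, ... 128) for which SCLDIV fits in 8 bits; the
 * resulting SCK is roughly perclk / (clkdiv * (SCLDIV + 2)).  The 24 MHz
 * functional clock and 10 kHz target are assumed example values only.
 */
#include <stdio.h>

static const int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128};

static int pick_divider(unsigned int perclk, unsigned int speed,
			unsigned int *prescale, unsigned int *scldiv)
{
	unsigned int p, div;

	for (p = 0; p < 8; p++) {
		div = perclk / (clkdivs[p] * speed) - 2;
		if (div < 256) {
			*prescale = p;
			*scldiv = div;
			return 0;
		}
	}
	return -1;	/* requested rate too slow for this clock */
}

int main(void)
{
	unsigned int perclk = 24000000, prescale, scldiv;

	/* prints: 10 kHz: prescale=4 scldiv=148 -> SCK=10000 Hz */
	if (!pick_divider(perclk, 10000, &prescale, &scldiv))
		printf("10 kHz: prescale=%u scldiv=%u -> SCK=%u Hz\n",
		       prescale, scldiv,
		       perclk / (clkdivs[prescale] * (scldiv + 2)));
	return 0;
}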
struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(spi->master); + + fsl_lpspi->config.mode = spi->mode; + fsl_lpspi->config.bpw = t ? t->bits_per_word : spi->bits_per_word; + fsl_lpspi->config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; + fsl_lpspi->config.chip_select = spi->chip_select; + + if (!fsl_lpspi->config.speed_hz) + fsl_lpspi->config.speed_hz = spi->max_speed_hz; + if (!fsl_lpspi->config.bpw) + fsl_lpspi->config.bpw = spi->bits_per_word; + + /* Initialize the functions for transfer */ + if (fsl_lpspi->config.bpw <= 8) { + fsl_lpspi->rx = fsl_lpspi_buf_rx_u8; + fsl_lpspi->tx = fsl_lpspi_buf_tx_u8; + } else if (fsl_lpspi->config.bpw <= 16) { + fsl_lpspi->rx = fsl_lpspi_buf_rx_u16; + fsl_lpspi->tx = fsl_lpspi_buf_tx_u16; + } else { + fsl_lpspi->rx = fsl_lpspi_buf_rx_u32; + fsl_lpspi->tx = fsl_lpspi_buf_tx_u32; + } + + fsl_lpspi_config(fsl_lpspi); +} + +static int fsl_lpspi_transfer_one(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master); + int ret; + + fsl_lpspi->tx_buf = t->tx_buf; + fsl_lpspi->rx_buf = t->rx_buf; + fsl_lpspi->remain = t->len; + + reinit_completion(&fsl_lpspi->xfer_done); + fsl_lpspi_write_tx_fifo(fsl_lpspi); + + ret = wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ); + if (!ret) { + dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n"); + return -ETIMEDOUT; + } + + ret = fsl_lpspi_txfifo_empty(fsl_lpspi); + if (ret) + return ret; + + fsl_lpspi_read_rx_fifo(fsl_lpspi); + + return 0; +} + +static int fsl_lpspi_transfer_one_msg(struct spi_master *master, + struct spi_message *msg) +{ + struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master); + struct spi_device *spi = msg->spi; + struct spi_transfer *xfer; + bool is_first_xfer = true; + u32 temp; + int ret; + + msg->status = 0; + msg->actual_length = 0; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + fsl_lpspi_setup_transfer(spi, xfer); + fsl_lpspi_set_cmd(fsl_lpspi, is_first_xfer); + + is_first_xfer = false; + + ret = fsl_lpspi_transfer_one(master, spi, xfer); + if (ret < 0) + goto complete; + + msg->actual_length += xfer->len; + } + +complete: + /* de-assert SS, then finalize current message */ + temp = readl(fsl_lpspi->base + IMX7ULP_TCR); + temp &= ~TCR_CONTC; + writel(temp, fsl_lpspi->base + IMX7ULP_TCR); + + msg->status = ret; + spi_finalize_current_message(master); + + return ret; +} + +static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id) +{ + struct fsl_lpspi_data *fsl_lpspi = dev_id; + u32 temp; + + fsl_lpspi_intctrl(fsl_lpspi, 0); + temp = readl(fsl_lpspi->base + IMX7ULP_SR); + + fsl_lpspi_read_rx_fifo(fsl_lpspi); + + if (temp & SR_TDF) { + fsl_lpspi_write_tx_fifo(fsl_lpspi); + + if (!fsl_lpspi->remain) + complete(&fsl_lpspi->xfer_done); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int fsl_lpspi_probe(struct platform_device *pdev) +{ + struct fsl_lpspi_data *fsl_lpspi; + struct spi_master *master; + struct resource *res; + int ret, irq; + u32 temp; + + master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_lpspi_data)); + if (!master) + return -ENOMEM; + + platform_set_drvdata(pdev, master); + + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); + master->bus_num = pdev->id; + + fsl_lpspi = spi_master_get_devdata(master); + fsl_lpspi->dev = &pdev->dev; + + master->transfer_one_message = fsl_lpspi_transfer_one_msg; + master->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; + master->unprepare_transfer_hardware = 
lpspi_unprepare_xfer_hardware; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; + master->dev.of_node = pdev->dev.of_node; + master->bus_num = pdev->id; + + init_completion(&fsl_lpspi->xfer_done); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(fsl_lpspi->base)) { + ret = PTR_ERR(fsl_lpspi->base); + goto out_master_put; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out_master_put; + } + + ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0, + dev_name(&pdev->dev), fsl_lpspi); + if (ret) { + dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); + goto out_master_put; + } + + fsl_lpspi->clk = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(fsl_lpspi->clk)) { + ret = PTR_ERR(fsl_lpspi->clk); + goto out_master_put; + } + + ret = clk_prepare_enable(fsl_lpspi->clk); + if (ret) { + dev_err(&pdev->dev, "can't enable lpspi clock, ret=%d\n", ret); + goto out_master_put; + } + + temp = readl(fsl_lpspi->base + IMX7ULP_PARAM); + fsl_lpspi->txfifosize = 1 << (temp & 0x0f); + fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f); + + clk_disable_unprepare(fsl_lpspi->clk); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_master error.\n"); + goto out_master_put; + } + + return 0; + +out_master_put: + spi_master_put(master); + + return ret; +} + +static int fsl_lpspi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master); + + clk_disable_unprepare(fsl_lpspi->clk); + + return 0; +} + +static struct platform_driver fsl_lpspi_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = fsl_lpspi_dt_ids, + }, + .probe = fsl_lpspi_probe, + .remove = fsl_lpspi_remove, +}; +module_platform_driver(fsl_lpspi_driver); + +MODULE_DESCRIPTION("LPSPI Master Controller driver"); +MODULE_AUTHOR("Gao Pan "); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index deb782f6556c119dc8dfe6760510e54df6c0d906..32ced64a5bb9a012e2edd0d415d8c477ebc0522a 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -173,15 +173,16 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, /* MX21, MX27 */ static unsigned int spi_imx_clkdiv_1(unsigned int fin, - unsigned int fspi, unsigned int max) + unsigned int fspi, unsigned int max, unsigned int *fres) { int i; for (i = 2; i < max; i++) if (fspi * mxc_clkdivs[i] >= fin) - return i; + break; - return max; + *fres = fin / mxc_clkdivs[i]; + return i; } /* MX1, MX31, MX35, MX51 CSPI */ @@ -442,6 +443,7 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx) #define MX31_CSPICTRL_ENABLE (1 << 0) #define MX31_CSPICTRL_MASTER (1 << 1) #define MX31_CSPICTRL_XCH (1 << 2) +#define MX31_CSPICTRL_SMC (1 << 3) #define MX31_CSPICTRL_POL (1 << 4) #define MX31_CSPICTRL_PHA (1 << 5) #define MX31_CSPICTRL_SSCTL (1 << 6) @@ -452,6 +454,10 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx) #define MX35_CSPICTRL_CS_SHIFT 12 #define MX31_CSPICTRL_DR_SHIFT 16 +#define MX31_CSPI_DMAREG 0x10 +#define MX31_DMAREG_RH_DEN (1<<4) +#define MX31_DMAREG_TH_DEN (1<<1) + #define MX31_CSPISTATUS 0x14 #define MX31_STATUS_RR (1 << 3) @@ -511,6 +517,9 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config) (is_imx35_cspi(spi_imx) ? 
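/*
 * Illustrative sketch, not taken from the driver above: a standalone model
 * of the reworked spi_imx_clkdiv_1(), which now also reports the clock rate
 * the chosen divider actually produces (used to set spi_bus_clk below).
 * The divider table is abridged from the driver, the example values keep the
 * lookup inside the table, and the 48 MHz input clock is assumed.
 */
#include <stdio.h>

static const unsigned int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24,
					   32, 48, 64, 96, 128, 192};

static unsigned int clkdiv_1(unsigned int fin, unsigned int fspi,
			     unsigned int max, unsigned int *fres)
{
	unsigned int i;

	/* smallest divider (from index 2 up) whose output does not exceed fspi */
	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	*fres = fin / mxc_clkdivs[i];
	return i;
}

int main(void)
{
	unsigned int fres;
	unsigned int idx = clkdiv_1(48000000, 4000000, 14, &fres);

	/* prints: divider index 5 (divide by 12) -> 4000000 Hz */
	printf("divider index %u (divide by %u) -> %u Hz\n",
	       idx, mxc_clkdivs[idx], fres);
	return 0;
}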
MX35_CSPICTRL_CS_SHIFT : MX31_CSPICTRL_CS_SHIFT); + if (spi_imx->usedma) + reg |= MX31_CSPICTRL_SMC; + writel(reg, spi_imx->base + MXC_CSPICTRL); reg = readl(spi_imx->base + MX31_CSPI_TESTREG); @@ -520,6 +529,13 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config) reg &= ~MX31_TEST_LBC; writel(reg, spi_imx->base + MX31_CSPI_TESTREG); + if (spi_imx->usedma) { + /* configure DMA requests when RXFIFO is half full and + when TXFIFO is half empty */ + writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN, + spi_imx->base + MX31_CSPI_DMAREG); + } + return 0; } @@ -574,9 +590,12 @@ static int mx21_config(struct spi_device *spi, struct spi_imx_config *config) struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER; unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18; + unsigned int clk; + + reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max, &clk) + << MX21_CSPICTRL_DR_SHIFT; + spi_imx->spi_bus_clk = clk; - reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) << - MX21_CSPICTRL_DR_SHIFT; reg |= config->bpw - 1; if (spi->mode & SPI_CPHA) @@ -1244,10 +1263,10 @@ static int spi_imx_probe(struct platform_device *pdev) spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); /* - * Only validated on i.mx6 now, can remove the constrain if validated on - * other chips. + * Only validated on i.mx35 and i.mx6 now, can remove the constraint + * if validated on other chips. */ - if (is_imx51_ecspi(spi_imx)) { + if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx)) { ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master); if (ret == -EPROBE_DEFER) goto out_clk_put; diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c index f8117b80fa22e7ac8d5108f403166e178858cdb5..cebfea5faa4be247e2b8e145d96c3925c6a32ec7 100644 --- a/drivers/spi/spi-jcore.c +++ b/drivers/spi/spi-jcore.c @@ -214,6 +214,7 @@ static const struct of_device_id jcore_spi_of_match[] = { { .compatible = "jcore,spi2" }, {}, }; +MODULE_DEVICE_TABLE(of, jcore_spi_of_match); static struct platform_driver jcore_spi_driver = { .probe = jcore_spi_probe, diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index d5157b2222ce1d8837ed204598107f4968218aef..79800e991ccd537180866aa4671ab165a4f896f8 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -1386,20 +1386,13 @@ static int omap2_mcspi_probe(struct platform_device *pdev) regs_offset = pdata->regs_offset; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - status = -ENODEV; - goto free_master; - } - - r->start += regs_offset; - r->end += regs_offset; - mcspi->phys = r->start; - mcspi->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(mcspi->base)) { status = PTR_ERR(mcspi->base); goto free_master; } + mcspi->phys = r->start + regs_offset; + mcspi->base += regs_offset; mcspi->dev = &pdev->dev; diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index ded37025b445834d0367ef41cac7aa80b2c656cf..6b001c4a564086528e9db608f7250305656d5da7 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -138,37 +138,62 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed) tclk_hz = clk_get_rate(orion_spi->clk); if (devdata->typ == ARMADA_SPI) { - unsigned int clk, spr, sppr, sppr2, err; - unsigned int best_spr, best_sppr, best_err; - - best_err = speed; - best_spr = 0; - best_sppr = 0; - - /* Iterate over the valid range looking for best fit */ - for (sppr = 0; sppr < 8; 
sppr++) { - sppr2 = 0x1 << sppr; - - spr = tclk_hz / sppr2; - spr = DIV_ROUND_UP(spr, speed); - if ((spr == 0) || (spr > 15)) - continue; - - clk = tclk_hz / (spr * sppr2); - err = speed - clk; - - if (err < best_err) { - best_spr = spr; - best_sppr = sppr; - best_err = err; - } - } + /* + * Given the core_clk (tclk_hz) and the target rate (speed) we + * determine the best values for SPR (in [0 .. 15]) and SPPR (in + * [0..7]) such that + * + * core_clk / (SPR * 2 ** SPPR) + * + * is as big as possible but not bigger than speed. + */ - if ((best_sppr == 0) && (best_spr == 0)) - return -EINVAL; + /* best integer divider: */ + unsigned divider = DIV_ROUND_UP(tclk_hz, speed); + unsigned spr, sppr; + + if (divider < 16) { + /* This is the easy case, divider is less than 16 */ + spr = divider; + sppr = 0; + + } else { + unsigned two_pow_sppr; + /* + * Find the highest bit set in divider. This and the + * three next bits define SPR (apart from rounding). + * SPPR is then the number of zero bits that must be + * appended: + */ + sppr = fls(divider) - 4; + + /* + * As SPR only has 4 bits, we have to round divider up + * to the next multiple of 2 ** sppr. + */ + two_pow_sppr = 1 << sppr; + divider = (divider + two_pow_sppr - 1) & -two_pow_sppr; + + /* + * recalculate sppr as rounding up divider might have + * increased it enough to change the position of the + * highest set bit. In this case the bit that now + * doesn't make it into SPR is 0, so there is no need to + * round again. + */ + sppr = fls(divider) - 4; + spr = divider >> sppr; + + /* + * Now do range checking. SPR is constructed to have a + * width of 4 bits, so this is fine for sure. So we + * still need to check for sppr to fit into 3 bits: + */ + if (sppr > 7) + return -EINVAL; + } - prescale = ((best_sppr & 0x6) << 5) | - ((best_sppr & 0x1) << 4) | best_spr; + prescale = ((sppr & 0x6) << 5) | ((sppr & 0x1) << 4) | spr; } else { /* * the supported rates are: 4,6,8...30 diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index ce31b8199bb3bf8f961eb169a910953800d367e2..2823a00a940591a631b73a945c0c1bff88c1617b 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -109,7 +109,6 @@ static inline void pxa2xx_spi_write(const struct driver_data *drv_data, #define DONE_STATE ((void *)2) #define ERROR_STATE ((void *)-1) -#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) #define DMA_ALIGNMENT 8 static inline int pxa25x_ssp_comp(struct driver_data *drv_data) diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index a816f07e168e84b0192224dcaadb99cb377ac9ed..9daf500317376bd143cb56eb4b14738d11adbdca 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -413,7 +413,7 @@ static unsigned int qspi_set_send_trigger(struct rspi_data *rspi, return n; } -static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len) +static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len) { unsigned int n; @@ -428,6 +428,7 @@ static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len) qspi_update(rspi, SPBFCR_RXTRG_MASK, SPBFCR_RXTRG_1B, QSPI_SPBFCR); } + return n; } #define set_config_register(spi, n) spi->ops->set_config_register(spi, n) @@ -785,6 +786,9 @@ static int qspi_transfer_out_in(struct rspi_data *rspi, static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) { + const u8 *tx = xfer->tx_buf; + unsigned int n = xfer->len; + unsigned int i, len; int ret; if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { 
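/*
 * Illustrative sketch, not taken from the driver above: a userspace
 * walk-through of the new Armada prescaler selection.  The goal is the
 * largest core_clk / (SPR * 2^SPPR) not exceeding the requested rate, with
 * a 4-bit SPR and a 3-bit SPPR.  The 250 MHz core clock and 10 MHz target
 * are made-up example numbers; fls32() stands in for the kernel's fls().
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int tclk_hz = 250000000, speed = 10000000;
	unsigned int divider = DIV_ROUND_UP(tclk_hz, speed);	/* 25 */
	unsigned int spr, sppr, two_pow_sppr;

	if (divider < 16) {
		spr = divider;
		sppr = 0;
	} else {
		sppr = fls32(divider) - 4;			/* 1 */
		two_pow_sppr = 1 << sppr;
		/* round divider up to a multiple of 2^sppr: 25 -> 26 */
		divider = (divider + two_pow_sppr - 1) & -two_pow_sppr;
		sppr = fls32(divider) - 4;			/* still 1 */
		spr = divider >> sppr;				/* 13 */
	}

	/* prints: SPR=13 SPPR=1 -> 9615384 Hz (fastest rate <= 10 MHz) */
	printf("SPR=%u SPPR=%u -> %u Hz\n", spr, sppr,
	       tclk_hz / (spr * (1u << sppr)));
	return 0;
}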
@@ -793,9 +797,23 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) return ret; } - ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len); - if (ret < 0) - return ret; + while (n > 0) { + len = qspi_set_send_trigger(rspi, n); + if (len == QSPI_BUFFER_SIZE) { + ret = rspi_wait_for_tx_empty(rspi); + if (ret < 0) { + dev_err(&rspi->master->dev, "transmit timeout\n"); + return ret; + } + for (i = 0; i < len; i++) + rspi_write_data(rspi, *tx++); + } else { + ret = rspi_pio_transfer(rspi, tx, NULL, n); + if (ret < 0) + return ret; + } + n -= len; + } /* Wait for the last transmission */ rspi_wait_for_tx_empty(rspi); @@ -805,13 +823,37 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) { + u8 *rx = xfer->rx_buf; + unsigned int n = xfer->len; + unsigned int i, len; + int ret; + if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg); if (ret != -EAGAIN) return ret; } - return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len); + while (n > 0) { + len = qspi_set_receive_trigger(rspi, n); + if (len == QSPI_BUFFER_SIZE) { + ret = rspi_wait_for_rx_full(rspi); + if (ret < 0) { + dev_err(&rspi->master->dev, "receive timeout\n"); + return ret; + } + for (i = 0; i < len; i++) + *rx++ = rspi_read_data(rspi); + } else { + ret = rspi_pio_transfer(rspi, NULL, rx, n); + if (ret < 0) + return ret; + *rx++ = ret; + } + n -= len; + } + + return 0; } static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 3c09e94cf827f63b20f9e53d4523cbdaeba7718d..28dfdce4beae4d5fe3d983eb11a78a4c9a70a840 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -341,27 +341,20 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable) static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) { struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); - dma_filter_fn filter = sdd->cntrlr_info->filter; struct device *dev = &sdd->pdev->dev; - dma_cap_mask_t mask; if (is_polling(sdd)) return 0; - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - /* Acquire DMA channels */ - sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, - sdd->cntrlr_info->dma_rx, dev, "rx"); + sdd->rx_dma.ch = dma_request_slave_channel(dev, "rx"); if (!sdd->rx_dma.ch) { dev_err(dev, "Failed to get RX DMA channel\n"); return -EBUSY; } spi->dma_rx = sdd->rx_dma.ch; - sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, - sdd->cntrlr_info->dma_tx, dev, "tx"); + sdd->tx_dma.ch = dma_request_slave_channel(dev, "tx"); if (!sdd->tx_dma.ch) { dev_err(dev, "Failed to get TX DMA channel\n"); dma_release_channel(sdd->rx_dma.ch); @@ -1091,11 +1084,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) sdd->cur_bpw = 8; - if (!sdd->pdev->dev.of_node && (!sci->dma_tx || !sci->dma_rx)) { - dev_warn(&pdev->dev, "Unable to get SPI tx/rx DMA data. 
Switching to poll mode\n"); - sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; - } - sdd->tx_dma.direction = DMA_MEM_TO_DEV; sdd->rx_dma.direction = DMA_DEV_TO_MEM; @@ -1205,9 +1193,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n", sdd->port_id, master->num_chipselect); - dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%p, Tx-%p]\n", - mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1, - sci->dma_rx, sci->dma_tx); + dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n", + mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1); pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 1de3a772eb7d23a8a90b9b77704033ac4945e364..0012ad02e5696d35b547a3a698682f51a2d02819 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -980,6 +980,7 @@ static const struct of_device_id sh_msiof_match[] = { { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data }, + { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data }, {}, }; MODULE_DEVICE_TABLE(of, sh_msiof_match); diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index 4969dc10684a4c48135a12e46823be3afc7c5c56..c5cd635c28f388bec2cfd47b9a6c6c9dcec9e046 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -46,6 +46,8 @@ #define SUN4I_CTL_TP BIT(18) #define SUN4I_INT_CTL_REG 0x0c +#define SUN4I_INT_CTL_RF_F34 BIT(4) +#define SUN4I_INT_CTL_TF_E34 BIT(12) #define SUN4I_INT_CTL_TC BIT(16) #define SUN4I_INT_STA_REG 0x10 @@ -61,11 +63,14 @@ #define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8) #define SUN4I_CLK_CTL_DRS BIT(12) +#define SUN4I_MAX_XFER_SIZE 0xffffff + #define SUN4I_BURST_CNT_REG 0x20 -#define SUN4I_BURST_CNT(cnt) ((cnt) & 0xffffff) +#define SUN4I_BURST_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE) #define SUN4I_XMIT_CNT_REG 0x24 -#define SUN4I_XMIT_CNT(cnt) ((cnt) & 0xffffff) +#define SUN4I_XMIT_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE) + #define SUN4I_FIFO_STA_REG 0x28 #define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f @@ -96,6 +101,31 @@ static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value) writel(value, sspi->base_addr + reg); } +static inline u32 sun4i_spi_get_tx_fifo_count(struct sun4i_spi *sspi) +{ + u32 reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG); + + reg >>= SUN4I_FIFO_STA_TF_CNT_BITS; + + return reg & SUN4I_FIFO_STA_TF_CNT_MASK; +} + +static inline void sun4i_spi_enable_interrupt(struct sun4i_spi *sspi, u32 mask) +{ + u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG); + + reg |= mask; + sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg); +} + +static inline void sun4i_spi_disable_interrupt(struct sun4i_spi *sspi, u32 mask) +{ + u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG); + + reg &= ~mask; + sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg); +} + static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len) { u32 reg, cnt; @@ -118,10 +148,13 @@ static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len) static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len) { + u32 cnt; u8 byte; - if (len > sspi->len) - len = sspi->len; + /* See how much data we can fit */ + cnt = SUN4I_FIFO_DEPTH - sun4i_spi_get_tx_fifo_count(sspi); + + len = min3(len, (int)cnt, sspi->len); while (len--) { byte = sspi->tx_buf ? 
*sspi->tx_buf++ : 0; @@ -184,10 +217,10 @@ static int sun4i_spi_transfer_one(struct spi_master *master, u32 reg; /* We don't support transfer larger than the FIFO */ - if (tfr->len > SUN4I_FIFO_DEPTH) + if (tfr->len > SUN4I_MAX_XFER_SIZE) return -EMSGSIZE; - if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH) + if (tfr->tx_buf && tfr->len >= SUN4I_MAX_XFER_SIZE) return -EMSGSIZE; reinit_completion(&sspi->done); @@ -286,7 +319,11 @@ static int sun4i_spi_transfer_one(struct spi_master *master, sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1); /* Enable the interrupts */ - sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC); + sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TC | + SUN4I_INT_CTL_RF_F34); + /* Only enable Tx FIFO interrupt if we really need it */ + if (tx_len > SUN4I_FIFO_DEPTH) + sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TF_E34); /* Start the transfer */ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); @@ -306,7 +343,6 @@ static int sun4i_spi_transfer_one(struct spi_master *master, goto out; } - sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH); out: sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0); @@ -322,10 +358,33 @@ static irqreturn_t sun4i_spi_handler(int irq, void *dev_id) /* Transfer complete */ if (status & SUN4I_INT_CTL_TC) { sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC); + sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH); complete(&sspi->done); return IRQ_HANDLED; } + /* Receive FIFO 3/4 full */ + if (status & SUN4I_INT_CTL_RF_F34) { + sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH); + /* Only clear the interrupt _after_ draining the FIFO */ + sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_RF_F34); + return IRQ_HANDLED; + } + + /* Transmit FIFO 3/4 empty */ + if (status & SUN4I_INT_CTL_TF_E34) { + sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH); + + if (!sspi->len) + /* nothing left to transmit */ + sun4i_spi_disable_interrupt(sspi, SUN4I_INT_CTL_TF_E34); + + /* Only clear the interrupt _after_ re-seeding the FIFO */ + sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TF_E34); + + return IRQ_HANDLED; + } + return IRQ_NONE; } diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index 9918a57a6a6e643456d95dfaf53a6d85e428cbac..e3114832c485cdac7621662ed1d7291b493a76d8 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -24,6 +25,7 @@ #include #define SUN6I_FIFO_DEPTH 128 +#define SUN8I_FIFO_DEPTH 64 #define SUN6I_GBL_CTL_REG 0x04 #define SUN6I_GBL_CTL_BUS_ENABLE BIT(0) @@ -90,6 +92,7 @@ struct sun6i_spi { const u8 *tx_buf; u8 *rx_buf; int len; + unsigned long fifo_depth; }; static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg) @@ -155,7 +158,9 @@ static void sun6i_spi_set_cs(struct spi_device *spi, bool enable) static size_t sun6i_spi_max_transfer_size(struct spi_device *spi) { - return SUN6I_FIFO_DEPTH - 1; + struct sun6i_spi *sspi = spi_master_get_devdata(spi->master); + + return sspi->fifo_depth - 1; } static int sun6i_spi_transfer_one(struct spi_master *master, @@ -170,7 +175,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, u32 reg; /* We don't support transfer larger than the FIFO */ - if (tfr->len > SUN6I_FIFO_DEPTH) + if (tfr->len > sspi->fifo_depth) return -EINVAL; reinit_completion(&sspi->done); @@ -265,7 +270,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, SUN6I_BURST_CTL_CNT_STC(tx_len)); /* Fill the TX FIFO */ - sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH); + sun6i_spi_fill_fifo(sspi, 
sspi->fifo_depth); /* Enable the interrupts */ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC); @@ -288,7 +293,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, goto out; } - sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH); + sun6i_spi_drain_fifo(sspi, sspi->fifo_depth); out: sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0); @@ -398,6 +403,8 @@ static int sun6i_spi_probe(struct platform_device *pdev) } sspi->master = master; + sspi->fifo_depth = (unsigned long)of_device_get_match_data(&pdev->dev); + master->max_speed_hz = 100 * 1000 * 1000; master->min_speed_hz = 3 * 1000; master->set_cs = sun6i_spi_set_cs; @@ -470,7 +477,8 @@ static int sun6i_spi_remove(struct platform_device *pdev) } static const struct of_device_id sun6i_spi_match[] = { - { .compatible = "allwinner,sun6i-a31-spi", }, + { .compatible = "allwinner,sun6i-a31-spi", .data = (void *)SUN6I_FIFO_DEPTH }, + { .compatible = "allwinner,sun8i-h3-spi", .data = (void *)SUN8I_FIFO_DEPTH }, {} }; MODULE_DEVICE_TABLE(of, sun6i_spi_match); diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index caeac66a39777465ac1c9a62cf2b74a99dfb2e47..ec6fb09e2e1711f7adfbe079559a89f1a9cd343d 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -411,6 +411,7 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst, tx->callback = ti_qspi_dma_callback; tx->callback_param = qspi; cookie = tx->tx_submit(tx); + reinit_completion(&qspi->transfer_complete); ret = dma_submit_error(cookie); if (ret) { diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index c54ee667447101abd8957bdc2c976c174d137d4c..fcb991034c3d076bc032545addca93517017b126 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -1268,11 +1268,8 @@ static void pch_spi_free_resources(struct pch_spi_board_data *board_dat, static int pch_spi_get_resources(struct pch_spi_board_data *board_dat, struct pch_spi_data *data) { - int retval = 0; - dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); - /* reset PCH SPI h/w */ pch_spi_reset(data->master); dev_dbg(&board_dat->pdev->dev, @@ -1280,15 +1277,7 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat, dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); - if (retval != 0) { - dev_err(&board_dat->pdev->dev, - "%s FAIL:invoking pch_spi_free_resources\n", __func__); - pch_spi_free_resources(board_dat, data); - } - - dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); - - return retval; + return 0; } static void pch_free_dma_buf(struct pch_spi_board_data *board_dat, diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c index 4071a729eb2f2af5ffd8f81c9113c026a07d3e34..bea7a93a60468813d904f3d9457c8e8440055ec3 100644 --- a/drivers/spi/spi-xlp.c +++ b/drivers/spi/spi-xlp.c @@ -451,6 +451,7 @@ static const struct of_device_id xlp_spi_dt_id[] = { { .compatible = "netlogic,xlp832-spi" }, { }, }; +MODULE_DEVICE_TABLE(of, xlp_spi_dt_id); static struct platform_driver xlp_spi_driver = { .probe = xlp_spi_probe, diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 838783c3fed0ae81626099b8e295dc12963a1360..656dd3e3220c5062b43b5a162a078f245f90630c 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -697,10 +697,15 @@ static void spi_set_cs(struct spi_device *spi, bool enable) if (spi->mode & SPI_CS_HIGH) enable = !enable; - if (gpio_is_valid(spi->cs_gpio)) + if (gpio_is_valid(spi->cs_gpio)) { gpio_set_value(spi->cs_gpio, !enable); - else if (spi->master->set_cs) + /* Some 
SPI masters need both GPIO CS & slave_select */ + if ((spi->master->flags & SPI_MASTER_GPIO_SS) && + spi->master->set_cs) + spi->master->set_cs(spi, !enable); + } else if (spi->master->set_cs) { spi->master->set_cs(spi, !enable); + } } #ifdef CONFIG_HAS_DMA @@ -720,6 +725,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, int desc_len; int sgs; struct page *vm_page; + struct scatterlist *sg; void *sg_buf; size_t min; int i, ret; @@ -738,6 +744,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, if (ret != 0) return ret; + sg = &sgt->sgl[0]; for (i = 0; i < sgs; i++) { if (vmalloced_buf || kmap_buf) { @@ -751,16 +758,17 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, sg_free_table(sgt); return -ENOMEM; } - sg_set_page(&sgt->sgl[i], vm_page, + sg_set_page(sg, vm_page, min, offset_in_page(buf)); } else { min = min_t(size_t, len, desc_len); sg_buf = buf; - sg_set_buf(&sgt->sgl[i], sg_buf, min); + sg_set_buf(sg, sg_buf, min); } buf += min; len -= min; + sg = sg_next(sg); } ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); @@ -1034,8 +1042,14 @@ static int spi_transfer_one_message(struct spi_master *master, if (msg->status != -EINPROGRESS) goto out; - if (xfer->delay_usecs) - udelay(xfer->delay_usecs); + if (xfer->delay_usecs) { + u16 us = xfer->delay_usecs; + + if (us <= 10) + udelay(us); + else + usleep_range(us, us + DIV_ROUND_UP(us, 10)); + } if (xfer->cs_change) { if (list_is_last(&xfer->transfer_list, diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 2e05046f866bd01bf87edcdeff0d5b76d4d0aea7..9e2e099baf8ca5cc6510912a36d4ca03daeb8273 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -696,6 +696,7 @@ static struct class *spidev_class; static const struct of_device_id spidev_dt_ids[] = { { .compatible = "rohm,dh2228fv" }, { .compatible = "lineartechnology,ltc2488" }, + { .compatible = "ge,achc" }, {}, }; MODULE_DEVICE_TABLE(of, spidev_dt_ids); diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 0f28c08fcb3c76f4a2deaf3cd084a39f800db142..77b551da57288c11d21d6ff40b65b1d8b01ed887 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -909,6 +909,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, if (err) { ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n", err); + goto out_free; } else { ssb_dbg("Using SPROM revision %d provided by platform\n", sprom->revision); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 58a7b3504b82aacc59ad65c01b97a49248013990..cd005cd414135576d15de5351d94e64c8345a911 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -24,8 +24,6 @@ menuconfig STAGING if STAGING -source "drivers/staging/slicoss/Kconfig" - source "drivers/staging/wlan-ng/Kconfig" source "drivers/staging/comedi/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 2fa9745db6144ef4b744eb57514c5bf400279a1a..831e2e891989bb2216b10d56a6a6cf8421b1378d 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -1,7 +1,6 @@ # Makefile for staging directory obj-y += media/ -obj-$(CONFIG_SLICOSS) += slicoss/ obj-$(CONFIG_PRISM2_USB) += wlan-ng/ obj-$(CONFIG_COMEDI) += comedi/ obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/ @@ -41,4 +40,4 @@ obj-$(CONFIG_MOST) += most/ obj-$(CONFIG_ISDN_I4L) += i4l/ obj-$(CONFIG_KS7010) += ks7010/ obj-$(CONFIG_GREYBUS) += greybus/ -obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/ +obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO 
index 64d8c87209605cbfebbed27378adf9fd9a244bcb..8f3ac37bfe12e71cb9b0f2e936664ffe7ba20737 100644 --- a/drivers/staging/android/TODO +++ b/drivers/staging/android/TODO @@ -25,13 +25,5 @@ ion/ exposes existing cma regions and doesn't reserve unecessarily memory when booting a system which doesn't use ion. -sync framework: - - remove CONFIG_SW_SYNC_USER, it is used only for testing/debugging and - should not be upstreamed. - - port CONFIG_SW_SYNC_USER tests interfaces to use debugfs somehow - - port libsync tests to kselftest - - clean up and ABI check for security issues - - move it to drivers/base/dma-buf - Please send patches to Greg Kroah-Hartman and Cc: Arve Hjønnevåg and Riley Andrews diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index ca9a53c03f0f9a706c7f145ef446976d89d1773a..7cbad0d45b9c8053dc77b7ec696966ed6318324d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -100,39 +100,43 @@ static DEFINE_MUTEX(ashmem_mutex); static struct kmem_cache *ashmem_area_cachep __read_mostly; static struct kmem_cache *ashmem_range_cachep __read_mostly; -#define range_size(range) \ - ((range)->pgend - (range)->pgstart + 1) +static inline unsigned long range_size(struct ashmem_range *range) +{ + return range->pgend - range->pgstart + 1; +} -#define range_on_lru(range) \ - ((range)->purged == ASHMEM_NOT_PURGED) +static inline bool range_on_lru(struct ashmem_range *range) +{ + return range->purged == ASHMEM_NOT_PURGED; +} -static inline int page_range_subsumes_range(struct ashmem_range *range, - size_t start, size_t end) +static inline bool page_range_subsumes_range(struct ashmem_range *range, + size_t start, size_t end) { - return (((range)->pgstart >= (start)) && ((range)->pgend <= (end))); + return (range->pgstart >= start) && (range->pgend <= end); } -static inline int page_range_subsumed_by_range(struct ashmem_range *range, - size_t start, size_t end) +static inline bool page_range_subsumed_by_range(struct ashmem_range *range, + size_t start, size_t end) { - return (((range)->pgstart <= (start)) && ((range)->pgend >= (end))); + return (range->pgstart <= start) && (range->pgend >= end); } -static inline int page_in_range(struct ashmem_range *range, size_t page) +static inline bool page_in_range(struct ashmem_range *range, size_t page) { - return (((range)->pgstart <= (page)) && ((range)->pgend >= (page))); + return (range->pgstart <= page) && (range->pgend >= page); } -static inline int page_range_in_range(struct ashmem_range *range, - size_t start, size_t end) +static inline bool page_range_in_range(struct ashmem_range *range, + size_t start, size_t end) { - return (page_in_range(range, start) || page_in_range(range, end) || - page_range_subsumes_range(range, start, end)); + return page_in_range(range, start) || page_in_range(range, end) || + page_range_subsumes_range(range, start, end); } -static inline int range_before_page(struct ashmem_range *range, size_t page) +static inline bool range_before_page(struct ashmem_range *range, size_t page) { - return ((range)->pgend < (page)); + return range->pgend < page; } #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 209a8f7ef02bd4b04cabefad50d17351e5e6990c..b653451843c84e81cdbea96fc0c8c006832e24c1 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -882,7 +882,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 
BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); - ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + ret = vm_insert_pfn(vma, vmf->address, pfn); mutex_unlock(&buffer->lock); if (ret) return VM_FAULT_ERROR; @@ -1013,7 +1013,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, return 0; } -static struct dma_buf_ops dma_buf_ops = { +static const struct dma_buf_ops dma_buf_ops = { .map_dma_buf = ion_map_dma_buf, .unmap_dma_buf = ion_unmap_dma_buf, .mmap = ion_mmap, diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c index b23f2c76c753e3fb95b191d98e9639a3294f5209..cf5c010d32bcac356c69df987d4208a5e251a2b0 100644 --- a/drivers/staging/android/ion/ion_dummy_driver.c +++ b/drivers/staging/android/ion/ion_dummy_driver.c @@ -58,7 +58,7 @@ static struct ion_platform_heap dummy_heaps[] = { }, }; -static struct ion_platform_data dummy_ion_pdata = { +static const struct ion_platform_data dummy_ion_pdata = { .nr = ARRAY_SIZE(dummy_heaps), .heaps = dummy_heaps, }; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 7e023d505af870ee527e8cce934e8155a7c193de..3ebbb75746e8c4de63781defd360ed7f11e77b46 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -30,7 +30,7 @@ static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_RECLAIM; -static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO); +static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO; static const unsigned int orders[] = {8, 4, 0}; static int order_to_index(unsigned int order) diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h index ffef06f63133fb874e38c1445947a1b35175dd75..480242e02f8d64b547792225e2aacb7629b496f8 100644 --- a/drivers/staging/android/uapi/ion_test.h +++ b/drivers/staging/android/uapi/ion_test.h @@ -66,5 +66,4 @@ struct ion_test_rw_data { #define ION_IOC_TEST_KERNEL_MAPPING \ _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data) - #endif /* _UAPI_LINUX_ION_H */ diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c index 7b8be5293883b2d5f5ba96ff4bc9e860d37d3a63..bf3fe7c61be578380225f666b7981b623517383c 100644 --- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c +++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c @@ -68,7 +68,7 @@ struct clk_wzrd { struct clk *axi_clk; struct clk *clks_internal[wzrd_clk_int_max]; struct clk *clkout[WZRD_NUM_OUTPUTS]; - int speed_grade; + unsigned int speed_grade; bool suspended; }; diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h index 08fb26b51a5fb3d98ae191d83c0e6159a3a8c9b0..a1c1081906c5523dd3976112b7c9a0cf18012a6f 100644 --- a/drivers/staging/comedi/comedi.h +++ b/drivers/staging/comedi/comedi.h @@ -244,6 +244,22 @@ enum comedi_subdevice_type { /* configuration instructions */ +/** + * enum comedi_io_direction - COMEDI I/O directions + * @COMEDI_INPUT: Input. + * @COMEDI_OUTPUT: Output. + * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output. + * + * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to + * report a direction. They may also be used in other places where a direction + * needs to be specified. 
+ */ +enum comedi_io_direction { + COMEDI_INPUT = 0, + COMEDI_OUTPUT = 1, + COMEDI_OPENDRAIN = 2 +}; + /** * enum configuration_ids - COMEDI configuration instruction codes * @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input. @@ -296,9 +312,9 @@ enum comedi_subdevice_type { * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity. */ enum configuration_ids { - INSN_CONFIG_DIO_INPUT = 0, - INSN_CONFIG_DIO_OUTPUT = 1, - INSN_CONFIG_DIO_OPENDRAIN = 2, + INSN_CONFIG_DIO_INPUT = COMEDI_INPUT, + INSN_CONFIG_DIO_OUTPUT = COMEDI_OUTPUT, + INSN_CONFIG_DIO_OPENDRAIN = COMEDI_OPENDRAIN, INSN_CONFIG_ANALOG_TRIG = 16, /* INSN_CONFIG_WAVEFORM = 17, */ /* INSN_CONFIG_TRIG = 18, */ @@ -396,22 +412,6 @@ enum comedi_digital_trig_op { COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = 2 }; -/** - * enum comedi_io_direction - COMEDI I/O directions - * @COMEDI_INPUT: Input. - * @COMEDI_OUTPUT: Output. - * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output. - * - * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to - * report a direction. They may also be used in other places where a direction - * needs to be specified. - */ -enum comedi_io_direction { - COMEDI_INPUT = 0, - COMEDI_OUTPUT = 1, - COMEDI_OPENDRAIN = 2 -}; - /** * enum comedi_support_level - support level for a COMEDI feature * @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature. @@ -1104,18 +1104,19 @@ enum ni_gpct_other_select { enum ni_gpct_arm_source { NI_GPCT_ARM_IMMEDIATE = 0x0, /* - * Start both the counter and the adjacent pared - * counter simultaneously + * Start both the counter and the adjacent paired counter simultaneously */ NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1, /* - * NI doesn't document bits for selecting hardware arm triggers. - * If the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least - * significant bits (3 bits for 660x or 5 bits for m-series) - * through to the hardware. This will at least allow someone to - * figure out what the bits do later. + * If the NI_GPCT_HW_ARM bit is set, we will pass the least significant + * bits (3 bits for 660x or 5 bits for m-series) through to the + * hardware. To select a hardware trigger, pass the appropriate select + * bit, e.g., + * NI_GPCT_HW_ARM | NI_GPCT_AI_START1_GATE_SELECT or + * NI_GPCT_HW_ARM | NI_GPCT_PFI_GATE_SELECT(pfi_number) */ - NI_GPCT_ARM_UNKNOWN = 0x1000, + NI_GPCT_HW_ARM = 0x1000, + NI_GPCT_ARM_UNKNOWN = NI_GPCT_HW_ARM, /* for backward compatibility */ }; /* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */ diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h index dcb637665eb7283a4a85a65aa69896dc2c267504..0c7c37a8ff3372aa65103be3dc1b8f6f0cd2bab2 100644 --- a/drivers/staging/comedi/comedidev.h +++ b/drivers/staging/comedi/comedidev.h @@ -426,6 +426,18 @@ enum comedi_cb { * handler will be called with the COMEDI device structure's board_ptr member * pointing to the matched pointer to a board name within the driver's private * array of static, read-only board type information. + * + * The @detach handler has two roles. If a COMEDI device was successfully + * configured by the @attach or @auto_attach handler, it is called when the + * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to + * unloading of the driver, or due to device removal). It is also called when + * the @attach or @auto_attach handler returns an error. Therefore, the + * @attach or @auto_attach handlers can defer clean-up on error until the + * @detach handler is called. 
If the @attach or @auto_attach handlers free + * any resources themselves, they must prevent the @detach handler from + * freeing the same resources. The @detach handler must not assume that all + * resources requested by the @attach or @auto_attach handler were + * successfully allocated. */ struct comedi_driver { /* private: */ diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c index ccb37d1f0f8eeddc9e8495711da1c220e5c465ec..98741474160514edd39a11132c3d88a648eab16e 100644 --- a/drivers/staging/comedi/drivers/cb_pcidda.c +++ b/drivers/staging/comedi/drivers/cb_pcidda.c @@ -248,8 +248,8 @@ static void cb_pcidda_write_caldac(struct comedi_device *dev, cb_pcidda_serial_out(dev, value, num_caldac_bits); /* -* latch stream into appropriate caldac deselect reference dac -*/ + * latch stream into appropriate caldac deselect reference dac + */ cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT; /* deactivate caldacs (one caldac for every two channels) */ for (i = 0; i < max_num_caldacs; i++) diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c index b1c0860135d0156a6bc7305b2316d936c2846546..05126ba4ba51bd804fc5f91413acbe8c7c6fd590 100644 --- a/drivers/staging/comedi/drivers/mite.c +++ b/drivers/staging/comedi/drivers/mite.c @@ -837,7 +837,7 @@ static int mite_setup(struct comedi_device *dev, struct mite *mite, * of 0x61f and bursts worked. 6281 powered up with register value of * 0x1f and bursts didn't work. The NI windows driver reads the * register, then does a bitwise-or of 0x600 with it and writes it back. - * + * * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be * written and read back. The bits 0x1f always read as 1. * The rest always read as zero. diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 0f97d7b611d720b7fd774ab102adfd3a607494fa..b2e38288898132004152c1e00d38c8df286e5d43 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -1832,11 +1832,10 @@ static int ni_ai_insn_read(struct comedi_device *dev, unsigned int *data) { struct ni_private *devpriv = dev->private; - unsigned int mask = (s->maxdata + 1) >> 1; + unsigned int mask = s->maxdata; int i, n; unsigned int signbits; unsigned int d; - unsigned long dl; ni_load_channelgain_list(dev, s, 1, &insn->chanspec); @@ -1875,7 +1874,7 @@ static int ni_ai_insn_read(struct comedi_device *dev, return -ETIME; } d += signbits; - data[n] = d; + data[n] = d & 0xffff; } } else if (devpriv->is_6143) { for (n = 0; n < insn->n; n++) { @@ -1887,15 +1886,15 @@ static int ni_ai_insn_read(struct comedi_device *dev, * bit to move a single 16bit stranded sample into * the FIFO. 
*/ - dl = 0; + d = 0; for (i = 0; i < NI_TIMEOUT; i++) { if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) & 0x01) { /* Get stranded sample into FIFO */ ni_writel(dev, 0x01, NI6143_AI_FIFO_CTRL_REG); - dl = ni_readl(dev, - NI6143_AI_FIFO_DATA_REG); + d = ni_readl(dev, + NI6143_AI_FIFO_DATA_REG); break; } } @@ -1903,7 +1902,7 @@ static int ni_ai_insn_read(struct comedi_device *dev, dev_err(dev->class_dev, "timeout\n"); return -ETIME; } - data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF; + data[n] = (((d >> 16) & 0xFFFF) + signbits) & 0xFFFF; } } else { for (n = 0; n < insn->n; n++) { @@ -1919,14 +1918,13 @@ static int ni_ai_insn_read(struct comedi_device *dev, return -ETIME; } if (devpriv->is_m_series) { - dl = ni_readl(dev, NI_M_AI_FIFO_DATA_REG); - dl &= mask; - data[n] = dl; + d = ni_readl(dev, NI_M_AI_FIFO_DATA_REG); + d &= mask; + data[n] = d; } else { d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG); - /* subtle: needs to be short addition */ d += signbits; - data[n] = d; + data[n] = d & 0xffff; } } } @@ -2729,66 +2727,36 @@ static int ni_ao_insn_write(struct comedi_device *dev, return insn->n; } -static int ni_ao_insn_config(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, unsigned int *data) -{ - const struct ni_board_struct *board = dev->board_ptr; - struct ni_private *devpriv = dev->private; - unsigned int nbytes; - - switch (data[0]) { - case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: - switch (data[1]) { - case COMEDI_OUTPUT: - nbytes = comedi_samples_to_bytes(s, - board->ao_fifo_depth); - data[2] = 1 + nbytes; - if (devpriv->mite) - data[2] += devpriv->mite->fifo_size; - break; - case COMEDI_INPUT: - data[2] = 0; - break; - default: - return -EINVAL; - } - return 0; - default: - break; - } - - return -EINVAL; -} - -static int ni_ao_inttrig(struct comedi_device *dev, - struct comedi_subdevice *s, - unsigned int trig_num) +/* + * Arms the AO device in preparation for a trigger event. + * This function also allocates and prepares a DMA channel (or FIFO if DMA is + * not used). As a part of this preparation, this function preloads the DAC + * registers with the first values of the output stream. This ensures that the + * first clock cycle after the trigger can be used for output. + * + * Note that this function _must_ happen after a user has written data to the + * output buffers via either mmap or write(fileno,...). + */ +static int ni_ao_arm(struct comedi_device *dev, + struct comedi_subdevice *s) { struct ni_private *devpriv = dev->private; - struct comedi_cmd *cmd = &s->async->cmd; int ret; int interrupt_b_bits; int i; static const int timeout = 1000; /* - * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT. - * For backwards compatibility, also allow trig_num == 0 when - * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT); - * in that case, the internal trigger is being used as a pre-trigger - * before the external trigger. + * Prevent ao from doing things like trying to allocate the ao dma + * channel multiple times. */ - if (!(trig_num == cmd->start_arg || - (trig_num == 0 && cmd->start_src != TRIG_INT))) + if (!devpriv->ao_needs_arming) { + dev_dbg(dev->class_dev, "%s: device does not need arming!\n", + __func__); return -EINVAL; + } - /* - * Null trig at beginning prevent ao start trigger from executing more - * than once per command (and doing things like trying to allocate the - * ao dma channel multiple times). 
- */ - s->async->inttrig = NULL; + devpriv->ao_needs_arming = 0; ni_set_bits(dev, NISTC_INTB_ENA_REG, NISTC_INTB_ENA_AO_FIFO | NISTC_INTB_ENA_AO_ERR, 0); @@ -2840,6 +2808,75 @@ static int ni_ao_inttrig(struct comedi_device *dev, devpriv->ao_cmd1, NISTC_AO_CMD1_REG); + return 0; +} + +static int ni_ao_insn_config(struct comedi_device *dev, + struct comedi_subdevice *s, + struct comedi_insn *insn, unsigned int *data) +{ + const struct ni_board_struct *board = dev->board_ptr; + struct ni_private *devpriv = dev->private; + unsigned int nbytes; + + switch (data[0]) { + case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: + switch (data[1]) { + case COMEDI_OUTPUT: + nbytes = comedi_samples_to_bytes(s, + board->ao_fifo_depth); + data[2] = 1 + nbytes; + if (devpriv->mite) + data[2] += devpriv->mite->fifo_size; + break; + case COMEDI_INPUT: + data[2] = 0; + break; + default: + return -EINVAL; + } + return 0; + case INSN_CONFIG_ARM: + return ni_ao_arm(dev, s); + default: + break; + } + + return -EINVAL; +} + +static int ni_ao_inttrig(struct comedi_device *dev, + struct comedi_subdevice *s, + unsigned int trig_num) +{ + struct ni_private *devpriv = dev->private; + struct comedi_cmd *cmd = &s->async->cmd; + int ret; + + /* + * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT. + * For backwards compatibility, also allow trig_num == 0 when + * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT); + * in that case, the internal trigger is being used as a pre-trigger + * before the external trigger. + */ + if (!(trig_num == cmd->start_arg || + (trig_num == 0 && cmd->start_src != TRIG_INT))) + return -EINVAL; + + /* + * Null trig at beginning prevent ao start trigger from executing more + * than once per command. + */ + s->async->inttrig = NULL; + + if (devpriv->ao_needs_arming) { + /* only arm this device if it still needs arming */ + ret = ni_ao_arm(dev, s); + if (ret) + return ret; + } + ni_stc_writew(dev, NISTC_AO_CMD2_START1_PULSE | devpriv->ao_cmd2, NISTC_AO_CMD2_REG); @@ -3227,10 +3264,17 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) ni_ao_cmd_set_interrupts(dev, s); /* - * arm(ing) and star(ting) happen in ni_ao_inttrig, which _must_ be - * called for ao commands since 1) TRIG_NOW is not supported and 2) DMA - * must be setup and initially written to before arm/start happen. + * arm(ing) must happen later so that DMA can be setup and DACs + * preloaded with the actual output buffer before starting. + * + * start(ing) must happen _after_ arming is completed. Starting can be + * done either via ni_ao_inttrig, or via an external trigger. + * + * **Currently, ni_ao_inttrig will automatically attempt a call to + * ni_ao_arm if the device still needs arming at that point. This + * allows backwards compatibility. */ + devpriv->ao_needs_arming = 1; return 0; } diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h index 1966519cb6e54337d2a0a1464ec6a5d758138226..f27b545f83ebca7c6ee14278be237436b722a2f7 100644 --- a/drivers/staging/comedi/drivers/ni_stc.h +++ b/drivers/staging/comedi/drivers/ni_stc.h @@ -1053,6 +1053,20 @@ struct ni_private { unsigned int is_67xx:1; unsigned int is_6711:1; unsigned int is_6713:1; + + /* + * Boolean value of whether device needs to be armed. + * + * Currently, only NI AO devices are known to be needing arming, since + * the DAC registers must be preloaded before triggering. 
+ * This variable should only be set true during a command operation + * (e.g ni_ao_cmd) and should then be set false by the arming + * function (e.g. ni_ao_arm). + * + * This variable helps to ensure that multiple DMA allocations are not + * possible. + */ + unsigned int ao_needs_arming:1; }; static const struct comedi_lrange range_ni_E_ao_ext; diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c index 7043eb0543f6b15c98161e341b354a05e7748e02..15cb4088467bc782e505dbf2723819f699698c9f 100644 --- a/drivers/staging/comedi/drivers/ni_tio.c +++ b/drivers/staging/comedi/drivers/ni_tio.c @@ -207,7 +207,8 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter, * clock period is specified by user with prescaling * already taken into account. */ - return counter->clock_period_ps; + *period_ps = counter->clock_period_ps; + return 0; } switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) { @@ -451,8 +452,9 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter) unsigned int bits = 0; unsigned int reg; unsigned int mode; - unsigned int clk_src; - u64 ps; + unsigned int clk_src = 0; + u64 ps = 0; + int ret; bool force_alt_sync; /* only m series and 660x variants have counting mode registers */ @@ -482,9 +484,12 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter) break; } - ni_tio_generic_clock_src_select(counter, &clk_src); - ni_tio_clock_period_ps(counter, clk_src, &ps); - + ret = ni_tio_generic_clock_src_select(counter, &clk_src); + if (ret) + return; + ret = ni_tio_clock_period_ps(counter, clk_src, &ps); + if (ret) + return; /* * It's not clear what we should do if clock_period is unknown, so we * are not using the alt sync bit in that case. @@ -808,7 +813,7 @@ static int ni_tio_get_clock_src(struct ni_gpct *counter, unsigned int *clock_source, unsigned int *period_ns) { - u64 temp64; + u64 temp64 = 0; int ret; ret = ni_tio_generic_clock_src_select(counter, clock_source); diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c index 5aeed44dff706787eb75024afd142f1a77e43da6..5b5df0596ad982c9077100b436fcb143cba60982 100644 --- a/drivers/staging/comedi/drivers/pcl818.c +++ b/drivers/staging/comedi/drivers/pcl818.c @@ -771,9 +771,9 @@ static int pcl818_ai_cancel(struct comedi_device *dev, s->async->scans_done < cmd->stop_arg)) { if (!devpriv->ai_cmd_canceled) { /* - * Wait for running dma transfer to end, - * do cleanup in interrupt. - */ + * Wait for running dma transfer to end, + * do cleanup in interrupt. 
+ */ devpriv->ai_cmd_canceled = 1; return 0; } diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index c14a025644323f6ed3b948156d165c17a4c2a520..0dd5fe28685531951a8cc6e5181322067650b976 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -75,24 +75,24 @@ struct s626_buffer_dma { }; struct s626_private { - uint8_t ai_cmd_running; /* ai_cmd is running */ + u8 ai_cmd_running; /* ai_cmd is running */ unsigned int ai_sample_timer; /* time between samples in * units of the timer */ int ai_convert_count; /* conversion counter */ unsigned int ai_convert_timer; /* time between conversion in * units of the timer */ - uint16_t counter_int_enabs; /* counter interrupt enable mask + u16 counter_int_enabs; /* counter interrupt enable mask * for MISC2 register */ - uint8_t adc_items; /* number of items in ADC poll list */ + u8 adc_items; /* number of items in ADC poll list */ struct s626_buffer_dma rps_buf; /* DMA buffer used to hold ADC (RPS1) * program */ struct s626_buffer_dma ana_buf; /* DMA buffer used to receive ADC data * and hold DAC data */ - uint32_t *dac_wbuf; /* pointer to logical adrs of DMA buffer + u32 *dac_wbuf; /* pointer to logical adrs of DMA buffer * used to hold DAC data */ - uint16_t dacpol; /* image of DAC polarity register */ - uint8_t trim_setpoint[12]; /* images of TrimDAC setpoints */ - uint32_t i2c_adrs; /* I2C device address for onboard EEPROM + u16 dacpol; /* image of DAC polarity register */ + u8 trim_setpoint[12]; /* images of TrimDAC setpoints */ + u32 i2c_adrs; /* I2C device address for onboard EEPROM * (board rev dependent) */ }; @@ -179,7 +179,7 @@ static void s626_debi_transfer(struct comedi_device *dev) /* * Read a value from a gate array register. */ -static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr) +static u16 s626_debi_read(struct comedi_device *dev, u16 addr) { /* Set up DEBI control register value in shadow RAM */ writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD); @@ -193,8 +193,8 @@ static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr) /* * Write a value to a gate array register. */ -static void s626_debi_write(struct comedi_device *dev, uint16_t addr, - uint16_t wdata) +static void s626_debi_write(struct comedi_device *dev, u16 addr, + u16 wdata) { /* Set up DEBI control register value in shadow RAM */ writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD); @@ -241,7 +241,7 @@ static int s626_i2c_handshake_eoc(struct comedi_device *dev, return -EBUSY; } -static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val) +static int s626_i2c_handshake(struct comedi_device *dev, u32 val) { unsigned int ctrl; int ret; @@ -267,8 +267,8 @@ static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val) return ctrl & S626_I2C_ERR; } -/* Read uint8_t from EEPROM. */ -static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr) +/* Read u8 from EEPROM. */ +static u8 s626_i2c_read(struct comedi_device *dev, u8 addr) { struct s626_private *devpriv = dev->private; @@ -304,10 +304,10 @@ static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr) /* *********** DAC FUNCTIONS *********** */ /* TrimDac LogicalChan-to-PhysicalChan mapping table. */ -static const uint8_t s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 }; +static const u8 s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 }; /* TrimDac LogicalChan-to-EepromAdrs mapping table. 
*/ -static const uint8_t s626_trimadrs[] = { +static const u8 s626_trimadrs[] = { 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63 }; @@ -357,7 +357,7 @@ static int s626_send_dac_eoc(struct comedi_device *dev, * channel 2. Assumes: (1) TSL2 slot records initialized, and (2) * dacpol contains valid target image. */ -static int s626_send_dac(struct comedi_device *dev, uint32_t val) +static int s626_send_dac(struct comedi_device *dev, u32 val) { struct s626_private *devpriv = dev->private; int ret; @@ -516,12 +516,12 @@ static int s626_send_dac(struct comedi_device *dev, uint32_t val) * Private helper function: Write setpoint to an application DAC channel. */ static int s626_set_dac(struct comedi_device *dev, - uint16_t chan, int16_t dacdata) + u16 chan, int16_t dacdata) { struct s626_private *devpriv = dev->private; - uint16_t signmask; - uint32_t ws_image; - uint32_t val; + u16 signmask; + u32 ws_image; + u32 val; /* * Adjust DAC data polarity and set up Polarity Control Register image. @@ -535,7 +535,7 @@ static int s626_set_dac(struct comedi_device *dev, } /* Limit DAC setpoint value to valid range. */ - if ((uint16_t)dacdata > 0x1FFF) + if ((u16)dacdata > 0x1FFF) dacdata = 0x1FFF; /* @@ -575,23 +575,23 @@ static int s626_set_dac(struct comedi_device *dev, * (write to non-existent trimdac). */ val |= 0x00004000; /* Address the two main dual-DAC devices * (TSL's chip select enables target device). */ - val |= ((uint32_t)(chan & 1) << 15); /* Address the DAC channel + val |= ((u32)(chan & 1) << 15); /* Address the DAC channel * within the device. */ - val |= (uint32_t)dacdata; /* Include DAC setpoint data. */ + val |= (u32)dacdata; /* Include DAC setpoint data. */ return s626_send_dac(dev, val); } static int s626_write_trim_dac(struct comedi_device *dev, - uint8_t logical_chan, uint8_t dac_data) + u8 logical_chan, u8 dac_data) { struct s626_private *devpriv = dev->private; - uint32_t chan; + u32 chan; /* * Save the new setpoint in case the application needs to read it back * later. */ - devpriv->trim_setpoint[logical_chan] = (uint8_t)dac_data; + devpriv->trim_setpoint[logical_chan] = (u8)dac_data; /* Map logical channel number to physical channel number. */ chan = s626_trimchan[logical_chan]; @@ -633,7 +633,7 @@ static int s626_write_trim_dac(struct comedi_device *dev, static int s626_load_trim_dacs(struct comedi_device *dev) { - uint8_t i; + u8 i; int ret; /* Copy TrimDac setpoint values from EEPROM to TrimDacs. */ @@ -661,7 +661,7 @@ static int s626_load_trim_dacs(struct comedi_device *dev) * latches B. */ static void s626_set_latch_source(struct comedi_device *dev, - unsigned int chan, uint16_t value) + unsigned int chan, u16 value) { s626_debi_replace(dev, S626_LP_CRB(chan), ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC), @@ -672,7 +672,7 @@ static void s626_set_latch_source(struct comedi_device *dev, * Write value into counter preload register. */ static void s626_preload(struct comedi_device *dev, - unsigned int chan, uint32_t value) + unsigned int chan, u32 value) { s626_debi_write(dev, S626_LP_CNTR(chan), value); s626_debi_write(dev, S626_LP_CNTR(chan) + 2, value >> 16); @@ -686,7 +686,7 @@ static void s626_preload(struct comedi_device *dev, static void s626_reset_cap_flags(struct comedi_device *dev, unsigned int chan) { - uint16_t set; + u16 set; set = S626_SET_CRB_INTRESETCMD(1); if (chan < 3) @@ -704,12 +704,12 @@ static void s626_reset_cap_flags(struct comedi_device *dev, * ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc. 
*/ static void s626_set_mode_a(struct comedi_device *dev, - unsigned int chan, uint16_t setup, - uint16_t disable_int_src) + unsigned int chan, u16 setup, + u16 disable_int_src) { struct s626_private *devpriv = dev->private; - uint16_t cra; - uint16_t crb; + u16 cra; + u16 crb; unsigned int cntsrc, clkmult, clkpol; /* Initialize CRA and CRB images. */ @@ -782,12 +782,12 @@ static void s626_set_mode_a(struct comedi_device *dev, } static void s626_set_mode_b(struct comedi_device *dev, - unsigned int chan, uint16_t setup, - uint16_t disable_int_src) + unsigned int chan, u16 setup, + u16 disable_int_src) { struct s626_private *devpriv = dev->private; - uint16_t cra; - uint16_t crb; + u16 cra; + u16 crb; unsigned int cntsrc, clkmult, clkpol; /* Initialize CRA and CRB images. */ @@ -868,7 +868,7 @@ static void s626_set_mode_b(struct comedi_device *dev, static void s626_set_mode(struct comedi_device *dev, unsigned int chan, - uint16_t setup, uint16_t disable_int_src) + u16 setup, u16 disable_int_src) { if (chan < 3) s626_set_mode_a(dev, chan, setup, disable_int_src); @@ -880,7 +880,7 @@ static void s626_set_mode(struct comedi_device *dev, * Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index. */ static void s626_set_enable(struct comedi_device *dev, - unsigned int chan, uint16_t enab) + unsigned int chan, u16 enab) { unsigned int mask = S626_CRBMSK_INTCTRL; unsigned int set; @@ -901,11 +901,11 @@ static void s626_set_enable(struct comedi_device *dev, * 2=OverflowA (B counters only), 3=disabled. */ static void s626_set_load_trig(struct comedi_device *dev, - unsigned int chan, uint16_t trig) + unsigned int chan, u16 trig) { - uint16_t reg; - uint16_t mask; - uint16_t set; + u16 reg; + u16 mask; + u16 set; if (chan < 3) { reg = S626_LP_CRA(chan); @@ -925,11 +925,11 @@ static void s626_set_load_trig(struct comedi_device *dev, * 2=IndexOnly, 3=IndexAndOverflow. 
*/ static void s626_set_int_src(struct comedi_device *dev, - unsigned int chan, uint16_t int_source) + unsigned int chan, u16 int_source) { struct s626_private *devpriv = dev->private; - uint16_t cra_reg = S626_LP_CRA(chan); - uint16_t crb_reg = S626_LP_CRB(chan); + u16 cra_reg = S626_LP_CRA(chan); + u16 crb_reg = S626_LP_CRB(chan); if (chan < 3) { /* Reset any pending counter overflow or index captures */ @@ -941,7 +941,7 @@ static void s626_set_int_src(struct comedi_device *dev, s626_debi_replace(dev, cra_reg, ~S626_CRAMSK_INTSRC_A, S626_SET_CRA_INTSRC_A(int_source)); } else { - uint16_t crb; + u16 crb; /* Cache writeable CRB register image */ crb = s626_debi_read(dev, crb_reg); @@ -985,7 +985,7 @@ static void s626_pulse_index(struct comedi_device *dev, unsigned int chan) { if (chan < 3) { - uint16_t cra; + u16 cra; cra = s626_debi_read(dev, S626_LP_CRA(chan)); @@ -994,7 +994,7 @@ static void s626_pulse_index(struct comedi_device *dev, (cra ^ S626_CRAMSK_INDXPOL_A)); s626_debi_write(dev, S626_LP_CRA(chan), cra); } else { - uint16_t crb; + u16 crb; crb = s626_debi_read(dev, S626_LP_CRB(chan)); crb &= ~S626_CRBMSK_INTCTRL; @@ -1062,7 +1062,7 @@ static int s626_dio_clear_irq(struct comedi_device *dev) } static void s626_handle_dio_interrupt(struct comedi_device *dev, - uint16_t irqbit, uint8_t group) + u16 irqbit, u8 group) { struct s626_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; @@ -1110,8 +1110,8 @@ static void s626_handle_dio_interrupt(struct comedi_device *dev, static void s626_check_dio_interrupts(struct comedi_device *dev) { - uint16_t irqbit; - uint8_t group; + u16 irqbit; + u8 group; for (group = 0; group < S626_DIO_BANKS; group++) { /* read interrupt type */ @@ -1131,7 +1131,7 @@ static void s626_check_counter_interrupts(struct comedi_device *dev) struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; - uint16_t irqbit; + u16 irqbit; /* read interrupt type */ irqbit = s626_debi_read(dev, S626_LP_RDMISC2); @@ -1196,7 +1196,7 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev) * first uint16_t in the buffer because it contains junk data * from the final ADC of the previous poll list scan. */ - uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1; + u32 *readaddr = (u32 *)devpriv->ana_buf.logical_base + 1; int i; /* get the data and hand it over to comedi */ @@ -1231,7 +1231,7 @@ static irqreturn_t s626_irq_handler(int irq, void *d) { struct comedi_device *dev = d; unsigned long flags; - uint32_t irqtype, irqstatus; + u32 irqtype, irqstatus; if (!dev->attached) return IRQ_NONE; @@ -1272,25 +1272,25 @@ static irqreturn_t s626_irq_handler(int irq, void *d) /* * This function builds the RPS program for hardware driven acquisition. */ -static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl) +static void s626_reset_adc(struct comedi_device *dev, u8 *ppl) { struct s626_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; struct comedi_cmd *cmd = &s->async->cmd; - uint32_t *rps; - uint32_t jmp_adrs; - uint16_t i; - uint16_t n; - uint32_t local_ppl; + u32 *rps; + u32 jmp_adrs; + u16 i; + u16 n; + u32 local_ppl; /* Stop RPS program in case it is currently running */ s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1); /* Set starting logical address to write RPS commands. 
*/ - rps = (uint32_t *)devpriv->rps_buf.logical_base; + rps = (u32 *)devpriv->rps_buf.logical_base; /* Initialize RPS instruction pointer */ - writel((uint32_t)devpriv->rps_buf.physical_base, + writel((u32)devpriv->rps_buf.physical_base, dev->mmio + S626_P_RPSADDR1); /* Construct RPS program in rps_buf DMA buffer */ @@ -1372,8 +1372,8 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl) * flushes the RPS' instruction prefetch pipeline. */ jmp_adrs = - (uint32_t)devpriv->rps_buf.physical_base + - (uint32_t)((unsigned long)rps - + (u32)devpriv->rps_buf.physical_base + + (u32)((unsigned long)rps - (unsigned long)devpriv-> rps_buf.logical_base); for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) { @@ -1408,7 +1408,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl) /* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */ *rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2); - *rps++ = (uint32_t)devpriv->ana_buf.physical_base + + *rps++ = (u32)devpriv->ana_buf.physical_base + (devpriv->adc_items << 2); /* @@ -1452,7 +1452,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl) /* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */ *rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2); - *rps++ = (uint32_t)devpriv->ana_buf.physical_base + + *rps++ = (u32)devpriv->ana_buf.physical_base + (devpriv->adc_items << 2); /* Indicate ADC scan loop is finished. */ @@ -1465,7 +1465,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl) /* Restart RPS program at its beginning. */ *rps++ = S626_RPS_JUMP; /* Branch to start of RPS program. */ - *rps++ = (uint32_t)devpriv->rps_buf.physical_base; + *rps++ = (u32)devpriv->rps_buf.physical_base; /* End of RPS program build */ } @@ -1488,11 +1488,11 @@ static int s626_ai_insn_read(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data) { - uint16_t chan = CR_CHAN(insn->chanspec); - uint16_t range = CR_RANGE(insn->chanspec); - uint16_t adc_spec = 0; - uint32_t gpio_image; - uint32_t tmp; + u16 chan = CR_CHAN(insn->chanspec); + u16 range = CR_RANGE(insn->chanspec); + u16 adc_spec = 0; + u32 gpio_image; + u32 tmp; int ret; int n; @@ -1585,7 +1585,7 @@ static int s626_ai_insn_read(struct comedi_device *dev, return n; } -static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd) +static int s626_ai_load_polllist(u8 *ppl, struct comedi_cmd *cmd) { int n; @@ -1651,7 +1651,7 @@ static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags) static void s626_timer_load(struct comedi_device *dev, unsigned int chan, int tick) { - uint16_t setup = + u16 setup = /* Preload upon index. */ S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) | /* Disable hardware index. 
*/ @@ -1664,7 +1664,7 @@ static void s626_timer_load(struct comedi_device *dev, S626_SET_STD_CLKMULT(S626_CLKMULT_1X) | /* Enabled by index */ S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX); - uint16_t value_latchsrc = S626_LATCHSRC_A_INDXA; + u16 value_latchsrc = S626_LATCHSRC_A_INDXA; /* uint16_t enab = S626_CLKENAB_ALWAYS; */ s626_set_mode(dev, chan, setup, false); @@ -1693,7 +1693,7 @@ static void s626_timer_load(struct comedi_device *dev, static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct s626_private *devpriv = dev->private; - uint8_t ppl[16]; + u8 ppl[16]; struct comedi_cmd *cmd = &s->async->cmd; int tick; @@ -1953,7 +1953,7 @@ static int s626_ao_insn_write(struct comedi_device *dev, static void s626_dio_init(struct comedi_device *dev) { - uint16_t group; + u16 group; /* Prepare to treat writes to WRCapSel as capture disables. */ s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP); @@ -2017,7 +2017,7 @@ static int s626_enc_insn_config(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); - uint16_t setup = + u16 setup = /* Preload upon index. */ S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) | /* Disable hardware index. */ @@ -2032,8 +2032,8 @@ static int s626_enc_insn_config(struct comedi_device *dev, S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX); /* uint16_t disable_int_src = true; */ /* uint32_t Preloadvalue; //Counter initial value */ - uint16_t value_latchsrc = S626_LATCHSRC_AB_READ; - uint16_t enab = S626_CLKENAB_ALWAYS; + u16 value_latchsrc = S626_LATCHSRC_AB_READ; + u16 enab = S626_CLKENAB_ALWAYS; /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */ @@ -2052,7 +2052,7 @@ static int s626_enc_insn_read(struct comedi_device *dev, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); - uint16_t cntr_latch_reg = S626_LP_CNTR(chan); + u16 cntr_latch_reg = S626_LP_CNTR(chan); int i; for (i = 0; i < insn->n; i++) { @@ -2090,7 +2090,7 @@ static int s626_enc_insn_write(struct comedi_device *dev, return 1; } -static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image) +static void s626_write_misc2(struct comedi_device *dev, u16 new_image) { s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE); s626_debi_write(dev, S626_LP_WRMISC2, new_image); @@ -2100,7 +2100,7 @@ static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image) static void s626_counters_init(struct comedi_device *dev) { int chan; - uint16_t setup = + u16 setup = /* Preload upon index. */ S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) | /* Disable hardware index. */ @@ -2169,7 +2169,7 @@ static int s626_initialize(struct comedi_device *dev) { struct s626_private *devpriv = dev->private; dma_addr_t phys_buf; - uint16_t chan; + u16 chan; int i; int ret; @@ -2248,7 +2248,7 @@ static int s626_initialize(struct comedi_device *dev) */ /* Physical start of RPS program */ - writel((uint32_t)devpriv->rps_buf.physical_base, + writel((u32)devpriv->rps_buf.physical_base, dev->mmio + S626_P_RPSADDR1); /* RPS program performs no explicit mem writes */ writel(0, dev->mmio + S626_P_RPSPAGE1); @@ -2318,16 +2318,16 @@ static int s626_initialize(struct comedi_device *dev) * enabled. 
*/ phys_buf = devpriv->ana_buf.physical_base + - (S626_DAC_WDMABUF_OS * sizeof(uint32_t)); - writel((uint32_t)phys_buf, dev->mmio + S626_P_BASEA2_OUT); - writel((uint32_t)(phys_buf + sizeof(uint32_t)), + (S626_DAC_WDMABUF_OS * sizeof(u32)); + writel((u32)phys_buf, dev->mmio + S626_P_BASEA2_OUT); + writel((u32)(phys_buf + sizeof(u32)), dev->mmio + S626_P_PROTA2_OUT); /* * Cache Audio2's output DMA buffer logical address. This is * where DAC data is buffered for A2 output DMA transfers. */ - devpriv->dac_wbuf = (uint32_t *)devpriv->ana_buf.logical_base + + devpriv->dac_wbuf = (u32 *)devpriv->ana_buf.logical_base + S626_DAC_WDMABUF_OS; /* diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c index d0a8a28edd36b29a76f8dd9c559b52557790d75c..55d43c076b1cc581cca3776b5d41a60e328dad27 100644 --- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c +++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c @@ -250,3 +250,15 @@ int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice) return n; } EXPORT_SYMBOL_GPL(comedi_get_n_channels); + +static int __init kcomedilib_module_init(void) +{ + return 0; +} + +static void __exit kcomedilib_module_exit(void) +{ +} + +module_init(kcomedilib_module_init); +module_exit(kcomedilib_module_exit); diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile index 995c874f40eb138ce0af15eb6c97acd2e5730707..40ff0d0076957c89350106302e02fae3f3a6c31b 100644 --- a/drivers/staging/dgnc/Makefile +++ b/drivers/staging/dgnc/Makefile @@ -2,5 +2,4 @@ obj-$(CONFIG_DGNC) += dgnc.o dgnc-objs := dgnc_cls.o dgnc_driver.o\ dgnc_mgmt.o dgnc_neo.o\ - dgnc_tty.o dgnc_sysfs.o\ - dgnc_utils.o + dgnc_tty.o dgnc_utils.o diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c index aedca66cbe4110b81766897e9d45c84abd022d5d..c20ffdd254d8fda4b731a3efc509f58460e849ca 100644 --- a/drivers/staging/dgnc/dgnc_cls.c +++ b/drivers/staging/dgnc/dgnc_cls.c @@ -385,9 +385,8 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch) ch->ch_rxcount++; } - /* - * Write new final heads to channel structure. - */ + /* Write new final heads to channel structure. */ + ch->ch_r_head = head & RQUEUEMASK; ch->ch_e_head = head & EQUEUEMASK; @@ -666,9 +665,8 @@ static void cls_param(struct tty_struct *tty) if (!bd || bd->magic != DGNC_BOARD_MAGIC) return; - /* - * If baud rate is zero, flush queues, and set mval to drop DTR. - */ + /* If baud rate is zero, flush queues, and set mval to drop DTR. */ + if ((ch->ch_c_cflag & (CBAUD)) == 0) { ch->ch_r_head = 0; ch->ch_r_tail = 0; @@ -887,9 +885,8 @@ static void cls_param(struct tty_struct *tty) cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); } -/* - * Our board poller function. - */ +/* Our board poller function. */ + static void cls_tasklet(unsigned long data) { struct dgnc_board *bd = (struct dgnc_board *)data; @@ -914,9 +911,8 @@ static void cls_tasklet(unsigned long data) */ spin_lock_irqsave(&bd->bd_intr_lock, flags); - /* - * If board is ready, parse deeper to see if there is anything to do. - */ + /* If board is ready, parse deeper to see if there is anything to do. */ + if ((state == BOARD_READY) && (ports > 0)) { /* Loop on each port */ for (i = 0; i < ports; i++) { @@ -938,9 +934,8 @@ static void cls_tasklet(unsigned long data) cls_copy_data_from_queue_to_uart(ch); dgnc_wakeup_writes(ch); - /* - * Check carrier function. - */ + /* Check carrier function. 
*/ + dgnc_carrier(ch); /* @@ -992,9 +987,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd) for (i = 0; i < brd->nasync; i++) cls_parse_isr(brd, i); - /* - * Schedule tasklet to more in-depth servicing at a better time. - */ + /* Schedule tasklet to more in-depth servicing at a better time. */ + tasklet_schedule(&brd->helper_tasklet); spin_unlock_irqrestore(&brd->bd_intr_lock, flags); @@ -1043,9 +1037,7 @@ static int cls_drain(struct tty_struct *tty, uint seconds) un->un_flags |= UN_EMPTY; spin_unlock_irqrestore(&ch->ch_lock, flags); - /* - * NOTE: Do something with time passed in. - */ + /* NOTE: Do something with time passed in. */ /* If ret is non-zero, user ctrl-c'ed us */ @@ -1112,9 +1104,8 @@ static void cls_uart_init(struct channel_t *ch) readb(&ch->ch_cls_uart->msr); } -/* - * Turns off UART. - */ +/* Turns off UART. */ + static void cls_uart_off(struct channel_t *ch) { writeb(0, &ch->ch_cls_uart->ier); @@ -1160,9 +1151,8 @@ static void cls_send_break(struct channel_t *ch, int msecs) if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; - /* - * If we receive a time of 0, this means turn off the break. - */ + /* If we receive a time of 0, this means turn off the break. */ + if (msecs == 0) { /* Turn break off, and unset some variables */ if (ch->ch_flags & CH_BREAK_SENDING) { diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h index 2597e36d38c47798999bd8c4bbaa0e1b53a0f3b5..463ad30efb3b986b582717684362b0ccf17c9748 100644 --- a/drivers/staging/dgnc/dgnc_cls.h +++ b/drivers/staging/dgnc/dgnc_cls.h @@ -69,7 +69,7 @@ struct cls_uart_struct { #define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */ #define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */ #define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */ -#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */ +#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */ #define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */ #define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */ #define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */ diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c index fd372d3afa465f609cae7cfa5213c4f0dbf7fa54..5381dbddd8bbb2121721c818f767d3cb9cafc629 100644 --- a/drivers/staging/dgnc/dgnc_driver.c +++ b/drivers/staging/dgnc/dgnc_driver.c @@ -24,31 +24,14 @@ #include "dgnc_tty.h" #include "dgnc_cls.h" #include "dgnc_neo.h" -#include "dgnc_sysfs.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Digi International, http://www.digi.com"); MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line"); MODULE_SUPPORTED_DEVICE("dgnc"); -/************************************************************************** - * - * protos for this file - * - */ -static int dgnc_start(void); -static int dgnc_request_irq(struct dgnc_board *brd); -static void dgnc_free_irq(struct dgnc_board *brd); -static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id); -static void dgnc_cleanup_board(struct dgnc_board *brd); -static void dgnc_poll_handler(ulong dummy); -static int dgnc_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent); -static int dgnc_do_remap(struct dgnc_board *brd); +/* File operations permitted on Control/Management major. */ -/* - * File operations permitted on Control/Management major. 
- */ static const struct file_operations dgnc_board_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dgnc_mgmt_ioctl, @@ -56,9 +39,8 @@ static const struct file_operations dgnc_board_fops = { .release = dgnc_mgmt_close }; -/* - * Globals - */ +/* Globals */ + uint dgnc_num_boards; struct dgnc_board *dgnc_board[MAXBOARDS]; DEFINE_SPINLOCK(dgnc_global_lock); @@ -66,14 +48,12 @@ DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */ uint dgnc_major; int dgnc_poll_tick = 20; /* Poll interval - 20 ms */ -/* - * Static vars. - */ +/* Static vars. */ + static struct class *dgnc_class; -/* - * Poller stuff - */ +/* Poller stuff */ + static ulong dgnc_poll_time; /* Time of next poll */ static uint dgnc_poll_stop; /* Used to tell poller to stop */ static struct timer_list dgnc_poll_timer; @@ -93,7 +73,7 @@ struct board_id { unsigned int is_pci_express; }; -static struct board_id dgnc_ids[] = { +static const struct board_id dgnc_ids[] = { { PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 }, { PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 }, { PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 }, @@ -114,273 +94,19 @@ static struct board_id dgnc_ids[] = { { NULL, 0, 0 } }; -static struct pci_driver dgnc_driver = { - .name = "dgnc", - .probe = dgnc_init_one, - .id_table = dgnc_pci_tbl, -}; - -/************************************************************************ - * - * Driver load/unload functions - * - ************************************************************************/ - -static void cleanup(bool sysfiles) -{ - int i; - unsigned long flags; - - spin_lock_irqsave(&dgnc_poll_lock, flags); - dgnc_poll_stop = 1; - spin_unlock_irqrestore(&dgnc_poll_lock, flags); - - /* Turn off poller right away. */ - del_timer_sync(&dgnc_poll_timer); - - if (sysfiles) - dgnc_remove_driver_sysfiles(&dgnc_driver); - - device_destroy(dgnc_class, MKDEV(dgnc_major, 0)); - class_destroy(dgnc_class); - unregister_chrdev(dgnc_major, "dgnc"); - - for (i = 0; i < dgnc_num_boards; ++i) { - dgnc_remove_ports_sysfiles(dgnc_board[i]); - dgnc_cleanup_tty(dgnc_board[i]); - dgnc_cleanup_board(dgnc_board[i]); - } - - dgnc_tty_post_uninit(); -} - -/* - * dgnc_cleanup_module() - * - * Module unload. This is where it all ends. - */ -static void __exit dgnc_cleanup_module(void) -{ - cleanup(true); - pci_unregister_driver(&dgnc_driver); -} - -/* - * init_module() - * - * Module load. This is where it all starts. - */ -static int __init dgnc_init_module(void) -{ - int rc; - - /* - * Initialize global stuff - */ - rc = dgnc_start(); - - if (rc < 0) - return rc; - - /* - * Find and configure all the cards - */ - rc = pci_register_driver(&dgnc_driver); - if (rc) { - pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n"); - cleanup(false); - return rc; - } - dgnc_create_driver_sysfiles(&dgnc_driver); - - return 0; -} - -module_init(dgnc_init_module); -module_exit(dgnc_cleanup_module); +/* Remap PCI memory. */ -/* - * Start of driver. - */ -static int dgnc_start(void) +static int dgnc_do_remap(struct dgnc_board *brd) { int rc = 0; - unsigned long flags; - struct device *dev; - - /* make sure timer is initialized before we do anything else */ - init_timer(&dgnc_poll_timer); - - /* - * Register our base character device into the kernel. - * This allows the download daemon to connect to the downld device - * before any of the boards are init'ed. 
- * - * Register management/dpa devices - */ - rc = register_chrdev(0, "dgnc", &dgnc_board_fops); - if (rc < 0) { - pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc); - return rc; - } - dgnc_major = rc; - - dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt"); - if (IS_ERR(dgnc_class)) { - rc = PTR_ERR(dgnc_class); - pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc); - goto failed_class; - } - - dev = device_create(dgnc_class, NULL, - MKDEV(dgnc_major, 0), - NULL, "dgnc_mgmt"); - if (IS_ERR(dev)) { - rc = PTR_ERR(dev); - pr_err(DRVSTR ": Can't create device (%d)\n", rc); - goto failed_device; - } - - /* - * Init any global tty stuff. - */ - rc = dgnc_tty_preinit(); - - if (rc < 0) { - pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc); - goto failed_tty; - } - - /* Start the poller */ - spin_lock_irqsave(&dgnc_poll_lock, flags); - setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0); - dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick); - dgnc_poll_timer.expires = dgnc_poll_time; - spin_unlock_irqrestore(&dgnc_poll_lock, flags); - - add_timer(&dgnc_poll_timer); - - return 0; - -failed_tty: - device_destroy(dgnc_class, MKDEV(dgnc_major, 0)); -failed_device: - class_destroy(dgnc_class); -failed_class: - unregister_chrdev(dgnc_major, "dgnc"); - return rc; -} - -/* returns count (>= 0), or negative on error */ -static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - int rc; - struct dgnc_board *brd; - - /* wake up and enable device */ - rc = pci_enable_device(pdev); - - if (rc) - return -EIO; - - brd = dgnc_found_board(pdev, ent->driver_data); - if (IS_ERR(brd)) - return PTR_ERR(brd); - - /* - * Do tty device initialization. - */ - - rc = dgnc_tty_register(brd); - if (rc < 0) { - pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc); - goto failed; - } - - rc = dgnc_request_irq(brd); - if (rc < 0) { - pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc); - goto unregister_tty; - } - - rc = dgnc_tty_init(brd); - if (rc < 0) { - pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc); - goto free_irq; - } - - brd->state = BOARD_READY; - brd->dpastatus = BD_RUNNING; - dgnc_create_ports_sysfiles(brd); - - dgnc_board[dgnc_num_boards++] = brd; - - return 0; - -free_irq: - dgnc_free_irq(brd); -unregister_tty: - dgnc_tty_unregister(brd); - -failed: - kfree(brd); + brd->re_map_membase = ioremap(brd->membase, 0x1000); + if (!brd->re_map_membase) + rc = -ENOMEM; return rc; } -/* - * dgnc_cleanup_board() - * - * Free all the memory associated with a board - */ -static void dgnc_cleanup_board(struct dgnc_board *brd) -{ - int i = 0; - - if (!brd || brd->magic != DGNC_BOARD_MAGIC) - return; - - switch (brd->device) { - case PCI_DEVICE_CLASSIC_4_DID: - case PCI_DEVICE_CLASSIC_8_DID: - case PCI_DEVICE_CLASSIC_4_422_DID: - case PCI_DEVICE_CLASSIC_8_422_DID: - - /* Tell card not to interrupt anymore. 
*/ - outb(0, brd->iobase + 0x4c); - break; - - default: - break; - } - - if (brd->irq) - free_irq(brd->irq, brd); - - tasklet_kill(&brd->helper_tasklet); - - if (brd->re_map_membase) { - iounmap(brd->re_map_membase); - brd->re_map_membase = NULL; - } - - /* Free all allocated channels structs */ - for (i = 0; i < MAXPORTS ; i++) { - if (brd->channels[i]) { - kfree(brd->channels[i]->ch_rqueue); - kfree(brd->channels[i]->ch_equeue); - kfree(brd->channels[i]->ch_wqueue); - kfree(brd->channels[i]); - brd->channels[i] = NULL; - } - } - - dgnc_board[brd->boardnum] = NULL; - - kfree(brd); -} - /* * dgnc_found_board() * @@ -587,21 +313,6 @@ static void dgnc_free_irq(struct dgnc_board *brd) } /* - * Remap PCI memory. - */ -static int dgnc_do_remap(struct dgnc_board *brd) -{ - int rc = 0; - - brd->re_map_membase = ioremap(brd->membase, 0x1000); - if (!brd->re_map_membase) - rc = -ENOMEM; - - return rc; -} - -/* - * * Function: * * dgnc_poll_handler @@ -623,7 +334,6 @@ static int dgnc_do_remap(struct dgnc_board *brd) * As each timer expires, it determines (a) whether the "transmit" * waiter needs to be woken up, and (b) whether the poller needs to * be rescheduled. - * */ static void dgnc_poll_handler(ulong dummy) @@ -651,9 +361,8 @@ static void dgnc_poll_handler(ulong dummy) spin_unlock_irqrestore(&brd->bd_lock, flags); } - /* - * Schedule ourself back at the nominal wakeup interval. - */ + /* Schedule ourself back at the nominal wakeup interval. */ + spin_lock_irqsave(&dgnc_poll_lock, flags); dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick); @@ -669,3 +378,240 @@ static void dgnc_poll_handler(ulong dummy) if (!dgnc_poll_stop) add_timer(&dgnc_poll_timer); } + +/* returns count (>= 0), or negative on error */ +static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int rc; + struct dgnc_board *brd; + + /* wake up and enable device */ + rc = pci_enable_device(pdev); + + if (rc) + return -EIO; + + brd = dgnc_found_board(pdev, ent->driver_data); + if (IS_ERR(brd)) + return PTR_ERR(brd); + + /* Do tty device initialization. */ + + rc = dgnc_tty_register(brd); + if (rc < 0) { + pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc); + goto failed; + } + + rc = dgnc_request_irq(brd); + if (rc < 0) { + pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc); + goto unregister_tty; + } + + rc = dgnc_tty_init(brd); + if (rc < 0) { + pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc); + goto free_irq; + } + + brd->state = BOARD_READY; + brd->dpastatus = BD_RUNNING; + + dgnc_board[dgnc_num_boards++] = brd; + + return 0; + +free_irq: + dgnc_free_irq(brd); +unregister_tty: + dgnc_tty_unregister(brd); + +failed: + kfree(brd); + + return rc; +} + +static struct pci_driver dgnc_driver = { + .name = "dgnc", + .probe = dgnc_init_one, + .id_table = dgnc_pci_tbl, +}; + +/* Start of driver. */ + +static int dgnc_start(void) +{ + int rc = 0; + unsigned long flags; + struct device *dev; + + /* make sure timer is initialized before we do anything else */ + init_timer(&dgnc_poll_timer); + + /* + * Register our base character device into the kernel. + * This allows the download daemon to connect to the downld device + * before any of the boards are init'ed. 
+ * + * Register management/dpa devices + */ + rc = register_chrdev(0, "dgnc", &dgnc_board_fops); + if (rc < 0) { + pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc); + return rc; + } + dgnc_major = rc; + + dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt"); + if (IS_ERR(dgnc_class)) { + rc = PTR_ERR(dgnc_class); + pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc); + goto failed_class; + } + + dev = device_create(dgnc_class, NULL, + MKDEV(dgnc_major, 0), + NULL, "dgnc_mgmt"); + if (IS_ERR(dev)) { + rc = PTR_ERR(dev); + pr_err(DRVSTR ": Can't create device (%d)\n", rc); + goto failed_device; + } + + /* Start the poller */ + spin_lock_irqsave(&dgnc_poll_lock, flags); + setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0); + dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick); + dgnc_poll_timer.expires = dgnc_poll_time; + spin_unlock_irqrestore(&dgnc_poll_lock, flags); + + add_timer(&dgnc_poll_timer); + + return 0; + +failed_device: + class_destroy(dgnc_class); +failed_class: + unregister_chrdev(dgnc_major, "dgnc"); + return rc; +} + +/* + * dgnc_cleanup_board() + * + * Free all the memory associated with a board + */ +static void dgnc_cleanup_board(struct dgnc_board *brd) +{ + int i = 0; + + if (!brd || brd->magic != DGNC_BOARD_MAGIC) + return; + + switch (brd->device) { + case PCI_DEVICE_CLASSIC_4_DID: + case PCI_DEVICE_CLASSIC_8_DID: + case PCI_DEVICE_CLASSIC_4_422_DID: + case PCI_DEVICE_CLASSIC_8_422_DID: + + /* Tell card not to interrupt anymore. */ + outb(0, brd->iobase + 0x4c); + break; + + default: + break; + } + + if (brd->irq) + free_irq(brd->irq, brd); + + tasklet_kill(&brd->helper_tasklet); + + if (brd->re_map_membase) { + iounmap(brd->re_map_membase); + brd->re_map_membase = NULL; + } + + /* Free all allocated channels structs */ + for (i = 0; i < MAXPORTS ; i++) { + if (brd->channels[i]) { + kfree(brd->channels[i]->ch_rqueue); + kfree(brd->channels[i]->ch_equeue); + kfree(brd->channels[i]->ch_wqueue); + kfree(brd->channels[i]); + brd->channels[i] = NULL; + } + } + + dgnc_board[brd->boardnum] = NULL; + + kfree(brd); +} + +/* Driver load/unload functions */ + +static void cleanup(void) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&dgnc_poll_lock, flags); + dgnc_poll_stop = 1; + spin_unlock_irqrestore(&dgnc_poll_lock, flags); + + /* Turn off poller right away. */ + del_timer_sync(&dgnc_poll_timer); + + device_destroy(dgnc_class, MKDEV(dgnc_major, 0)); + class_destroy(dgnc_class); + unregister_chrdev(dgnc_major, "dgnc"); + + for (i = 0; i < dgnc_num_boards; ++i) { + dgnc_cleanup_tty(dgnc_board[i]); + dgnc_cleanup_board(dgnc_board[i]); + } +} + +/* + * dgnc_cleanup_module() + * + * Module unload. This is where it all ends. + */ +static void __exit dgnc_cleanup_module(void) +{ + cleanup(); + pci_unregister_driver(&dgnc_driver); +} + +/* + * init_module() + * + * Module load. This is where it all starts. + */ +static int __init dgnc_init_module(void) +{ + int rc; + + /* Initialize global stuff */ + + rc = dgnc_start(); + + if (rc < 0) + return rc; + + /* Find and configure all the cards */ + + rc = pci_register_driver(&dgnc_driver); + if (rc) { + pr_warn("WARNING: dgnc driver load failed. 
No Digi Neo or Classic boards found.\n"); + cleanup(); + return rc; + } + + return 0; +} + +module_init(dgnc_init_module); +module_exit(dgnc_cleanup_module); diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h index 879202663a9836e6c4093731b0c8929166d294fe..c8119f2fe8812f605717f588f5a0aedfde7bc6a8 100644 --- a/drivers/staging/dgnc/dgnc_driver.h +++ b/drivers/staging/dgnc/dgnc_driver.h @@ -12,11 +12,8 @@ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * - ************************************************************************* - * * Driver includes - * - *************************************************************************/ + */ #ifndef __DGNC_DRIVER_H #define __DGNC_DRIVER_H @@ -26,19 +23,14 @@ #include #include "digi.h" /* Digi specific ioctl header */ -#include "dgnc_sysfs.h" /* Support for SYSFS */ -/************************************************************************* - * - * Driver defines - * - *************************************************************************/ +/* Driver defines */ -/* Driver identification and error statments */ -#define PROCSTR "dgnc" /* /proc entries */ -#define DEVSTR "/dev/dg/dgnc" /* /dev entries */ -#define DRVSTR "dgnc" /* Driver name string */ -#define DG_PART "40002369_F" /* RPM part number */ +/* Driver identification and error statements */ +#define PROCSTR "dgnc" /* /proc entries */ +#define DEVSTR "/dev/dg/dgnc" /* /dev entries */ +#define DRVSTR "dgnc" /* Driver name string */ +#define DG_PART "40002369_F" /* RPM part number */ #define TRC_TO_CONSOLE 1 @@ -61,7 +53,8 @@ #define PORT_NUM(dev) ((dev) & 0x7f) #define IS_PRINT(dev) (((dev) & 0xff) >= 0x80) -/* MAX number of stop characters we will send +/* + *MAX number of stop characters we will send * when our read queue is getting full */ #define MAX_STOPS_SENT 5 @@ -88,35 +81,28 @@ #define _POSIX_VDISABLE '\0' #endif -/* - * All the possible states the driver can be while being loaded. - */ +/* All the possible states the driver can be while being loaded. */ + enum { DRIVER_INITIALIZED = 0, DRIVER_READY }; -/* - * All the possible states the board can be while booting up. - */ +/* All the possible states the board can be while booting up. */ + enum { BOARD_FAILED = 0, BOARD_FOUND, BOARD_READY }; -/************************************************************************* - * - * Structures and closely related defines. - * - *************************************************************************/ +/* Structures and closely related defines. */ struct dgnc_board; struct channel_t; -/************************************************************************ - * Per board operations structure * - ************************************************************************/ +/* Per board operations structure */ + struct board_ops { void (*tasklet)(unsigned long data); irqreturn_t (*intr)(int irq, void *voidbrd); @@ -138,16 +124,14 @@ struct board_ops { void (*send_immediate_char)(struct channel_t *ch, unsigned char); }; -/************************************************************************ - * Device flag definitions for bd_flags. - ************************************************************************/ +/* Device flag definitions for bd_flags. */ + #define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */ -/* - * Per-board information - */ +/* Per-board information */ + struct dgnc_board { - int magic; /* Board Magic number. */ + int magic; /* Board Magic number. 
*/ int boardnum; /* Board number: 0-32 */ int type; /* Type of board */ @@ -220,62 +204,56 @@ struct dgnc_board { }; -/************************************************************************ - * Unit flag definitions for un_flags. - ************************************************************************/ -#define UN_ISOPEN 0x0001 /* Device is open */ -#define UN_CLOSING 0x0002 /* Line is being closed */ -#define UN_IMM 0x0004 /* Service immediately */ -#define UN_BUSY 0x0008 /* Some work this channel */ -#define UN_BREAKI 0x0010 /* Input break received */ +/* Unit flag definitions for un_flags. */ +#define UN_ISOPEN 0x0001 /* Device is open */ +#define UN_CLOSING 0x0002 /* Line is being closed */ +#define UN_IMM 0x0004 /* Service immediately */ +#define UN_BUSY 0x0008 /* Some work this channel */ +#define UN_BREAKI 0x0010 /* Input break received */ #define UN_PWAIT 0x0020 /* Printer waiting for terminal */ -#define UN_TIME 0x0040 /* Waiting on time */ -#define UN_EMPTY 0x0080 /* Waiting output queue empty */ +#define UN_TIME 0x0040 /* Waiting on time */ +#define UN_EMPTY 0x0080 /* Waiting output queue empty */ #define UN_LOW 0x0100 /* Waiting output low water mark*/ -#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */ -#define UN_WOPEN 0x0400 /* Device waiting for open */ -#define UN_WIOCTL 0x0800 /* Device waiting for open */ -#define UN_HANGUP 0x8000 /* Carrier lost */ +#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */ +#define UN_WOPEN 0x0400 /* Device waiting for open */ +#define UN_WIOCTL 0x0800 /* Device waiting for open */ +#define UN_HANGUP 0x8000 /* Carrier lost */ struct device; -/************************************************************************ - * Structure for terminal or printer unit. - ************************************************************************/ +/* Structure for terminal or printer unit. */ struct un_t { - int magic; /* Unit Magic Number. */ + int magic; /* Unit Magic Number. */ struct channel_t *un_ch; ulong un_time; uint un_type; - uint un_open_count; /* Counter of opens to port */ - struct tty_struct *un_tty;/* Pointer to unit tty structure */ - uint un_flags; /* Unit flags */ + uint un_open_count; /* Counter of opens to port */ + struct tty_struct *un_tty; /* Pointer to unit tty structure */ + uint un_flags; /* Unit flags */ wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */ - uint un_dev; /* Minor device number */ + uint un_dev; /* Minor device number */ struct device *un_sysfs; }; -/************************************************************************ - * Device flag definitions for ch_flags. - ************************************************************************/ -#define CH_PRON 0x0001 /* Printer on string */ -#define CH_STOP 0x0002 /* Output is stopped */ -#define CH_STOPI 0x0004 /* Input is stopped */ -#define CH_CD 0x0008 /* Carrier is present */ -#define CH_FCAR 0x0010 /* Carrier forced on */ -#define CH_HANGUP 0x0020 /* Hangup received */ - -#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */ -#define CH_OPENING 0x0080 /* Port in fragile open state */ -#define CH_CLOSING 0x0100 /* Port in fragile close state */ -#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */ -#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */ -#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */ -#define CH_BREAK_SENDING 0x1000 /* Break is being sent */ -#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */ +/* Device flag definitions for ch_flags. 
*/ +#define CH_PRON 0x0001 /* Printer on string */ +#define CH_STOP 0x0002 /* Output is stopped */ +#define CH_STOPI 0x0004 /* Input is stopped */ +#define CH_CD 0x0008 /* Carrier is present */ +#define CH_FCAR 0x0010 /* Carrier forced on */ +#define CH_HANGUP 0x0020 /* Hangup received */ + +#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */ +#define CH_OPENING 0x0080 /* Port in fragile open state */ +#define CH_CLOSING 0x0100 /* Port in fragile close state */ +#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */ +#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */ +#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */ +#define CH_BREAK_SENDING 0x1000 /* Break is being sent */ +#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */ #define CH_BAUD0 0x08000 /* Used for checking B0 transitions */ -#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */ -#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */ +#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */ +#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */ /* Our Read/Error/Write queue sizes */ #define RQUEUEMASK 0x1FFF /* 8 K - 1 */ @@ -285,43 +263,41 @@ struct un_t { #define EQUEUESIZE RQUEUESIZE #define WQUEUESIZE (WQUEUEMASK + 1) -/************************************************************************ - * Channel information structure. - ************************************************************************/ +/* Channel information structure. */ struct channel_t { - int magic; /* Channel Magic Number */ - struct dgnc_board *ch_bd; /* Board structure pointer */ + int magic; /* Channel Magic Number */ + struct dgnc_board *ch_bd; /* Board structure pointer */ struct digi_t ch_digi; /* Transparent Print structure */ - struct un_t ch_tun; /* Terminal unit info */ - struct un_t ch_pun; /* Printer unit info */ + struct un_t ch_tun; /* Terminal unit info */ + struct un_t ch_pun; /* Printer unit info */ spinlock_t ch_lock; /* provide for serialization */ wait_queue_head_t ch_flags_wait; - uint ch_portnum; /* Port number, 0 offset. */ - uint ch_open_count; /* open count */ - uint ch_flags; /* Channel flags */ + uint ch_portnum; /* Port number, 0 offset. 
*/ + uint ch_open_count; /* open count */ + uint ch_flags; /* Channel flags */ ulong ch_close_delay; /* How long we should * drop RTS/DTR for */ - ulong ch_cpstime; /* Time for CPS calculations */ + ulong ch_cpstime; /* Time for CPS calculations */ - tcflag_t ch_c_iflag; /* channel iflags */ - tcflag_t ch_c_cflag; /* channel cflags */ - tcflag_t ch_c_oflag; /* channel oflags */ - tcflag_t ch_c_lflag; /* channel lflags */ - unsigned char ch_stopc; /* Stop character */ - unsigned char ch_startc; /* Start character */ + tcflag_t ch_c_iflag; /* channel iflags */ + tcflag_t ch_c_cflag; /* channel cflags */ + tcflag_t ch_c_oflag; /* channel oflags */ + tcflag_t ch_c_lflag; /* channel lflags */ + unsigned char ch_stopc; /* Stop character */ + unsigned char ch_startc; /* Start character */ uint ch_old_baud; /* Cache of the current baud */ uint ch_custom_speed;/* Custom baud, if set */ uint ch_wopen; /* Waiting for open process cnt */ - unsigned char ch_mostat; /* FEP output modem status */ - unsigned char ch_mistat; /* FEP input modem status */ + unsigned char ch_mostat; /* FEP output modem status */ + unsigned char ch_mistat; /* FEP input modem status */ struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the * "mapped" UART struct @@ -347,10 +323,10 @@ struct channel_t { ulong ch_rxcount; /* total of data received so far */ ulong ch_txcount; /* total of data transmitted so far */ - unsigned char ch_r_tlevel; /* Receive Trigger level */ - unsigned char ch_t_tlevel; /* Transmit Trigger level */ + unsigned char ch_r_tlevel; /* Receive Trigger level */ + unsigned char ch_t_tlevel; /* Transmit Trigger level */ - unsigned char ch_r_watermark; /* Receive Watermark */ + unsigned char ch_r_watermark; /* Receive Watermark */ ulong ch_stop_sending_break; /* Time we should STOP * sending a break @@ -374,16 +350,15 @@ struct channel_t { }; -/* - * Our Global Variables. - */ +/* Our Global Variables. */ + extern uint dgnc_major; /* Our driver/mgmt major */ extern int dgnc_poll_tick; /* Poll interval - 20 ms */ extern spinlock_t dgnc_global_lock; /* Driver global spinlock */ extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */ extern uint dgnc_num_boards; /* Total number of boards */ -extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board - * structs - */ +extern struct dgnc_board *dgnc_board[MAXBOARDS];/* Array of board + * structs + */ #endif diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c index 683c098391d9f3e9b9685896d744aca7f8d7c0c3..9d9b15d6358aae49f0e8fcf8d84a9ab8f66a2dcb 100644 --- a/drivers/staging/dgnc/dgnc_mgmt.c +++ b/drivers/staging/dgnc/dgnc_mgmt.c @@ -13,13 +13,11 @@ * PURPOSE. See the GNU General Public License for more details. */ -/************************************************************************ - * +/* * This file implements the mgmt functionality for the * Neo and ClassicBoard based product lines. - * - ************************************************************************ */ + #include #include #include /* For jiffies, task states */ diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c index 5becb3741b67a84854b24e93247dee907b4bdaa7..3eefefe53174a812a405a742fdecc8080ef5e2f7 100644 --- a/drivers/staging/dgnc/dgnc_neo.c +++ b/drivers/staging/dgnc/dgnc_neo.c @@ -107,7 +107,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch) /* Turn off auto Xon flow control */ efr &= ~UART_17158_EFR_IXON; - /* Why? Because Exar's spec says we have to zero it + /* + * Why? 
Because Exar's spec says we have to zero it * out before setting it */ writeb(0, &ch->ch_neo_uart->efr); @@ -145,7 +146,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch) ier &= ~UART_17158_IER_XOFF; efr &= ~UART_17158_EFR_IXOFF; - /* Why? Because Exar's spec says we have to zero it + /* + * Why? Because Exar's spec says we have to zero it * out before setting it */ writeb(0, &ch->ch_neo_uart->efr); @@ -185,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch) /* Turn on auto Xon flow control */ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON); - /* Why? Because Exar's spec says we have to zero it + /* + * Why? Because Exar's spec says we have to zero it * out before setting it */ writeb(0, &ch->ch_neo_uart->efr); @@ -225,7 +228,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch) ier |= UART_17158_IER_XOFF; efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); - /* Why? Because Exar's spec says we have to zero it + /* + * Why? Because Exar's spec says we have to zero it * out before setting it */ writeb(0, &ch->ch_neo_uart->efr); @@ -268,7 +272,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch) else efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); - /* Why? Because Exar's spec says we have to zero + /* + * Why? Because Exar's spec says we have to zero * it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); @@ -308,7 +313,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch) else efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON); - /* Why? Because Exar's spec says we have to zero it + /* + * Why? Because Exar's spec says we have to zero it * out before setting it */ writeb(0, &ch->ch_neo_uart->efr); @@ -351,9 +357,8 @@ static inline void neo_set_new_start_stop_chars(struct channel_t *ch) neo_pci_posting_flush(ch->ch_bd); } -/* - * No locks are assumed to be held when calling this function. - */ +/* No locks are assumed to be held when calling this function. */ + static inline void neo_clear_break(struct channel_t *ch, int force) { unsigned long flags; @@ -381,9 +386,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force) spin_unlock_irqrestore(&ch->ch_lock, flags); } -/* - * Parse the ISR register. - */ +/* Parse the ISR register. */ + static inline void neo_parse_isr(struct dgnc_board *brd, uint port) { struct channel_t *ch; @@ -412,8 +416,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port) if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) { /* Read data from uart -> queue */ neo_copy_data_from_uart_to_queue(ch); - - /* Call our tty layer to enforce queue + /* + * Call our tty layer to enforce queue * flow control if needed. */ spin_lock_irqsave(&ch->ch_lock, flags); @@ -438,7 +442,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port) * one it was, so we can suspend or resume data flow. */ if (cause == UART_17158_XON_DETECT) { - /* Is output stopped right now, if so, + /* + * Is output stopped right now, if so, * resume it */ if (brd->channels[port]->ch_flags & CH_STOP) { @@ -609,9 +614,8 @@ static void neo_param(struct tty_struct *tty) if (!bd || bd->magic != DGNC_BOARD_MAGIC) return; - /* - * If baud rate is zero, flush queues, and set mval to drop DTR. - */ + /* If baud rate is zero, flush queues, and set mval to drop DTR. 
*/ + if ((ch->ch_c_cflag & (CBAUD)) == 0) { ch->ch_r_head = 0; ch->ch_r_tail = 0; @@ -672,7 +676,8 @@ static void neo_param(struct tty_struct *tty) 4800, 9600, 19200, 38400 } }; - /* Only use the TXPrint baud rate if the terminal unit + /* + * Only use the TXPrint baud rate if the terminal unit * is NOT open */ if (!(ch->ch_tun.un_flags & UN_ISOPEN) && @@ -797,7 +802,8 @@ static void neo_param(struct tty_struct *tty) if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) { neo_set_cts_flow_control(ch); } else if (ch->ch_c_iflag & IXON) { - /* If start/stop is set to disable, then we should + /* + * If start/stop is set to disable, then we should * disable flow control */ if ((ch->ch_startc == _POSIX_VDISABLE) || @@ -812,7 +818,8 @@ static void neo_param(struct tty_struct *tty) if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) { neo_set_rts_flow_control(ch); } else if (ch->ch_c_iflag & IXOFF) { - /* If start/stop is set to disable, then we should + /* + * If start/stop is set to disable, then we should * disable flow control */ if ((ch->ch_startc == _POSIX_VDISABLE) || @@ -840,9 +847,8 @@ static void neo_param(struct tty_struct *tty) neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); } -/* - * Our board poller function. - */ +/* Our board poller function. */ + static void neo_tasklet(unsigned long data) { struct dgnc_board *bd = (struct dgnc_board *)data; @@ -867,9 +873,8 @@ static void neo_tasklet(unsigned long data) */ spin_lock_irqsave(&bd->bd_intr_lock, flags); - /* - * If board is ready, parse deeper to see if there is anything to do. - */ + /* If board is ready, parse deeper to see if there is anything to do. */ + if ((state == BOARD_READY) && (ports > 0)) { /* Loop on each port */ for (i = 0; i < ports; i++) { @@ -997,9 +1002,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd) break; case UART_17158_RX_LINE_STATUS: - /* - * RXRDY and RX LINE Status (logic OR of LSR[4:1]) - */ + + /* RXRDY and RX LINE Status (logic OR of LSR[4:1]) */ + neo_parse_lsr(brd, port); break; @@ -1022,9 +1027,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd) break; case UART_17158_MSR: - /* - * MSR or flow control was seen. - */ + + /* MSR or flow control was seen. */ + neo_parse_isr(brd, port); break; @@ -1041,9 +1046,8 @@ static irqreturn_t neo_intr(int irq, void *voidbrd) port++; } - /* - * Schedule tasklet to more in-depth servicing at a better time. - */ + /* Schedule tasklet to more in-depth servicing at a better time. */ + tasklet_schedule(&brd->helper_tasklet); spin_unlock_irqrestore(&brd->bd_intr_lock, flags); @@ -1238,9 +1242,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch) ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); } - /* - * Discard character if we are ignoring the error mask. - */ + /* Discard character if we are ignoring the error mask. */ + if (linestatus & error_mask) { unsigned char discard; @@ -1279,9 +1282,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch) ch->ch_rxcount++; } - /* - * Write new final heads to channel structure. - */ + /* Write new final heads to channel structure. */ + ch->ch_r_head = head & RQUEUEMASK; ch->ch_e_head = head & EQUEUEMASK; @@ -1412,9 +1414,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch) (ch->ch_flags & CH_BREAK_SENDING)) goto exit_unlock; - /* - * If FIFOs are disabled. Send data directly to txrx register - */ + /* If FIFOs are disabled. 
Send data directly to txrx register */ + if (!(ch->ch_flags & CH_FIFO_ENABLED)) { unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr); @@ -1458,9 +1459,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch) goto exit_unlock; } - /* - * We have to do it this way, because of the EXAR TXFIFO count bug. - */ + /* We have to do it this way, because of the EXAR TXFIFO count bug. */ + if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) { if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) goto exit_unlock; @@ -1645,9 +1645,8 @@ static void neo_send_stop_character(struct channel_t *ch) } } -/* - * neo_uart_init - */ +/* neo_uart_init */ + static void neo_uart_init(struct channel_t *ch) { writeb(0, &ch->ch_neo_uart->ier); @@ -1668,9 +1667,8 @@ static void neo_uart_init(struct channel_t *ch) neo_pci_posting_flush(ch->ch_bd); } -/* - * Make the UART completely turn off. - */ +/* Make the UART completely turn off. */ + static void neo_uart_off(struct channel_t *ch) { /* Turn off UART enhanced bits */ @@ -1705,9 +1703,8 @@ static uint neo_get_uart_bytes_left(struct channel_t *ch) /* Channel lock MUST be held by the calling function! */ static void neo_send_break(struct channel_t *ch, int msecs) { - /* - * If we receive a time of 0, this means turn off the break. - */ + /* If we receive a time of 0, this means turn off the break. */ + if (msecs == 0) { if (ch->ch_flags & CH_BREAK_SENDING) { unsigned char temp = readb(&ch->ch_neo_uart->lcr); diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h index abddd48353d0fd96263cfa75a1ac704251c42fc6..77ecd9baae45feb61214ecc614de98eee2f2b5c4 100644 --- a/drivers/staging/dgnc/dgnc_neo.h +++ b/drivers/staging/dgnc/dgnc_neo.h @@ -18,37 +18,38 @@ #include "dgnc_driver.h" -/************************************************************************ - * Per channel/port NEO UART structure * - ************************************************************************ - * Base Structure Entries Usage Meanings to Host * - * * - * W = read write R = read only * - * U = Unused. * - ************************************************************************/ +/* + * Per channel/port NEO UART structure + * Base Structure Entries Usage Meanings to Host + * + * W = read write R = read only + * U = Unused. 
+ */ struct neo_uart_struct { - u8 txrx; /* WR RHR/THR - Holding Reg */ + u8 txrx; /* WR RHR/THR - Holding Reg */ u8 ier; /* WR IER - Interrupt Enable Reg */ - u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */ + u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo + * Control Reg + */ u8 lcr; /* WR LCR - Line Control Reg */ u8 mcr; /* WR MCR - Modem Control Reg */ u8 lsr; /* WR LSR - Line Status Reg */ u8 msr; /* WR MSR - Modem Status Reg */ u8 spr; /* WR SPR - Scratch Pad Reg */ - u8 fctr; /* WR FCTR - Feature Control Reg */ + u8 fctr; /* WR FCTR - Feature Control Reg */ u8 efr; /* WR EFR - Enhanced Function Reg */ - u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */ - u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */ + u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */ + u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */ u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */ u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */ u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */ u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */ u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */ - u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */ + u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */ u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */ - u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */ + u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */ }; /* Where to read the extended interrupt register (32bits instead of 8bits) */ @@ -108,7 +109,9 @@ struct neo_uart_struct { /* 17158 Extended IIR's */ #define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */ #define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */ -#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */ +#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR + * state change + */ #define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */ /* @@ -119,8 +122,12 @@ struct neo_uart_struct { #define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */ #define UART_17158_TXRDY 0x3 /* TX Ready */ #define UART_17158_MSR 0x4 /* Modem State Change */ -#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */ -#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */ +#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding + * Reg Empty + */ +#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO + * Data error + */ /* * These are the EXTENDED definitions for the 17C158's Interrupt @@ -130,19 +137,22 @@ struct neo_uart_struct { #define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */ #define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */ #define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */ -#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */ +#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */ -#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */ -#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */ +#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an + * incoming XOFF char + */ +#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an + * incoming XON char + */ #define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */ #define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */ #define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */ #define UART_17158_IER_CTSDSR 
0x80 /* Input Interrupt Enable */ -/* - * Our Global Variables - */ +/* Our Global Variables */ + extern struct board_ops dgnc_neo_ops; #endif diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c deleted file mode 100644 index 290bf6e226acfa3d1ab1b7b2759cc24ce21e5ea2..0000000000000000000000000000000000000000 --- a/drivers/staging/dgnc/dgnc_sysfs.c +++ /dev/null @@ -1,703 +0,0 @@ -/* - * Copyright 2004 Digi International (www.digi.com) - * Scott H Kilau - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the - * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - * PURPOSE. See the GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dgnc_driver.h" -#include "dgnc_mgmt.h" - -static ssize_t version_show(struct device_driver *ddp, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART); -} -static DRIVER_ATTR_RO(version); - -static ssize_t boards_show(struct device_driver *ddp, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards); -} -static DRIVER_ATTR_RO(boards); - -static ssize_t maxboards_show(struct device_driver *ddp, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS); -} -static DRIVER_ATTR_RO(maxboards); - -static ssize_t pollrate_show(struct device_driver *ddp, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick); -} - -static ssize_t pollrate_store(struct device_driver *ddp, - const char *buf, size_t count) -{ - unsigned long flags; - int tick; - int ret; - - ret = sscanf(buf, "%d\n", &tick); - if (ret != 1) - return -EINVAL; - - spin_lock_irqsave(&dgnc_poll_lock, flags); - dgnc_poll_tick = tick; - spin_unlock_irqrestore(&dgnc_poll_lock, flags); - - return count; -} -static DRIVER_ATTR_RW(pollrate); - -void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver) -{ - int rc = 0; - struct device_driver *driverfs = &dgnc_driver->driver; - - rc |= driver_create_file(driverfs, &driver_attr_version); - rc |= driver_create_file(driverfs, &driver_attr_boards); - rc |= driver_create_file(driverfs, &driver_attr_maxboards); - rc |= driver_create_file(driverfs, &driver_attr_pollrate); - if (rc) - pr_err("DGNC: sysfs driver_create_file failed!\n"); -} - -void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver) -{ - struct device_driver *driverfs = &dgnc_driver->driver; - - driver_remove_file(driverfs, &driver_attr_version); - driver_remove_file(driverfs, &driver_attr_boards); - driver_remove_file(driverfs, &driver_attr_maxboards); - driver_remove_file(driverfs, &driver_attr_pollrate); -} - -#define DGNC_VERIFY_BOARD(p, bd) \ - do { \ - if (!p) \ - return 0; \ - \ - bd = dev_get_drvdata(p); \ - if (!bd || bd->magic != DGNC_BOARD_MAGIC) \ - return 0; \ - if (bd->state != BOARD_READY) \ - return 0; \ - } while (0) - -static ssize_t vpd_show(struct device *p, struct device_attribute *attr, - char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - count += sprintf(buf + count, - "\n 0 1 2 3 4 5 6 7 8 9 A B C D E F"); - for (i = 0; i < 0x40 * 2; i++) { - if (!(i % 16)) - count += sprintf(buf + count, "\n%04X ", i * 2); - 
count += sprintf(buf + count, "%02X ", bd->vpd[i]); - } - count += sprintf(buf + count, "\n"); - - return count; -} -static DEVICE_ATTR_RO(vpd); - -static ssize_t serial_number_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - - DGNC_VERIFY_BOARD(p, bd); - - if (bd->serial_num[0] == '\0') - count += sprintf(buf + count, "\n"); - else - count += sprintf(buf + count, "%s\n", bd->serial_num); - - return count; -} -static DEVICE_ATTR_RO(serial_number); - -static ssize_t ports_state_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, - "%d %s\n", bd->channels[i]->ch_portnum, - bd->channels[i]->ch_open_count ? "Open" : "Closed"); - } - return count; -} -static DEVICE_ATTR_RO(ports_state); - -static ssize_t ports_baud_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, - "%d %d\n", bd->channels[i]->ch_portnum, - bd->channels[i]->ch_old_baud); - } - return count; -} -static DEVICE_ATTR_RO(ports_baud); - -static ssize_t ports_msignals_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - struct channel_t *ch = bd->channels[i]; - - if (ch->ch_open_count) { - count += snprintf(buf + count, PAGE_SIZE - count, - "%d %s %s %s %s %s %s\n", - ch->ch_portnum, - (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "", - (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "", - (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "", - (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "", - (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "", - (ch->ch_mistat & UART_MSR_RI) ? 
"RI" : ""); - } else { - count += snprintf(buf + count, PAGE_SIZE - count, - "%d\n", ch->ch_portnum); - } - } - return count; -} -static DEVICE_ATTR_RO(ports_msignals); - -static ssize_t ports_iflag_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_c_iflag); - } - return count; -} -static DEVICE_ATTR_RO(ports_iflag); - -static ssize_t ports_cflag_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_c_cflag); - } - return count; -} -static DEVICE_ATTR_RO(ports_cflag); - -static ssize_t ports_oflag_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_c_oflag); - } - return count; -} -static DEVICE_ATTR_RO(ports_oflag); - -static ssize_t ports_lflag_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_c_lflag); - } - return count; -} -static DEVICE_ATTR_RO(ports_lflag); - -static ssize_t ports_digi_flag_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_digi.digi_flags); - } - return count; -} -static DEVICE_ATTR_RO(ports_digi_flag); - -static ssize_t ports_rxcount_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_rxcount); - } - return count; -} -static DEVICE_ATTR_RO(ports_rxcount); - -static ssize_t ports_txcount_show(struct device *p, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - int count = 0; - int i = 0; - - DGNC_VERIFY_BOARD(p, bd); - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_txcount); - } - return count; -} -static DEVICE_ATTR_RO(ports_txcount); - -/* this function creates the sys files that will export each signal status - * to sysfs each value will be put in a separate filename - */ -void dgnc_create_ports_sysfiles(struct dgnc_board *bd) -{ - int rc = 0; - - dev_set_drvdata(&bd->pdev->dev, bd); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_state); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_baud); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_msignals); - rc |= 
device_create_file(&bd->pdev->dev, &dev_attr_ports_iflag); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_cflag); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_oflag); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_lflag); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_digi_flag); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_rxcount); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_txcount); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_vpd); - rc |= device_create_file(&bd->pdev->dev, &dev_attr_serial_number); - if (rc) - dev_err(&bd->pdev->dev, "dgnc: sysfs device_create_file failed!\n"); -} - -/* removes all the sys files created for that port */ -void dgnc_remove_ports_sysfiles(struct dgnc_board *bd) -{ - device_remove_file(&bd->pdev->dev, &dev_attr_ports_state); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_baud); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_msignals); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_iflag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_cflag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_oflag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_lflag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_digi_flag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_rxcount); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_txcount); - device_remove_file(&bd->pdev->dev, &dev_attr_vpd); - device_remove_file(&bd->pdev->dev, &dev_attr_serial_number); -} - -static ssize_t tty_state_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%s", - un->un_open_count ? "Open" : "Closed"); -} -static DEVICE_ATTR_RO(tty_state); - -static ssize_t tty_baud_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud); -} -static DEVICE_ATTR_RO(tty_baud); - -static ssize_t tty_msignals_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - if (ch->ch_open_count) { - return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n", - (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "", - (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "", - (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "", - (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "", - (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "", - (ch->ch_mistat & UART_MSR_RI) ? 
"RI" : ""); - } - return 0; -} -static DEVICE_ATTR_RO(tty_msignals); - -static ssize_t tty_iflag_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag); -} -static DEVICE_ATTR_RO(tty_iflag); - -static ssize_t tty_cflag_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag); -} -static DEVICE_ATTR_RO(tty_cflag); - -static ssize_t tty_oflag_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag); -} -static DEVICE_ATTR_RO(tty_oflag); - -static ssize_t tty_lflag_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag); -} -static DEVICE_ATTR_RO(tty_lflag); - -static ssize_t tty_digi_flag_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags); -} -static DEVICE_ATTR_RO(tty_digi_flag); - -static ssize_t tty_rxcount_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount); -} -static DEVICE_ATTR_RO(tty_rxcount); - -static ssize_t tty_txcount_show(struct device *d, - struct 
device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount); -} -static DEVICE_ATTR_RO(tty_txcount); - -static ssize_t tty_custom_name_show(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct dgnc_board *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGNC_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGNC_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%sn%d%c\n", - (un->un_type == DGNC_PRINT) ? "pr" : "tty", - bd->boardnum + 1, 'a' + ch->ch_portnum); -} -static DEVICE_ATTR_RO(tty_custom_name); - -static struct attribute *dgnc_sysfs_tty_entries[] = { - &dev_attr_tty_state.attr, - &dev_attr_tty_baud.attr, - &dev_attr_tty_msignals.attr, - &dev_attr_tty_iflag.attr, - &dev_attr_tty_cflag.attr, - &dev_attr_tty_oflag.attr, - &dev_attr_tty_lflag.attr, - &dev_attr_tty_digi_flag.attr, - &dev_attr_tty_rxcount.attr, - &dev_attr_tty_txcount.attr, - &dev_attr_tty_custom_name.attr, - NULL -}; - -static const struct attribute_group dgnc_tty_attribute_group = { - .name = NULL, - .attrs = dgnc_sysfs_tty_entries, -}; - -void dgnc_create_tty_sysfs(struct un_t *un, struct device *c) -{ - int ret; - - ret = sysfs_create_group(&c->kobj, &dgnc_tty_attribute_group); - if (ret) { - dev_err(c, "dgnc: failed to create sysfs tty device attributes.\n"); - sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group); - return; - } - - dev_set_drvdata(c, un); -} - -void dgnc_remove_tty_sysfs(struct device *c) -{ - sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group); -} - diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h deleted file mode 100644 index 7be7d55bc49ea9e92c674ef256f075b6fafb50bb..0000000000000000000000000000000000000000 --- a/drivers/staging/dgnc/dgnc_sysfs.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2003 Digi International (www.digi.com) - * Scott H Kilau - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the - * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - * PURPOSE. See the GNU General Public License for more details. 
- */ - -#ifndef __DGNC_SYSFS_H -#define __DGNC_SYSFS_H - -#include -#include "dgnc_driver.h" - -struct dgnc_board; -struct channel_t; -struct un_t; -struct pci_driver; -struct class_device; - -void dgnc_create_ports_sysfiles(struct dgnc_board *bd); -void dgnc_remove_ports_sysfiles(struct dgnc_board *bd); - -void dgnc_create_driver_sysfiles(struct pci_driver *); -void dgnc_remove_driver_sysfiles(struct pci_driver *); - -int dgnc_tty_class_init(void); -int dgnc_tty_class_destroy(void); - -void dgnc_create_tty_sysfs(struct un_t *un, struct device *c); -void dgnc_remove_tty_sysfs(struct device *c); - -#endif diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c index 953d9310fa74770a7636709d7819a35ef13e951e..1e10c0fe47453083012cd460dfbc764ac1d6c802 100644 --- a/drivers/staging/dgnc/dgnc_tty.c +++ b/drivers/staging/dgnc/dgnc_tty.c @@ -13,13 +13,9 @@ * PURPOSE. See the GNU General Public License for more details. */ -/************************************************************************ - * +/* * This file implements the tty driver functionality for the * Neo and ClassicBoard PCI based product lines. - * - ************************************************************************ - * */ #include @@ -39,27 +35,20 @@ #include "dgnc_tty.h" #include "dgnc_neo.h" #include "dgnc_cls.h" -#include "dgnc_sysfs.h" #include "dgnc_utils.h" -/* - * internal variables - */ -static unsigned char *dgnc_TmpWriteBuf; - -/* - * Default transparent print information. - */ -static struct digi_t dgnc_digi_init = { - .digi_flags = DIGI_COOK, /* Flags */ - .digi_maxcps = 100, /* Max CPS */ - .digi_maxchar = 50, /* Max chars in print queue */ - .digi_bufsize = 100, /* Printer buffer size */ - .digi_onlen = 4, /* size of printer on string */ - .digi_offlen = 4, /* size of printer off string */ - .digi_onstr = "\033[5i", /* ANSI printer on string ] */ - .digi_offstr = "\033[4i", /* ANSI printer off string ] */ - .digi_term = "ansi" /* default terminal type */ +/* Default transparent print information. */ + +static const struct digi_t dgnc_digi_init = { + .digi_flags = DIGI_COOK, /* Flags */ + .digi_maxcps = 100, /* Max CPS */ + .digi_maxchar = 50, /* Max chars in print queue */ + .digi_bufsize = 100, /* Printer buffer size */ + .digi_onlen = 4, /* size of printer on string */ + .digi_offlen = 4, /* size of printer off string */ + .digi_onstr = "\033[5i", /* ANSI printer on string ] */ + .digi_offstr = "\033[4i", /* ANSI printer off string ] */ + .digi_term = "ansi" /* default terminal type */ }; /* @@ -69,7 +58,7 @@ static struct digi_t dgnc_digi_init = { * This defines a raw port at 9600 baud, 8 data bits, no parity, * 1 stop bit. 
*/ -static struct ktermios DgncDefaultTermios = { +static struct ktermios default_termios = { .c_iflag = (DEFAULT_IFLAGS), /* iflags */ .c_oflag = (DEFAULT_OFLAGS), /* oflags */ .c_cflag = (DEFAULT_CFLAGS), /* cflags */ @@ -113,6 +102,8 @@ static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios); static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch); +static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char line); +static void dgnc_wake_up_unit(struct un_t *unit); static const struct tty_operations dgnc_tty_ops = { .open = dgnc_tty_open, @@ -137,36 +128,7 @@ static const struct tty_operations dgnc_tty_ops = { .send_xchar = dgnc_tty_send_xchar }; -/************************************************************************ - * - * TTY Initialization/Cleanup Functions - * - ************************************************************************/ - -/* - * dgnc_tty_preinit() - * - * Initialize any global tty related data before we download any boards. - */ -int dgnc_tty_preinit(void) -{ - /* - * Allocate a buffer for doing the copy from user space to - * kernel space in dgnc_write(). We only use one buffer and - * control access to it with a semaphore. If we are paging, we - * are already in trouble so one buffer won't hurt much anyway. - * - * We are okay to sleep in the malloc, as this routine - * is only called during module load, (not in interrupt context), - * and with no locks held. - */ - dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL); - - if (!dgnc_TmpWriteBuf) - return -ENOMEM; - - return 0; -} +/* TTY Initialization/Cleanup Functions */ /* * dgnc_tty_register() @@ -194,7 +156,7 @@ int dgnc_tty_register(struct dgnc_board *brd) brd->serial_driver->minor_start = 0; brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL; brd->serial_driver->subtype = SERIAL_TYPE_NORMAL; - brd->serial_driver->init_termios = DgncDefaultTermios; + brd->serial_driver->init_termios = default_termios; brd->serial_driver->driver_name = DRVSTR; /* @@ -233,7 +195,7 @@ int dgnc_tty_register(struct dgnc_board *brd) brd->print_driver->minor_start = 0x80; brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL; brd->print_driver->subtype = SERIAL_TYPE_NORMAL; - brd->print_driver->init_termios = DgncDefaultTermios; + brd->print_driver->init_termios = default_termios; brd->print_driver->driver_name = DRVSTR; /* @@ -285,9 +247,7 @@ int dgnc_tty_init(struct dgnc_board *brd) if (!brd) return -ENXIO; - /* - * Initialize board structure elements. - */ + /* Initialize board structure elements. */ vaddr = brd->re_map_membase; @@ -345,12 +305,10 @@ int dgnc_tty_init(struct dgnc_board *brd) classp = tty_register_device(brd->serial_driver, i, &ch->ch_bd->pdev->dev); ch->ch_tun.un_sysfs = classp; - dgnc_create_tty_sysfs(&ch->ch_tun, classp); classp = tty_register_device(brd->print_driver, i, &ch->ch_bd->pdev->dev); ch->ch_pun.un_sysfs = classp; - dgnc_create_tty_sysfs(&ch->ch_pun, classp); } } @@ -364,17 +322,6 @@ int dgnc_tty_init(struct dgnc_board *brd) return -ENOMEM; } -/* - * dgnc_tty_post_uninit() - * - * UnInitialize any global tty related data. 
- */ -void dgnc_tty_post_uninit(void) -{ - kfree(dgnc_TmpWriteBuf); - dgnc_TmpWriteBuf = NULL; -} - /* * dgnc_cleanup_tty() * @@ -385,20 +332,14 @@ void dgnc_cleanup_tty(struct dgnc_board *brd) { int i = 0; - for (i = 0; i < brd->nasync; i++) { - if (brd->channels[i]) - dgnc_remove_tty_sysfs(brd->channels[i]-> - ch_tun.un_sysfs); + for (i = 0; i < brd->nasync; i++) tty_unregister_device(brd->serial_driver, i); - } + tty_unregister_driver(brd->serial_driver); - for (i = 0; i < brd->nasync; i++) { - if (brd->channels[i]) - dgnc_remove_tty_sysfs(brd->channels[i]-> - ch_pun.un_sysfs); + for (i = 0; i < brd->nasync; i++) tty_unregister_device(brd->print_driver, i); - } + tty_unregister_driver(brd->print_driver); put_tty_driver(brd->serial_driver); @@ -437,9 +378,7 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n) } if (n > 0) { - /* - * Move rest of data. - */ + /* Move rest of data. */ remain = n; memcpy(ch->ch_wqueue + head, buf, remain); head += remain; @@ -509,9 +448,8 @@ void dgnc_input(struct channel_t *ch) goto exit_unlock; } - /* - * If we are throttled, simply don't read any data. - */ + /* If we are throttled, simply don't read any data. */ + if (ch->ch_flags & CH_FORCED_STOPI) goto exit_unlock; @@ -624,10 +562,10 @@ void dgnc_input(struct channel_t *ch) tty_ldisc_deref(ld); } -/************************************************************************ +/* * Determines when CARRIER changes state and takes appropriate * action. - ************************************************************************/ + */ void dgnc_carrier(struct channel_t *ch) { int virt_carrier = 0; @@ -645,28 +583,24 @@ void dgnc_carrier(struct channel_t *ch) if (ch->ch_c_cflag & CLOCAL) virt_carrier = 1; - /* - * Test for a VIRTUAL carrier transition to HIGH. - */ + /* Test for a VIRTUAL carrier transition to HIGH. */ + if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) { /* * When carrier rises, wake any threads waiting * for carrier in the open routine. */ - if (waitqueue_active(&ch->ch_flags_wait)) wake_up_interruptible(&ch->ch_flags_wait); } - /* - * Test for a PHYSICAL carrier transition to HIGH. - */ + /* Test for a PHYSICAL carrier transition to HIGH. */ + if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) { /* * When carrier rises, wake any threads waiting * for carrier in the open routine. */ - if (waitqueue_active(&ch->ch_flags_wait)) wake_up_interruptible(&ch->ch_flags_wait); } @@ -704,9 +638,8 @@ void dgnc_carrier(struct channel_t *ch) tty_hangup(ch->ch_pun.un_tty); } - /* - * Make sure that our cached values reflect the current reality. - */ + /* Make sure that our cached values reflect the current reality. */ + if (virt_carrier == 1) ch->ch_flags |= CH_FCAR; else @@ -718,9 +651,8 @@ void dgnc_carrier(struct channel_t *ch) ch->ch_flags &= ~CH_CD; } -/* - * Assign the custom baud rate to the channel structure - */ +/* Assign the custom baud rate to the channel structure */ + static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate) { int testdiv; @@ -854,6 +786,12 @@ void dgnc_check_queue_flow_control(struct channel_t *ch) } } +static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char sig) +{ + ch->ch_mostat &= ~(sig); + ch->ch_bd->bd_ops->assert_modem_signals(ch); +} + void dgnc_wakeup_writes(struct channel_t *ch) { int qlen = 0; @@ -864,9 +802,8 @@ void dgnc_wakeup_writes(struct channel_t *ch) spin_lock_irqsave(&ch->ch_lock, flags); - /* - * If channel now has space, wake up anyone waiting on the condition. 
- */ + /* If channel now has space, wake up anyone waiting on the condition. */ + qlen = ch->ch_w_head - ch->ch_w_tail; if (qlen < 0) qlen += WQUEUESIZE; @@ -892,19 +829,15 @@ void dgnc_wakeup_writes(struct channel_t *ch) * If RTS Toggle mode is on, whenever * the queue and UART is empty, keep RTS low. */ - if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) { - ch->ch_mostat &= ~(UART_MCR_RTS); - ch->ch_bd->bd_ops->assert_modem_signals(ch); - } + if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) + dgnc_set_signal_low(ch, UART_MCR_RTS); /* * If DTR Toggle mode is on, whenever * the queue and UART is empty, keep DTR low. */ - if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) { - ch->ch_mostat &= ~(UART_MCR_DTR); - ch->ch_bd->bd_ops->assert_modem_signals(ch); - } + if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) + dgnc_set_signal_low(ch, UART_MCR_DTR); } } @@ -930,7 +863,7 @@ void dgnc_wakeup_writes(struct channel_t *ch) spin_unlock_irqrestore(&ch->ch_lock, flags); } -struct dgnc_board *find_board_by_major(unsigned int major) +static struct dgnc_board *find_board_by_major(unsigned int major) { int i; @@ -948,16 +881,10 @@ struct dgnc_board *find_board_by_major(unsigned int major) return NULL; } -/************************************************************************ - * - * TTY Entry points and helper functions - * - ************************************************************************/ +/* TTY Entry points and helper functions */ + +/* dgnc_tty_open() */ -/* - * dgnc_tty_open() - * - */ static int dgnc_tty_open(struct tty_struct *tty, struct file *file) { struct dgnc_board *brd; @@ -1045,8 +972,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file) * ch_flags_wait to wake us back up. */ rc = wait_event_interruptible(ch->ch_flags_wait, - (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & - UN_CLOSING) == 0)); + (((ch->ch_tun.un_flags | + ch->ch_pun.un_flags) & UN_CLOSING) == 0)); /* If ret is non-zero, user ctrl-c'ed us */ if (rc) @@ -1057,9 +984,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file) /* Store our unit into driver_data, so we always have it available. */ tty->driver_data = un; - /* - * Initialize tty's - */ + /* Initialize tty's */ + if (!(un->un_flags & UN_ISOPEN)) { /* Store important variables. */ un->un_tty = tty; @@ -1096,13 +1022,10 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file) ch->ch_flags &= ~(CH_OPENING); wake_up_interruptible(&ch->ch_flags_wait); - /* - * Initialize if neither terminal or printer is open. - */ + /* Initialize if neither terminal or printer is open. */ + if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) { - /* - * Flush input queues. - */ + /* Flush input queues. 
*/ ch->ch_r_head = 0; ch->ch_r_tail = 0; ch->ch_e_head = 0; @@ -1138,16 +1061,13 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file) brd->bd_ops->uart_init(ch); } - /* - * Run param in case we changed anything - */ + /* Run param in case we changed anything */ + brd->bd_ops->param(tty); dgnc_carrier(ch); - /* - * follow protocol for opening port - */ + /* follow protocol for opening port */ spin_unlock_irqrestore(&ch->ch_lock, flags); @@ -1248,9 +1168,8 @@ static int dgnc_block_til_ready(struct tty_struct *tty, break; } - /* - * Store the flags before we let go of channel lock - */ + /* Store the flags before we let go of channel lock */ + if (sleep_on_un_flags) old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags; else @@ -1269,12 +1188,13 @@ static int dgnc_block_til_ready(struct tty_struct *tty, * from the current value. */ if (sleep_on_un_flags) - retval = wait_event_interruptible(un->un_flags_wait, - (old_flags != (ch->ch_tun.un_flags | - ch->ch_pun.un_flags))); + retval = wait_event_interruptible + (un->un_flags_wait, + (old_flags != (ch->ch_tun.un_flags | + ch->ch_pun.un_flags))); else retval = wait_event_interruptible(ch->ch_flags_wait, - (old_flags != ch->ch_flags)); + (old_flags != ch->ch_flags)); /* * We got woken up for some reason. @@ -1304,10 +1224,8 @@ static void dgnc_tty_hangup(struct tty_struct *tty) dgnc_tty_flush_buffer(tty); } -/* - * dgnc_tty_close() - * - */ +/* dgnc_tty_close() */ + static void dgnc_tty_close(struct tty_struct *tty, struct file *file) { struct dgnc_board *bd; @@ -1377,9 +1295,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file) !(ch->ch_digi.digi_flags & DIGI_PRINTER)) { ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI); - /* - * turn off print device when closing print device. - */ + /* turn off print device when closing print device. */ + if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) { dgnc_wmove(ch, ch->ch_digi.digi_offstr, (int)ch->ch_digi.digi_offlen); @@ -1399,9 +1316,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file) tty->closing = 0; - /* - * If we have HUPCL set, lower DTR and RTS - */ + /* If we have HUPCL set, lower DTR and RTS */ + if (ch->ch_c_cflag & HUPCL) { /* Drop RTS/DTR */ ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS); @@ -1424,9 +1340,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file) /* Turn off UART interrupts for this port */ ch->ch_bd->bd_ops->uart_off(ch); } else { - /* - * turn off print device when closing print device. - */ + /* turn off print device when closing print device. */ + if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) { dgnc_wmove(ch, ch->ch_digi.digi_offstr, (int)ch->ch_digi.digi_offlen); @@ -1543,7 +1458,7 @@ static int dgnc_tty_write_room(struct tty_struct *tty) int ret = 0; unsigned long flags; - if (!tty || !dgnc_TmpWriteBuf) + if (!tty) return 0; un = tty->driver_data; @@ -1598,9 +1513,8 @@ static int dgnc_tty_write_room(struct tty_struct *tty) */ static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c) { - /* - * Simply call tty_write. - */ + /* Simply call tty_write. */ + dgnc_tty_write(tty, &c, 1); return 1; } @@ -1623,7 +1537,7 @@ static int dgnc_tty_write(struct tty_struct *tty, ushort tmask; uint remain; - if (!tty || !dgnc_TmpWriteBuf) + if (!tty) return 0; un = tty->driver_data; @@ -1667,9 +1581,8 @@ static int dgnc_tty_write(struct tty_struct *tty, */ count = min(count, bufcount); - /* - * Bail if no space left. - */ + /* Bail if no space left. 
*/ + if (count <= 0) goto exit_retry; @@ -1712,9 +1625,7 @@ static int dgnc_tty_write(struct tty_struct *tty, } if (n > 0) { - /* - * Move rest of data. - */ + /* Move rest of data. */ remain = n; memcpy(ch->ch_wqueue + head, buf, remain); head += remain; @@ -1749,9 +1660,7 @@ static int dgnc_tty_write(struct tty_struct *tty, return 0; } -/* - * Return modem signals to ld. - */ +/* Return modem signals to ld. */ static int dgnc_tty_tiocmget(struct tty_struct *tty) { @@ -1960,9 +1869,8 @@ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c) dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n"); } -/* - * Return modem signals to ld. - */ +/* Return modem signals to ld. */ + static inline int dgnc_get_mstat(struct channel_t *ch) { unsigned char mstat; @@ -1994,9 +1902,8 @@ static inline int dgnc_get_mstat(struct channel_t *ch) return result; } -/* - * Return modem signals to ld. - */ +/* Return modem signals to ld. */ + static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value) { @@ -2070,9 +1977,6 @@ static int dgnc_set_modem_info(struct channel_t *ch, * dgnc_tty_digigeta() * * Ioctl to get the information for ditty. - * - * - * */ static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo) @@ -2112,9 +2016,6 @@ static int dgnc_tty_digigeta(struct tty_struct *tty, * dgnc_tty_digiseta() * * Ioctl to set the information for ditty. - * - * - * */ static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info) @@ -2145,9 +2046,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty, spin_lock_irqsave(&ch->ch_lock, flags); - /* - * Handle transistions to and from RTS Toggle. - */ + /* Handle transitions to and from RTS Toggle. */ + if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && (new_digi.digi_flags & DIGI_RTS_TOGGLE)) ch->ch_mostat &= ~(UART_MCR_RTS); @@ -2155,9 +2055,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty, !(new_digi.digi_flags & DIGI_RTS_TOGGLE)) ch->ch_mostat |= (UART_MCR_RTS); - /* - * Handle transistions to and from DTR Toggle. - */ + /* Handle transitions to and from DTR Toggle. */ + if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && (new_digi.digi_flags & DIGI_DTR_TOGGLE)) ch->ch_mostat &= ~(UART_MCR_DTR); @@ -2195,9 +2094,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty, return 0; } -/* - * dgnc_set_termios() - */ +/* dgnc_set_termios() */ + static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { @@ -2428,11 +2326,18 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty) spin_unlock_irqrestore(&ch->ch_lock, flags); } -/***************************************************************************** - * - * The IOCTL function and all of its helpers +/* + * dgnc_wake_up_unit() * - *****************************************************************************/ + * Wakes up processes waiting in the unit's (teminal/printer) wait queue + */ +static void dgnc_wake_up_unit(struct un_t *unit) +{ + unit->un_flags &= ~(UN_LOW | UN_EMPTY); + wake_up_interruptible(&unit->un_flags_wait); +} + +/* The IOCTL function and all of its helpers */ /* * dgnc_tty_ioctl() @@ -2506,7 +2411,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, return 0; case TCSBRKP: - /* support for POSIX tcsendbreak() + /* + * support for POSIX tcsendbreak() * According to POSIX.1 spec (7.2.2.1.2) breaks should be * between 0.25 and 0.5 seconds so we'll ask for something * in the middle: 0.375 seconds. 
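
For context, the 0.375 second figure is simply the midpoint of the 0.25-0.5 second window that POSIX allows for a zero-duration break. A minimal userspace sketch of how this code path gets exercised is below; it is not part of this patch, the device path is purely illustrative, and the TCSBRKP argument semantics are taken from the tty_ioctl(4) documentation rather than from anything this driver guarantees.

#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	/* Illustrative device node only; the real name depends on how the
	 * board's tty driver registered its ports.
	 */
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;

	/* Portable POSIX request: a zero duration asks for a break of
	 * 0.25 to 0.5 seconds, which the comment above says this driver
	 * satisfies with 0.375 seconds.
	 */
	tcsendbreak(fd, 0);

	/* Linux-specific request handled by the TCSBRKP case above; per
	 * tty_ioctl(4) a nonzero argument is measured in deciseconds, and
	 * zero behaves like tcsendbreak(fd, 0).
	 */
	ioctl(fd, TCSBRKP, 4);

	close(fd);
	return 0;
}
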
@@ -2583,9 +2489,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, spin_unlock_irqrestore(&ch->ch_lock, flags); return dgnc_set_modem_info(ch, cmd, uarg); - /* - * Here are any additional ioctl's that we want to implement - */ + /* Here are any additional ioctl's that we want to implement */ case TCFLSH: /* @@ -2615,17 +2519,11 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, ch->ch_w_head = ch->ch_w_tail; ch_bd_ops->flush_uart_write(ch); - if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) { - ch->ch_tun.un_flags &= - ~(UN_LOW | UN_EMPTY); - wake_up_interruptible(&ch->ch_tun.un_flags_wait); - } - - if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { - ch->ch_pun.un_flags &= - ~(UN_LOW | UN_EMPTY); - wake_up_interruptible(&ch->ch_pun.un_flags_wait); - } + if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) + dgnc_wake_up_unit(&ch->ch_tun); + + if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) + dgnc_wake_up_unit(&ch->ch_pun); } } @@ -2705,9 +2603,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, case DIGI_LOOPBACK: { uint loopback = 0; - /* Let go of locks when accessing user space, + /* + * Let go of locks when accessing user space, * could sleep - */ + */ spin_unlock_irqrestore(&ch->ch_lock, flags); rc = get_user(loopback, (unsigned int __user *)arg); if (rc) @@ -2749,7 +2648,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, * This ioctl allows insertion of a character into the front * of any pending data to be transmitted. * - * This ioctl is to satify the "Send Character Immediate" + * This ioctl is to satisfy the "Send Character Immediate" * call that the RealPort protocol spec requires. */ case DIGI_REALPORT_SENDIMMEDIATE: @@ -2769,7 +2668,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, /* * This ioctl returns all the current counts for the port. * - * This ioctl is to satify the "Line Error Counters" + * This ioctl is to satisfy the "Line Error Counters" * call that the RealPort protocol spec requires. */ case DIGI_REALPORT_GETCOUNTERS: @@ -2795,7 +2694,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, /* * This ioctl returns all current events. * - * This ioctl is to satify the "Event Reporting" + * This ioctl is to satisfy the "Event Reporting" * call that the RealPort protocol spec requires. */ case DIGI_REALPORT_GETEVENTS: @@ -2831,23 +2730,23 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, spin_unlock_irqrestore(&ch->ch_lock, flags); - /* - * Get data from user first. - */ + /* Get data from user first. */ + if (copy_from_user(&buf, uarg, sizeof(buf))) return -EFAULT; spin_lock_irqsave(&ch->ch_lock, flags); - /* - * Figure out how much data is in our RX and TX queues. - */ + /* Figure out how much data is in our RX and TX queues. */ + buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK; buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK; /* - * Is the UART empty? Add that value to whats in our TX queue. + * Is the UART empty? + * Add that value to whats in our TX queue. */ + count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch); /* @@ -2867,9 +2766,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, if (buf.txbuf > tdist) buf.txbuf = tdist; - /* - * Report whether our queue and UART TX are completely empty. - */ + /* Report whether our queue and UART TX are completely empty. 
*/ + if (count) buf.txdone = 0; else diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h index 24c9a412211e55eadb505d222d7f4941df1f33aa..1ee0eeeb47308c5a433157a4b7d9637e0fbef06e 100644 --- a/drivers/staging/dgnc/dgnc_tty.h +++ b/drivers/staging/dgnc/dgnc_tty.h @@ -21,11 +21,9 @@ int dgnc_tty_register(struct dgnc_board *brd); void dgnc_tty_unregister(struct dgnc_board *brd); -int dgnc_tty_preinit(void); -int dgnc_tty_init(struct dgnc_board *); +int dgnc_tty_init(struct dgnc_board *brd); -void dgnc_tty_post_uninit(void); -void dgnc_cleanup_tty(struct dgnc_board *); +void dgnc_cleanup_tty(struct dgnc_board *brd); void dgnc_input(struct channel_t *ch); void dgnc_carrier(struct channel_t *ch); diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h index 5b983e6f5ee2c8de09e060f1b8f283c23fe424c8..ec2e3dda6119e2c80e28a2813dbbbf097ccefcf1 100644 --- a/drivers/staging/dgnc/digi.h +++ b/drivers/staging/dgnc/digi.h @@ -17,16 +17,16 @@ #define __DIGI_H #ifndef TIOCM_LE -#define TIOCM_LE 0x01 /* line enable */ -#define TIOCM_DTR 0x02 /* data terminal ready */ -#define TIOCM_RTS 0x04 /* request to send */ -#define TIOCM_ST 0x08 /* secondary transmit */ -#define TIOCM_SR 0x10 /* secondary receive */ -#define TIOCM_CTS 0x20 /* clear to send */ -#define TIOCM_CAR 0x40 /* carrier detect */ -#define TIOCM_RNG 0x80 /* ring indicator */ -#define TIOCM_DSR 0x100 /* data set ready */ -#define TIOCM_RI TIOCM_RNG /* ring (alternate) */ +#define TIOCM_LE 0x01 /* line enable */ +#define TIOCM_DTR 0x02 /* data terminal ready */ +#define TIOCM_RTS 0x04 /* request to send */ +#define TIOCM_ST 0x08 /* secondary transmit */ +#define TIOCM_SR 0x10 /* secondary receive */ +#define TIOCM_CTS 0x20 /* clear to send */ +#define TIOCM_CAR 0x40 /* carrier detect */ +#define TIOCM_RNG 0x80 /* ring indicator */ +#define TIOCM_DSR 0x100 /* data set ready */ +#define TIOCM_RI TIOCM_RNG /* ring (alternate) */ #define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */ #endif @@ -40,72 +40,71 @@ #define TIOCMBIS (('d' << 8) | 255) /* set modem ctrl state */ #endif -#define DIGI_GETA (('e' << 8) | 94) /* Read params */ -#define DIGI_SETA (('e' << 8) | 95) /* Set params */ -#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */ +#define DIGI_GETA (('e' << 8) | 94) /* Read params */ +#define DIGI_SETA (('e' << 8) | 95) /* Set params */ +#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */ #define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */ -#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */ -#define DIGI_LOOPBACK (('d' << 8) | 252) /* - * Enable/disable UART - * internal loopback - */ -#define DIGI_FAST 0x0002 /* Fast baud rates */ -#define RTSPACE 0x0004 /* RTS input flow control */ -#define CTSPACE 0x0008 /* CTS output flow control */ +#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */ +#define DIGI_LOOPBACK (('d' << 8) | 252) /* + * Enable/disable UART + * internal loopback + */ +#define DIGI_FAST 0x0002 /* Fast baud rates */ +#define RTSPACE 0x0004 /* RTS input flow control */ +#define CTSPACE 0x0008 /* CTS output flow control */ #define DIGI_COOK 0x0080 /* Cooked processing done in FEP */ -#define DIGI_FORCEDCD 0x0100 /* Force carrier */ -#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */ +#define DIGI_FORCEDCD 0x0100 /* Force carrier */ +#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */ #define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/ -#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR 
Toggle */ -#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */ -#define DIGI_PLEN 28 /* String length */ -#define DIGI_TSIZ 10 /* Terminal string len */ +#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */ +#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */ +#define DIGI_PLEN 28 /* String length */ +#define DIGI_TSIZ 10 /* Terminal string len */ -/************************************************************************ +/* * Structure used with ioctl commands for DIGI parameters. - ************************************************************************/ + */ struct digi_t { - unsigned short digi_flags; /* Flags (see above) */ - unsigned short digi_maxcps; /* Max printer CPS */ + unsigned short digi_flags; /* Flags (see above) */ + unsigned short digi_maxcps; /* Max printer CPS */ unsigned short digi_maxchar; /* Max chars in print queue */ - unsigned short digi_bufsize; /* Buffer size */ - unsigned char digi_onlen; /* Length of ON string */ + unsigned short digi_bufsize; /* Buffer size */ + unsigned char digi_onlen; /* Length of ON string */ unsigned char digi_offlen; /* Length of OFF string */ - char digi_onstr[DIGI_PLEN]; /* Printer on string */ - char digi_offstr[DIGI_PLEN]; /* Printer off string */ - char digi_term[DIGI_TSIZ]; /* terminal string */ + char digi_onstr[DIGI_PLEN]; /* Printer on string */ + char digi_offstr[DIGI_PLEN]; /* Printer off string */ + char digi_term[DIGI_TSIZ]; /* terminal string */ }; -/************************************************************************ - * Structure to get driver status information - ************************************************************************/ +/* Structure to get driver status information */ + struct digi_dinfo { - unsigned int dinfo_nboards; /* # boards configured */ + unsigned int dinfo_nboards; /* # boards configured */ char dinfo_reserved[12]; /* for future expansion */ - char dinfo_version[16]; /* driver version */ + char dinfo_version[16]; /* driver version */ }; -#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */ +#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */ -/************************************************************************ +/* * Structure used with ioctl commands for per-board information * * physsize and memsize differ when board has "windowed" memory - ************************************************************************/ + */ struct digi_info { - unsigned int info_bdnum; /* Board number (0 based) */ - unsigned int info_ioport; /* io port address */ - unsigned int info_physaddr; /* memory address */ + unsigned int info_bdnum; /* Board number (0 based) */ + unsigned int info_ioport; /* io port address */ + unsigned int info_physaddr; /* memory address */ unsigned int info_physsize; /* Size of host mem window */ unsigned int info_memsize; /* Amount of dual-port mem */ - /* on board */ - unsigned short info_bdtype; /* Board type */ - unsigned short info_nports; /* number of ports */ - char info_bdstate; /* board state */ - char info_reserved[7]; /* for future expansion */ + /* on board */ + unsigned short info_bdtype; /* Board type */ + unsigned short info_nports; /* number of ports */ + char info_bdstate; /* board state */ + char info_reserved[7]; /* for future expansion */ }; -#define DIGI_GETBD (('d' << 8) | 249) /* get board info */ +#define DIGI_GETBD (('d' << 8) | 249) /* get board info */ struct digi_getbuffer /* Struct for holding buffer use counts */ { @@ -139,7 +138,7 @@ struct digi_getcounter { #define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111) #define EV_OPU 
0x0001 /* !context; p_ctrl = &udc->ctrl; if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { - if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) { /*-------------------------------------------------*/ /* SET_FEATURE */ @@ -263,7 +262,7 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep) } _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data); - _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum|ep->direct)); + _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum | ep->direct)); if (ep->direct == USB_DIR_OUT) { /*---------------------------------------------------------*/ @@ -460,7 +459,7 @@ static void _nbu2ss_ep_in_end( if (length) _nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32); - data = ((((u32)length) << 5) & EPn_DW) | EPn_DEND; + data = (((length) << 5) & EPn_DW) | EPn_DEND; _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data); _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO); @@ -753,7 +752,6 @@ static int _nbu2ss_ep0_out_transfer( /* Receive data confirmation */ iRecvLength = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA; if (iRecvLength != 0) { - fRcvZero = 0; iRemainSize = req->req.length - req->req.actual; @@ -928,9 +926,8 @@ static int _nbu2ss_epn_out_pio( req->req.actual += result; - if ((req->req.actual == req->req.length) - || ((req->req.actual % ep->ep.maxpacket) != 0)) { - + if ((req->req.actual == req->req.length) || + ((req->req.actual % ep->ep.maxpacket) != 0)) { result = 0; } @@ -956,9 +953,8 @@ static int _nbu2ss_epn_out_data( iBufSize = min((req->req.length - req->req.actual), data_size); - if ((ep->ep_type != USB_ENDPOINT_XFER_INT) - && (req->req.dma != 0) - && (iBufSize >= sizeof(u32))) { + if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) && + (iBufSize >= sizeof(u32))) { nret = _nbu2ss_out_dma(udc, req, num, iBufSize); } else { iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket); @@ -999,9 +995,8 @@ static int _nbu2ss_epn_out_transfer( } } } else { - if ((req->req.actual == req->req.length) - || ((req->req.actual % ep->ep.maxpacket) != 0)) { - + if ((req->req.actual == req->req.length) || + ((req->req.actual % ep->ep.maxpacket) != 0)) { result = 0; } } @@ -1170,9 +1165,8 @@ static int _nbu2ss_epn_in_data( num = ep->epnum - 1; - if ((ep->ep_type != USB_ENDPOINT_XFER_INT) - && (req->req.dma != 0) - && (data_size >= sizeof(u32))) { + if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) && + (data_size >= sizeof(u32))) { nret = _nbu2ss_in_dma(udc, ep, req, num, data_size); } else { data_size = min_t(u32, data_size, ep->ep.maxpacket); @@ -1557,7 +1551,6 @@ static void _nbu2ss_epn_set_stall( for (limit_cnt = 0 ; limit_cnt < IN_DATA_EMPTY_COUNT ; limit_cnt++) { - regdata = _nbu2ss_readl( &preg->EP_REGS[ep->epnum - 1].EP_STATUS); @@ -1582,11 +1575,8 @@ static int std_req_get_status(struct nbu2ss_udc *udc) u8 ep_adrs; int result = -EINVAL; - if ((udc->ctrl.wValue != 0x0000) - || (direction != USB_DIR_IN)) { - + if ((udc->ctrl.wValue != 0x0000) || (direction != USB_DIR_IN)) return result; - } length = min_t(u16, udc->ctrl.wLength, sizeof(status_data)); @@ -1852,7 +1842,7 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc) status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS); intr = status & EP0_STATUS_RW_BIT; - _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~(u32)intr); + _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~intr); status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT | STG_END_INT | EP0_OUT_NULL_INT); @@ -1897,9 +1887,8 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc) break; case 
EP0_OUT_STATUS_PAHSE: - if ((status & STG_END_INT) - || (status & SETUP_INT) - || (status & EP0_OUT_NULL_INT)) { + if ((status & STG_END_INT) || (status & SETUP_INT) || + (status & EP0_OUT_NULL_INT)) { status &= ~(STG_END_INT | EP0_OUT_INT | EP0_OUT_NULL_INT); @@ -1982,7 +1971,6 @@ static inline void _nbu2ss_epn_in_int( } else { if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) { - status = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS); @@ -2127,7 +2115,7 @@ static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum) status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS); /* Interrupt Clear */ - _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~(u32)status); + _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~status); req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue); if (!req) { @@ -2330,7 +2318,6 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc) /* VBUS ON Check*/ reg_dt = gpio_get_value(VBUS_VALUE); if (reg_dt == 0) { - udc->linux_suspended = 0; _nbu2ss_reset_controller(udc); @@ -2502,7 +2489,6 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc) int_bit = status >> 8; for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) { - if (0x01 & int_bit) _nbu2ss_ep_int(udc, epnum); @@ -2546,9 +2532,8 @@ static int nbu2ss_ep_enable( } ep_type = usb_endpoint_type(desc); - if ((ep_type == USB_ENDPOINT_XFER_CONTROL) - || (ep_type == USB_ENDPOINT_XFER_ISOC)) { - + if ((ep_type == USB_ENDPOINT_XFER_CONTROL) || + (ep_type == USB_ENDPOINT_XFER_ISOC)) { pr_err(" *** %s, bat bmAttributes\n", __func__); return -EINVAL; } @@ -2557,9 +2542,7 @@ static int nbu2ss_ep_enable( if (udc->vbus_active == 0) return -ESHUTDOWN; - if ((!udc->driver) - || (udc->gadget.speed == USB_SPEED_UNKNOWN)) { - + if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) { dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__); return -ESHUTDOWN; } @@ -2674,10 +2657,7 @@ static int nbu2ss_ep_queue( } req = container_of(_req, struct nbu2ss_req, req); - if (unlikely - (!_req->complete || !_req->buf - || !list_empty(&req->queue))) { - + if (unlikely(!_req->complete || !_req->buf || !list_empty(&req->queue))) { if (!_req->complete) pr_err("udc: %s --- !_req->complete\n", __func__); @@ -2736,7 +2716,6 @@ static int nbu2ss_ep_queue( list_add_tail(&req->queue, &ep->queue); if (bflag && !ep->stalled) { - result = _nbu2ss_start_transfer(udc, ep, req, FALSE); if (result < 0) { dev_err(udc->dev, " *** %s, result = %d\n", __func__, @@ -2938,7 +2917,7 @@ static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep) } /*-------------------------------------------------------------------------*/ -static struct usb_ep_ops nbu2ss_ep_ops = { +static const struct usb_ep_ops nbu2ss_ep_ops = { .enable = nbu2ss_ep_enable, .disable = nbu2ss_ep_disable, @@ -2979,9 +2958,7 @@ static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget) if (data == 0) return -EINVAL; - data = _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME; - - return data; + return _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME; } /*-------------------------------------------------------------------------*/ @@ -3307,8 +3284,8 @@ static int nbu2ss_drv_remove(struct platform_device *pdev) for (i = 0; i < NUM_ENDPOINTS; i++) { ep = &udc->ep[i]; if (ep->virt_buf) - dma_free_coherent(NULL, PAGE_SIZE, - (void *)ep->virt_buf, ep->phys_buf); + dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf, + ep->phys_buf); } /* Interrupt Handler - Release */ diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c 
b/drivers/staging/fbtft/fb_agm1264k-fl.c index 7561385761e9f5635465da8066772d431d6838dd..a6e3af74a904eef657177c180183bcd2d688bf43 100644 --- a/drivers/staging/fbtft/fb_agm1264k-fl.c +++ b/drivers/staging/fbtft/fb_agm1264k-fl.c @@ -264,6 +264,39 @@ construct_line_bitmap(struct fbtft_par *par, u8 *dest, signed short *src, } } +static void iterate_diffusion_matrix(u32 xres, u32 yres, int x, + int y, signed short *convert_buf, + signed short pixel, signed short error) +{ + u16 i, j; + + /* diffusion matrix row */ + for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i) + /* diffusion matrix column */ + for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) { + signed short *write_pos; + signed char coeff; + + /* skip pixels out of zone */ + if (x + i < 0 || x + i >= xres || y + j >= yres) + continue; + write_pos = &convert_buf[(y + j) * xres + x + i]; + coeff = diffusing_matrix[i][j]; + if (-1 == coeff) + /* pixel itself */ + *write_pos = pixel; + else { + signed short p = *write_pos + error * coeff; + + if (p > WHITE) + p = WHITE; + if (p < BLACK) + p = BLACK; + *write_pos = p; + } + } +} + static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) { u16 *vmem16 = (u16 *)par->info->screen_buffer; @@ -303,7 +336,6 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) signed short error_b = pixel - BLACK; signed short error_w = pixel - WHITE; signed short error; - u16 i, j; /* what color close? */ if (abs(error_b) >= abs(error_w)) { @@ -318,36 +350,10 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) error /= 8; - /* diffusion matrix row */ - for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i) - /* diffusion matrix column */ - for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) { - signed short *write_pos; - signed char coeff; - - /* skip pixels out of zone */ - if (x + i < 0 || - x + i >= par->info->var.xres - || y + j >= par->info->var.yres) - continue; - write_pos = &convert_buf[ - (y + j) * par->info->var.xres + - x + i]; - coeff = diffusing_matrix[i][j]; - if (coeff == -1) - /* pixel itself */ - *write_pos = pixel; - else { - signed short p = *write_pos + - error * coeff; - - if (p > WHITE) - p = WHITE; - if (p < BLACK) - p = BLACK; - *write_pos = p; - } - } + iterate_diffusion_matrix(par->info->var.xres, + par->info->var.yres, + x, y, convert_buf, + pixel, error); } /* 1 string = 2 pages */ diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c index c31e2e051d4a063be3b14dcc118c0585b325ed70..19e33bab9cacd0ea79d5461f6167588aefea43cc 100644 --- a/drivers/staging/fbtft/fb_ili9325.c +++ b/drivers/staging/fbtft/fb_ili9325.c @@ -33,26 +33,23 @@ "04 16 2 7 6 3 2 1 7 7" static unsigned int bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */ -module_param(bt, uint, 0); +module_param(bt, uint, 0000); MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits"); static unsigned int vc = 0x03; /* Vci1=Vci*0.80 */ -module_param(vc, uint, 0); -MODULE_PARM_DESC(vc, -"Sets the ratio factor of Vci to generate the reference voltages Vci1"); +module_param(vc, uint, 0000); +MODULE_PARM_DESC(vc, "Sets the ratio factor of Vci to generate the reference voltages Vci1"); static unsigned int vrh = 0x0d; /* VREG1OUT=Vci*1.85 */ -module_param(vrh, uint, 0); -MODULE_PARM_DESC(vrh, -"Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT"); +module_param(vrh, uint, 0000); +MODULE_PARM_DESC(vrh, "Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT"); static unsigned int vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */ 
-module_param(vdv, uint, 0); -MODULE_PARM_DESC(vdv, -"Select the factor of VREG1OUT to set the amplitude of Vcom"); +module_param(vdv, uint, 0000); +MODULE_PARM_DESC(vdv, "Select the factor of VREG1OUT to set the amplitude of Vcom"); static unsigned int vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */ -module_param(vcm, uint, 0); +module_param(vcm, uint, 0000); MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage"); /* diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c index 242adb3859bd0d03c0ae870f4fba6aeaaa15934f..4e75f5abe2f9701de3c90ca06018c099fbb1405a 100644 --- a/drivers/staging/fbtft/fb_ili9481.c +++ b/drivers/staging/fbtft/fb_ili9481.c @@ -27,7 +27,7 @@ #define WIDTH 320 #define HEIGHT 480 -static int default_init_sequence[] = { +static s16 default_init_sequence[] = { /* SLP_OUT - Sleep out */ -1, MIPI_DCS_EXIT_SLEEP_MODE, -2, 50, diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c index fa38d8885f0b425798b53f2a88158c928c356e15..f4b314265f9ed3e83e457862667f8d151c65fd93 100644 --- a/drivers/staging/fbtft/fb_ili9486.c +++ b/drivers/staging/fbtft/fb_ili9486.c @@ -26,7 +26,7 @@ #define HEIGHT 480 /* this init sequence matches PiScreen */ -static int default_init_sequence[] = { +static s16 default_init_sequence[] = { /* Interface Mode Control */ -1, 0xb0, 0x0, -1, MIPI_DCS_EXIT_SLEEP_MODE, diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c index 774b0ff69e6dd83952355a4f04e596b720108e9e..eb712aa0d692ff6c73f8ba71f49e8e40d398942a 100644 --- a/drivers/staging/fbtft/fb_s6d02a1.c +++ b/drivers/staging/fbtft/fb_s6d02a1.c @@ -24,7 +24,7 @@ #define DRVNAME "fb_s6d02a1" -static int default_init_sequence[] = { +static s16 default_init_sequence[] = { -1, 0xf0, 0x5a, 0x5a, diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c index 6670f2bb62ec4e05130e8764cf9228d3c04cc4ae..710b74bbba97c57086fcf74bc34e1f6af4a6f4d3 100644 --- a/drivers/staging/fbtft/fb_st7735r.c +++ b/drivers/staging/fbtft/fb_st7735r.c @@ -25,7 +25,7 @@ #define DEFAULT_GAMMA "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \ "0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10" -static int default_init_sequence[] = { +static s16 default_init_sequence[] = { -1, MIPI_DCS_SOFT_RESET, -2, 150, /* delay */ diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 587f68aa466c2ee41c32e7d2822874eaaae91986..bbe89c9c4fb91b6f812ffd3458049e21fa2b1a5a 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -253,7 +253,8 @@ static int fbtft_backlight_update_status(struct backlight_device *bd) "%s: polarity=%d, power=%d, fb_blank=%d\n", __func__, polarity, bd->props.power, bd->props.fb_blank); - if ((bd->props.power == FB_BLANK_UNBLANK) && (bd->props.fb_blank == FB_BLANK_UNBLANK)) + if ((bd->props.power == FB_BLANK_UNBLANK) && + (bd->props.fb_blank == FB_BLANK_UNBLANK)) gpio_set_value(par->gpio.led[0], polarity); else gpio_set_value(par->gpio.led[0], !polarity); @@ -299,7 +300,8 @@ void fbtft_register_backlight(struct fbtft_par *par) bl_props.state |= BL_CORE_DRIVER1; bd = backlight_device_register(dev_driver_string(par->info->device), - par->info->device, par, &fbtft_bl_ops, &bl_props); + par->info->device, par, + &fbtft_bl_ops, &bl_props); if (IS_ERR(bd)) { dev_err(par->info->device, "cannot register backlight device (%ld)\n", @@ -350,9 +352,11 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line, bool timeit = false; int ret = 
0; - if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE | DEBUG_TIME_EACH_UPDATE))) { + if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE | + DEBUG_TIME_EACH_UPDATE))) { if ((par->debug & DEBUG_TIME_EACH_UPDATE) || - ((par->debug & DEBUG_TIME_FIRST_UPDATE) && !par->first_update_done)) { + ((par->debug & DEBUG_TIME_FIRST_UPDATE) && + !par->first_update_done)) { ts_start = ktime_get(); timeit = true; } @@ -361,15 +365,17 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line, /* Sanity checks */ if (start_line > end_line) { dev_warn(par->info->device, - "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n", - __func__, start_line, end_line); + "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n", + __func__, start_line, end_line); start_line = 0; end_line = par->info->var.yres - 1; } - if (start_line > par->info->var.yres - 1 || end_line > par->info->var.yres - 1) { + if (start_line > par->info->var.yres - 1 || + end_line > par->info->var.yres - 1) { dev_warn(par->info->device, "%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n", - __func__, start_line, end_line, par->info->var.yres - 1); + __func__, start_line, + end_line, par->info->var.yres - 1); start_line = 0; end_line = par->info->var.yres - 1; } @@ -660,12 +666,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, unsigned int bpp = display->bpp; unsigned int fps = display->fps; int vmem_size, i; - int *init_sequence = display->init_sequence; + s16 *init_sequence = display->init_sequence; char *gamma = display->gamma; unsigned long *gamma_curves = NULL; /* sanity check */ - if (display->gamma_num * display->gamma_len > FBTFT_GAMMA_MAX_VALUES_TOTAL) { + if (display->gamma_num * display->gamma_len > + FBTFT_GAMMA_MAX_VALUES_TOTAL) { dev_err(dev, "FBTFT_GAMMA_MAX_VALUES_TOTAL=%d is exceeded\n", FBTFT_GAMMA_MAX_VALUES_TOTAL); return NULL; @@ -832,11 +839,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, #ifdef CONFIG_HAS_DMA if (dma) { dev->coherent_dma_mask = ~0; - txbuf = dmam_alloc_coherent(dev, txbuflen, &par->txbuf.dma, GFP_DMA); + txbuf = dmam_alloc_coherent(dev, txbuflen, + &par->txbuf.dma, GFP_DMA); } else #endif { - txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL); + txbuf = devm_kzalloc(par->info->device, + txbuflen, GFP_KERNEL); } if (!txbuf) goto alloc_fail; diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 89c4b5b76ce69d78c11051868958eaf6c0b0e084..aacdde92cc2e2e8bf4cf3238325723c22e223647 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -124,7 +124,7 @@ struct fbtft_display { unsigned int bpp; unsigned int fps; int txbuflen; - int *init_sequence; + s16 *init_sequence; char *gamma; int gamma_num; int gamma_len; @@ -229,7 +229,7 @@ struct fbtft_par { int led[16]; int aux[16]; } gpio; - int *init_sequence; + s16 *init_sequence; struct { struct mutex lock; unsigned long *curves; diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c index e9211831b6a1d8d696e8155737a04ea51bb8e7b4..de46f8d988d246cb7516690ab321cab4604f29b9 100644 --- a/drivers/staging/fbtft/fbtft_device.c +++ b/drivers/staging/fbtft/fbtft_device.c @@ -96,9 +96,9 @@ static unsigned int buswidth = 8; module_param(buswidth, uint, 0); MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument"); -static int 
init[FBTFT_MAX_INIT_SEQUENCE]; +static s16 init[FBTFT_MAX_INIT_SEQUENCE]; static int init_num; -module_param_array(init, int, &init_num, 0); +module_param_array(init, short, &init_num, 0); MODULE_PARM_DESC(init, "Init sequence, used with the custom argument"); static unsigned long debug; @@ -131,7 +131,7 @@ static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par, "D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \ "D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20" -static int cberry28_init_sequence[] = { +static s16 cberry28_init_sequence[] = { /* turn off sleep mode */ -1, MIPI_DCS_EXIT_SLEEP_MODE, -2, 120, @@ -180,7 +180,7 @@ static int cberry28_init_sequence[] = { -3, }; -static int hy28b_init_sequence[] = { +static s16 hy28b_init_sequence[] = { -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, @@ -211,7 +211,7 @@ static int hy28b_init_sequence[] = { "04 1F 4 7 7 0 7 7 6 0\n" \ "0F 00 1 7 4 0 0 0 6 7" -static int pitft_init_sequence[] = { +static s16 pitft_init_sequence[] = { -1, MIPI_DCS_SOFT_RESET, -2, 5, -1, MIPI_DCS_SET_DISPLAY_OFF, @@ -242,7 +242,7 @@ static int pitft_init_sequence[] = { -3 }; -static int waveshare32b_init_sequence[] = { +static s16 waveshare32b_init_sequence[] = { -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02, -1, 0xCF, 0x00, 0xC1, 0x30, -1, 0xE8, 0x85, 0x00, 0x78, diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c index ce0d254148e4d8a8e3498ef33b68f5313e16ecf8..ded10718712b6e391263a3aee7b14cb1a67823ad 100644 --- a/drivers/staging/fbtft/flexfb.c +++ b/drivers/staging/fbtft/flexfb.c @@ -38,9 +38,9 @@ static unsigned int height; module_param(height, uint, 0); MODULE_PARM_DESC(height, "Display height"); -static int init[512]; +static s16 init[512]; static int init_num; -module_param_array(init, int, &init_num, 0); +module_param_array(init, short, &init_num, 0); MODULE_PARM_DESC(init, "Init sequence"); static unsigned int setaddrwin; @@ -63,68 +63,316 @@ static bool latched; module_param(latched, bool, 0); MODULE_PARM_DESC(latched, "Use with latched 16-bit databus"); -static int *initp; +static s16 *initp; static int initp_num; /* default init sequences */ -static int st7735r_init[] = { --1, 0x01, -2, 150, -1, 0x11, -2, 500, -1, 0xB1, 0x01, 0x2C, 0x2D, -1, 0xB2, 0x01, 0x2C, 0x2D, -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D, --1, 0xB4, 0x07, -1, 0xC0, 0xA2, 0x02, 0x84, -1, 0xC1, 0xC5, -1, 0xC2, 0x0A, 0x00, -1, 0xC3, 0x8A, 0x2A, -1, 0xC4, 0x8A, 0xEE, -1, 0xC5, 0x0E, --1, 0x20, -1, 0x36, 0xC0, -1, 0x3A, 0x05, -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22, 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10, --1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e, 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10, -1, 0x29, -2, 100, -1, 0x13, -2, 10, -3 }; - -static int ssd1289_init[] = { --1, 0x00, 0x0001, -1, 0x03, 0xA8A4, -1, 0x0C, 0x0000, -1, 0x0D, 0x080C, -1, 0x0E, 0x2B00, -1, 0x1E, 0x00B7, -1, 0x01, 0x2B3F, -1, 0x02, 0x0600, --1, 0x10, 0x0000, -1, 0x11, 0x6070, -1, 0x05, 0x0000, -1, 0x06, 0x0000, -1, 0x16, 0xEF1C, -1, 0x17, 0x0003, -1, 0x07, 0x0233, -1, 0x0B, 0x0000, --1, 0x0F, 0x0000, -1, 0x41, 0x0000, -1, 0x42, 0x0000, -1, 0x48, 0x0000, -1, 0x49, 0x013F, -1, 0x4A, 0x0000, -1, 0x4B, 0x0000, -1, 0x44, 0xEF00, --1, 0x45, 0x0000, -1, 0x46, 0x013F, -1, 0x30, 0x0707, -1, 0x31, 0x0204, -1, 0x32, 0x0204, -1, 0x33, 0x0502, -1, 0x34, 0x0507, -1, 0x35, 0x0204, --1, 0x36, 0x0204, -1, 0x37, 0x0502, -1, 0x3A, 0x0302, -1, 0x3B, 0x0302, -1, 0x23, 0x0000, -1, 0x24, 0x0000, -1, 0x25, 
0x8000, -1, 0x4f, 0x0000, --1, 0x4e, 0x0000, -1, 0x22, -3 }; - -static int hx8340bn_init[] = { --1, 0xC1, 0xFF, 0x83, 0x40, -1, 0x11, -2, 150, -1, 0xCA, 0x70, 0x00, 0xD9, -1, 0xB0, 0x01, 0x11, --1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06, -2, 20, -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A, --1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33, -2, 10, -1, 0xB5, 0x35, 0x20, 0x45, -1, 0xB4, 0x33, 0x25, 0x4C, -2, 10, --1, 0x3A, 0x05, -1, 0x29, -2, 10, -3 }; - -static int ili9225_init[] = { --1, 0x0001, 0x011C, -1, 0x0002, 0x0100, -1, 0x0003, 0x1030, -1, 0x0008, 0x0808, -1, 0x000C, 0x0000, -1, 0x000F, 0x0A01, -1, 0x0020, 0x0000, --1, 0x0021, 0x0000, -2, 50, -1, 0x0010, 0x0A00, -1, 0x0011, 0x1038, -2, 50, -1, 0x0012, 0x1121, -1, 0x0013, 0x004E, -1, 0x0014, 0x676F, --1, 0x0030, 0x0000, -1, 0x0031, 0x00DB, -1, 0x0032, 0x0000, -1, 0x0033, 0x0000, -1, 0x0034, 0x00DB, -1, 0x0035, 0x0000, -1, 0x0036, 0x00AF, --1, 0x0037, 0x0000, -1, 0x0038, 0x00DB, -1, 0x0039, 0x0000, -1, 0x0050, 0x0000, -1, 0x0051, 0x060A, -1, 0x0052, 0x0D0A, -1, 0x0053, 0x0303, --1, 0x0054, 0x0A0D, -1, 0x0055, 0x0A06, -1, 0x0056, 0x0000, -1, 0x0057, 0x0303, -1, 0x0058, 0x0000, -1, 0x0059, 0x0000, -2, 50, --1, 0x0007, 0x1017, -2, 50, -3 }; - -static int ili9320_init[] = { --1, 0x00E5, 0x8000, -1, 0x0000, 0x0001, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, -1, 0x0008, 0x0202, --1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000, -1, 0x0011, 0x0007, --1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x17B0, -1, 0x0011, 0x0031, -2, 50, -1, 0x0012, 0x0138, -2, 50, -1, 0x0013, 0x1800, --1, 0x0029, 0x0008, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000, -1, 0x0031, 0x0505, -1, 0x0032, 0x0004, --1, 0x0035, 0x0006, -1, 0x0036, 0x0707, -1, 0x0037, 0x0105, -1, 0x0038, 0x0002, -1, 0x0039, 0x0707, -1, 0x003C, 0x0704, -1, 0x003D, 0x0807, --1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0x2700, -1, 0x0061, 0x0001, -1, 0x006A, 0x0000, --1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000, -1, 0x0085, 0x0000, -1, 0x0090, 0x0010, --1, 0x0092, 0x0000, -1, 0x0093, 0x0003, -1, 0x0095, 0x0110, -1, 0x0097, 0x0000, -1, 0x0098, 0x0000, -1, 0x0007, 0x0173, -3 }; - -static int ili9325_init[] = { --1, 0x00E3, 0x3008, -1, 0x00E7, 0x0012, -1, 0x00EF, 0x1231, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, --1, 0x0008, 0x0207, -1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000, --1, 0x0011, 0x0007, -1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x1690, -1, 0x0011, 0x0223, -2, 50, -1, 0x0012, 0x000D, -2, 50, --1, 0x0013, 0x1200, -1, 0x0029, 0x000A, -1, 0x002B, 0x000C, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000, --1, 0x0031, 0x0506, -1, 0x0032, 0x0104, -1, 0x0035, 0x0207, -1, 0x0036, 0x000F, -1, 0x0037, 0x0306, -1, 0x0038, 0x0102, -1, 0x0039, 0x0707, --1, 0x003C, 0x0702, -1, 0x003D, 0x1604, -1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0xA700, --1, 0x0061, 0x0001, -1, 0x006A, 0x0000, -1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000, --1, 0x0085, 0x0000, -1, 0x0090, 0x0010, -1, 0x0092, 0x0600, -1, 0x0007, 0x0133, -3 }; - -static int ili9341_init[] = { --1, 0x28, -2, 20, -1, 0xCF, 
0x00, 0x83, 0x30, -1, 0xED, 0x64, 0x03, 0x12, 0x81, -1, 0xE8, 0x85, 0x01, 0x79, --1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02, -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00, -1, 0xC0, 0x26, -1, 0xC1, 0x11, --1, 0xC5, 0x35, 0x3E, -1, 0xC7, 0xBE, -1, 0xB1, 0x00, 0x1B, -1, 0xB6, 0x0a, 0x82, 0x27, 0x00, -1, 0xB7, 0x07, --1, 0x3A, 0x55, -1, 0x36, 0x48, -1, 0x11, -2, 120, -1, 0x29, -2, 20, -3 }; - -static int ssd1351_init[] = { -1, 0xfd, 0x12, -1, 0xfd, 0xb1, -1, 0xae, -1, 0xb3, 0xf1, -1, 0xca, 0x7f, -1, 0xa0, 0x74, - -1, 0x15, 0x00, 0x7f, -1, 0x75, 0x00, 0x7f, -1, 0xa1, 0x00, -1, 0xa2, 0x00, -1, 0xb5, 0x00, - -1, 0xab, 0x01, -1, 0xb1, 0x32, -1, 0xb4, 0xa0, 0xb5, 0x55, -1, 0xbb, 0x17, -1, 0xbe, 0x05, - -1, 0xc1, 0xc8, 0x80, 0xc8, -1, 0xc7, 0x0f, -1, 0xb6, 0x01, -1, 0xa6, -1, 0xaf, -3 }; +static s16 st7735r_init[] = { + -1, 0x01, + -2, 150, + -1, 0x11, + -2, 500, + -1, 0xB1, 0x01, 0x2C, 0x2D, + -1, 0xB2, 0x01, 0x2C, 0x2D, + -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D, + -1, 0xB4, 0x07, + -1, 0xC0, 0xA2, 0x02, 0x84, + -1, 0xC1, 0xC5, + -1, 0xC2, 0x0A, 0x00, + -1, 0xC3, 0x8A, 0x2A, + -1, 0xC4, 0x8A, 0xEE, + -1, 0xC5, 0x0E, + -1, 0x20, + -1, 0x36, 0xC0, + -1, 0x3A, 0x05, + -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22, + 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10, + -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e, + 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10, + -1, 0x29, + -2, 100, + -1, 0x13, + -2, 10, + -3 +}; + +static s16 ssd1289_init[] = { + -1, 0x00, 0x0001, + -1, 0x03, 0xA8A4, + -1, 0x0C, 0x0000, + -1, 0x0D, 0x080C, + -1, 0x0E, 0x2B00, + -1, 0x1E, 0x00B7, + -1, 0x01, 0x2B3F, + -1, 0x02, 0x0600, + -1, 0x10, 0x0000, + -1, 0x11, 0x6070, + -1, 0x05, 0x0000, + -1, 0x06, 0x0000, + -1, 0x16, 0xEF1C, + -1, 0x17, 0x0003, + -1, 0x07, 0x0233, + -1, 0x0B, 0x0000, + -1, 0x0F, 0x0000, + -1, 0x41, 0x0000, + -1, 0x42, 0x0000, + -1, 0x48, 0x0000, + -1, 0x49, 0x013F, + -1, 0x4A, 0x0000, + -1, 0x4B, 0x0000, + -1, 0x44, 0xEF00, + -1, 0x45, 0x0000, + -1, 0x46, 0x013F, + -1, 0x30, 0x0707, + -1, 0x31, 0x0204, + -1, 0x32, 0x0204, + -1, 0x33, 0x0502, + -1, 0x34, 0x0507, + -1, 0x35, 0x0204, + -1, 0x36, 0x0204, + -1, 0x37, 0x0502, + -1, 0x3A, 0x0302, + -1, 0x3B, 0x0302, + -1, 0x23, 0x0000, + -1, 0x24, 0x0000, + -1, 0x25, 0x8000, + -1, 0x4f, 0x0000, + -1, 0x4e, 0x0000, + -1, 0x22, + -3 +}; + +static s16 hx8340bn_init[] = { + -1, 0xC1, 0xFF, 0x83, 0x40, + -1, 0x11, + -2, 150, + -1, 0xCA, 0x70, 0x00, 0xD9, + -1, 0xB0, 0x01, 0x11, + -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06, + -2, 20, + -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A, + -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33, + -2, 10, + -1, 0xB5, 0x35, 0x20, 0x45, + -1, 0xB4, 0x33, 0x25, 0x4C, + -2, 10, + -1, 0x3A, 0x05, + -1, 0x29, + -2, 10, + -3 +}; + +static s16 ili9225_init[] = { + -1, 0x0001, 0x011C, + -1, 0x0002, 0x0100, + -1, 0x0003, 0x1030, + -1, 0x0008, 0x0808, + -1, 0x000C, 0x0000, + -1, 0x000F, 0x0A01, + -1, 0x0020, 0x0000, + -1, 0x0021, 0x0000, + -2, 50, + -1, 0x0010, 0x0A00, + -1, 0x0011, 0x1038, + -2, 50, + -1, 0x0012, 0x1121, + -1, 0x0013, 0x004E, + -1, 0x0014, 0x676F, + -1, 0x0030, 0x0000, + -1, 0x0031, 0x00DB, + -1, 0x0032, 0x0000, + -1, 0x0033, 0x0000, + -1, 0x0034, 0x00DB, + -1, 0x0035, 0x0000, + -1, 0x0036, 0x00AF, + -1, 0x0037, 0x0000, + -1, 0x0038, 0x00DB, + -1, 0x0039, 0x0000, + -1, 0x0050, 0x0000, + -1, 0x0051, 0x060A, + -1, 0x0052, 0x0D0A, + -1, 0x0053, 0x0303, + -1, 0x0054, 0x0A0D, + -1, 0x0055, 0x0A06, + -1, 0x0056, 0x0000, + -1, 0x0057, 0x0303, + -1, 0x0058, 0x0000, + -1, 0x0059, 
0x0000, + -2, 50, + -1, 0x0007, 0x1017, + -2, 50, + -3 +}; + +static s16 ili9320_init[] = { + -1, 0x00E5, 0x8000, + -1, 0x0000, 0x0001, + -1, 0x0001, 0x0100, + -1, 0x0002, 0x0700, + -1, 0x0003, 0x1030, + -1, 0x0004, 0x0000, + -1, 0x0008, 0x0202, + -1, 0x0009, 0x0000, + -1, 0x000A, 0x0000, + -1, 0x000C, 0x0000, + -1, 0x000D, 0x0000, + -1, 0x000F, 0x0000, + -1, 0x0010, 0x0000, + -1, 0x0011, 0x0007, + -1, 0x0012, 0x0000, + -1, 0x0013, 0x0000, + -2, 200, + -1, 0x0010, 0x17B0, + -1, 0x0011, 0x0031, + -2, 50, + -1, 0x0012, 0x0138, + -2, 50, + -1, 0x0013, 0x1800, + -1, 0x0029, 0x0008, + -2, 50, + -1, 0x0020, 0x0000, + -1, 0x0021, 0x0000, + -1, 0x0030, 0x0000, + -1, 0x0031, 0x0505, + -1, 0x0032, 0x0004, + -1, 0x0035, 0x0006, + -1, 0x0036, 0x0707, + -1, 0x0037, 0x0105, + -1, 0x0038, 0x0002, + -1, 0x0039, 0x0707, + -1, 0x003C, 0x0704, + -1, 0x003D, 0x0807, + -1, 0x0050, 0x0000, + -1, 0x0051, 0x00EF, + -1, 0x0052, 0x0000, + -1, 0x0053, 0x013F, + -1, 0x0060, 0x2700, + -1, 0x0061, 0x0001, + -1, 0x006A, 0x0000, + -1, 0x0080, 0x0000, + -1, 0x0081, 0x0000, + -1, 0x0082, 0x0000, + -1, 0x0083, 0x0000, + -1, 0x0084, 0x0000, + -1, 0x0085, 0x0000, + -1, 0x0090, 0x0010, + -1, 0x0092, 0x0000, + -1, 0x0093, 0x0003, + -1, 0x0095, 0x0110, + -1, 0x0097, 0x0000, + -1, 0x0098, 0x0000, + -1, 0x0007, 0x0173, + -3 +}; + +static s16 ili9325_init[] = { + -1, 0x00E3, 0x3008, + -1, 0x00E7, 0x0012, + -1, 0x00EF, 0x1231, + -1, 0x0001, 0x0100, + -1, 0x0002, 0x0700, + -1, 0x0003, 0x1030, + -1, 0x0004, 0x0000, + -1, 0x0008, 0x0207, + -1, 0x0009, 0x0000, + -1, 0x000A, 0x0000, + -1, 0x000C, 0x0000, + -1, 0x000D, 0x0000, + -1, 0x000F, 0x0000, + -1, 0x0010, 0x0000, + -1, 0x0011, 0x0007, + -1, 0x0012, 0x0000, + -1, 0x0013, 0x0000, + -2, 200, + -1, 0x0010, 0x1690, + -1, 0x0011, 0x0223, + -2, 50, + -1, 0x0012, 0x000D, + -2, 50, + -1, 0x0013, 0x1200, + -1, 0x0029, 0x000A, + -1, 0x002B, 0x000C, + -2, 50, + -1, 0x0020, 0x0000, + -1, 0x0021, 0x0000, + -1, 0x0030, 0x0000, + -1, 0x0031, 0x0506, + -1, 0x0032, 0x0104, + -1, 0x0035, 0x0207, + -1, 0x0036, 0x000F, + -1, 0x0037, 0x0306, + -1, 0x0038, 0x0102, + -1, 0x0039, 0x0707, + -1, 0x003C, 0x0702, + -1, 0x003D, 0x1604, + -1, 0x0050, 0x0000, + -1, 0x0051, 0x00EF, + -1, 0x0052, 0x0000, + -1, 0x0053, 0x013F, + -1, 0x0060, 0xA700, + -1, 0x0061, 0x0001, + -1, 0x006A, 0x0000, + -1, 0x0080, 0x0000, + -1, 0x0081, 0x0000, + -1, 0x0082, 0x0000, + -1, 0x0083, 0x0000, + -1, 0x0084, 0x0000, + -1, 0x0085, 0x0000, + -1, 0x0090, 0x0010, + -1, 0x0092, 0x0600, + -1, 0x0007, 0x0133, + -3 +}; + +static s16 ili9341_init[] = { + -1, 0x28, + -2, 20, + -1, 0xCF, 0x00, 0x83, 0x30, + -1, 0xED, 0x64, 0x03, 0x12, 0x81, + -1, 0xE8, 0x85, 0x01, 0x79, + -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02, + -1, 0xF7, 0x20, + -1, 0xEA, 0x00, 0x00, + -1, 0xC0, 0x26, + -1, 0xC1, 0x11, + -1, 0xC5, 0x35, 0x3E, + -1, 0xC7, 0xBE, + -1, 0xB1, 0x00, 0x1B, + -1, 0xB6, 0x0a, 0x82, 0x27, 0x00, + -1, 0xB7, 0x07, + -1, 0x3A, 0x55, + -1, 0x36, 0x48, + -1, 0x11, + -2, 120, + -1, 0x29, + -2, 20, + -3 +}; + +static s16 ssd1351_init[] = { + -1, 0xfd, 0x12, + -1, 0xfd, 0xb1, + -1, 0xae, + -1, 0xb3, 0xf1, + -1, 0xca, 0x7f, + -1, 0xa0, 0x74, + -1, 0x15, 0x00, 0x7f, + -1, 0x75, 0x00, 0x7f, + -1, 0xa1, 0x00, + -1, 0xa2, 0x00, + -1, 0xb5, 0x00, + -1, 0xab, 0x01, + -1, 0xb1, 0x32, + -1, 0xb4, 0xa0, 0xb5, 0x55, + -1, 0xbb, 0x17, + -1, 0xbe, 0x05, + -1, 0xc1, 0xc8, 0x80, 0xc8, + -1, 0xc7, 0x0f, + -1, 0xb6, 0x01, + -1, 0xa6, + -1, 0xaf, + -3 +}; /** * struct flexfb_lcd_controller - Describes the LCD controller properties @@ -142,7 +390,7 @@ struct 
flexfb_lcd_controller { unsigned int height; unsigned int setaddrwin; unsigned int regwidth; - int *init_seq; + s16 *init_seq; int init_seq_sz; }; @@ -582,6 +830,7 @@ static const struct platform_device_id flexfb_platform_ids[] = { { "flexpfb", 0 }, { }, }; +MODULE_DEVICE_TABLE(platform, flexfb_platform_ids); static struct platform_driver flexfb_platform_driver = { .driver = { diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig index 1f959339c6711339e4768dcf9aabe06cac0cf9de..5c009ab48f0051c5f44fc5c9df3f896f1cb928e2 100644 --- a/drivers/staging/fsl-mc/bus/Kconfig +++ b/drivers/staging/fsl-mc/bus/Kconfig @@ -1,25 +1,17 @@ # -# Freescale Management Complex (MC) bus drivers +# DPAA2 fsl-mc bus # -# Copyright (C) 2014 Freescale Semiconductor, Inc. +# Copyright (C) 2014-2016 Freescale Semiconductor, Inc. # # This file is released under the GPLv2 # config FSL_MC_BUS - bool "Freescale Management Complex (MC) bus driver" - depends on OF && ARM64 + bool "QorIQ DPAA2 fsl-mc bus driver" + depends on OF && ARCH_LAYERSCAPE select GENERIC_MSI_IRQ_DOMAIN help - Driver to enable the bus infrastructure for the Freescale - QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware - module of the QorIQ LS2 SoCs, that does resource management - for hardware building-blocks in the SoC that can be used - to dynamically create networking hardware objects such as - network interfaces (NICs), crypto accelerator instances, - or L2 switches. - - Only enable this option when building the kernel for - Freescale QorQIQ LS2xxxx SoCs. - - + Driver to enable the bus infrastructure for the QorIQ DPAA2 + architecture. The fsl-mc bus driver handles discovery of + DPAA2 objects (which are represented as Linux devices) and + binding objects to drivers. diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h similarity index 75% rename from drivers/staging/fsl-mc/include/dpbp-cmd.h rename to drivers/staging/fsl-mc/bus/dpbp-cmd.h index 2860411ddb517bceefca8f504b111a92a54a34b6..7d86539b541448ad6e61b7fe75f1afb5eb4de55f 100644 --- a/drivers/staging/fsl-mc/include/dpbp-cmd.h +++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2016 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. 
* - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -33,37 +33,48 @@ #define _FSL_DPBP_CMD_H /* DPBP Version */ -#define DPBP_VER_MAJOR 2 +#define DPBP_VER_MAJOR 3 #define DPBP_VER_MINOR 2 +/* Command versioning */ +#define DPBP_CMD_BASE_VERSION 1 +#define DPBP_CMD_ID_OFFSET 4 + +#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION) + /* Command IDs */ -#define DPBP_CMDID_CLOSE 0x800 -#define DPBP_CMDID_OPEN 0x804 -#define DPBP_CMDID_CREATE 0x904 -#define DPBP_CMDID_DESTROY 0x900 - -#define DPBP_CMDID_ENABLE 0x002 -#define DPBP_CMDID_DISABLE 0x003 -#define DPBP_CMDID_GET_ATTR 0x004 -#define DPBP_CMDID_RESET 0x005 -#define DPBP_CMDID_IS_ENABLED 0x006 - -#define DPBP_CMDID_SET_IRQ 0x010 -#define DPBP_CMDID_GET_IRQ 0x011 -#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 -#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 -#define DPBP_CMDID_SET_IRQ_MASK 0x014 -#define DPBP_CMDID_GET_IRQ_MASK 0x015 -#define DPBP_CMDID_GET_IRQ_STATUS 0x016 -#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 - -#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 -#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 +#define DPBP_CMDID_CLOSE DPBP_CMD(0x800) +#define DPBP_CMDID_OPEN DPBP_CMD(0x804) +#define DPBP_CMDID_CREATE DPBP_CMD(0x904) +#define DPBP_CMDID_DESTROY DPBP_CMD(0x984) +#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04) + +#define DPBP_CMDID_ENABLE DPBP_CMD(0x002) +#define DPBP_CMDID_DISABLE DPBP_CMD(0x003) +#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004) +#define DPBP_CMDID_RESET DPBP_CMD(0x005) +#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006) + +#define DPBP_CMDID_SET_IRQ DPBP_CMD(0x010) +#define DPBP_CMDID_GET_IRQ DPBP_CMD(0x011) +#define DPBP_CMDID_SET_IRQ_ENABLE DPBP_CMD(0x012) +#define DPBP_CMDID_GET_IRQ_ENABLE DPBP_CMD(0x013) +#define DPBP_CMDID_SET_IRQ_MASK DPBP_CMD(0x014) +#define DPBP_CMDID_GET_IRQ_MASK DPBP_CMD(0x015) +#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016) +#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017) + +#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x01b0) +#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x01b1) struct dpbp_cmd_open { __le32 dpbp_id; }; +struct dpbp_cmd_destroy { + __le32 object_id; +}; + #define DPBP_ENABLE 0x1 struct dpbp_rsp_is_enabled { diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c index 5d4cd812a400ff724ca0d6a7fbb1094225c0ec69..cf4782f6a0494ff6bc1ce86eadccf016edde5d2d 100644 --- a/drivers/staging/fsl-mc/bus/dpbp.c +++ b/drivers/staging/fsl-mc/bus/dpbp.c @@ -1,4 +1,5 @@ -/* Copyright 2013-2016 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -32,7 +32,8 @@ #include "../include/mc-sys.h" #include "../include/mc-cmd.h" #include "../include/dpbp.h" -#include "../include/dpbp-cmd.h" + +#include "dpbp-cmd.h" /** * dpbp_open() - Open a control session for the specified object. 
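Editorial note on the dpbp changes above: dpbp-cmd.h now builds every command ID with DPBP_CMD(id), which shifts the raw id above a 4-bit base-version field, so for example DPBP_CMDID_OPEN becomes (0x804 << 4) | 1 = 0x8041. The next hunk also reworks dpbp_create()/dpbp_destroy() so that objects are created against a parent-container token and identified afterwards by an object id rather than by a per-object token. The sketch below is illustrative only and is not part of the patch; mc_io, dprc_token and the zero-initialised cfg are assumed to be supplied by the caller, and the calls simply mirror the new signatures introduced here.

/* Illustrative sketch only -- not part of this patch. */
#include "../include/dpbp.h"

/* DPBP_CMDID_OPEN == DPBP_CMD(0x804) == (0x804 << 4) | 1 == 0x8041 */

static int example_dpbp_lifecycle(struct fsl_mc_io *mc_io, u16 dprc_token)
{
	struct dpbp_cfg cfg = { 0 };	/* default pool configuration (assumed) */
	u32 obj_id;
	int err;

	/* create against the parent container; an object id is returned */
	err = dpbp_create(mc_io, dprc_token, 0, &cfg, &obj_id);
	if (err)
		return err;

	/* ... open and use the object via dpbp_open() as before ... */

	/* destroy by object id, again relative to the parent container */
	return dpbp_destroy(mc_io, dprc_token, 0, obj_id);
}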
@@ -107,28 +108,26 @@ EXPORT_SYMBOL(dpbp_close); /** * dpbp_create() - Create the DPBP object. * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @cfg: Configuration structure - * @token: Returned token; use in subsequent API calls + * @obj_id: Returned object id; use in subsequent API calls * * Create the DPBP object, allocate required resources and * perform required initialization. * - * The object can be created either by declaring it in the - * DPL file, or by calling this function. - * This function returns a unique authentication token, - * associated with the specific object ID and the specific MC - * portal; this token must be used in all subsequent calls to - * this specific object. For objects that are created using the - * DPL file, call dpbp_open function to get an authentication - * token first. + * This function accepts an authentication token of a parent + * container that this object should be assigned to and returns + * an object id. This object_id will be used in all subsequent calls to + * this specific object. * * Return: '0' on Success; Error code otherwise. */ int dpbp_create(struct fsl_mc_io *mc_io, + u16 dprc_token, u32 cmd_flags, const struct dpbp_cfg *cfg, - u16 *token) + u32 *obj_id) { struct mc_command cmd = { 0 }; int err; @@ -137,7 +136,7 @@ int dpbp_create(struct fsl_mc_io *mc_io, /* prepare command */ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, - cmd_flags, 0); + cmd_flags, dprc_token); /* send command to mc*/ err = mc_send_command(mc_io, &cmd); @@ -145,7 +144,7 @@ int dpbp_create(struct fsl_mc_io *mc_io, return err; /* retrieve response parameters */ - *token = mc_cmd_hdr_read_token(&cmd); + *obj_id = mc_cmd_read_object_id(&cmd); return 0; } @@ -153,20 +152,25 @@ int dpbp_create(struct fsl_mc_io *mc_io, /** * dpbp_destroy() - Destroy the DPBP object and release all its resources. * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPBP object + * @obj_id: ID of DPBP object * * Return: '0' on Success; error code otherwise. */ int dpbp_destroy(struct fsl_mc_io *mc_io, + u16 dprc_token, u32 cmd_flags, - u16 token) + u32 obj_id) { + struct dpbp_cmd_destroy *cmd_params; struct mc_command cmd = { 0 }; /* prepare command */ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, - cmd_flags, token); + cmd_flags, dprc_token); + cmd_params = (struct dpbp_cmd_destroy *)cmd.params; + cmd_params->object_id = cpu_to_le32(obj_id); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -609,8 +613,6 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io, rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params; attr->bpid = le16_to_cpu(rsp_params->bpid); attr->id = le32_to_cpu(rsp_params->id); - attr->version.major = le16_to_cpu(rsp_params->version_major); - attr->version.minor = le16_to_cpu(rsp_params->version_minor); return 0; } @@ -689,3 +691,35 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io, return 0; } + +/** + * dpbp_get_api_version - Get Data Path Buffer Pool API version + * @mc_io: Pointer to Mc portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of Buffer Pool API + * @minor_ver: Minor version of Buffer Pool API + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpbp_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver) +{ + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION, + cmd_flags, 0); + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + mc_cmd_read_api_version(&cmd, major_ver, minor_ver); + + return 0; +} diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h similarity index 97% rename from drivers/staging/fsl-mc/include/dpcon-cmd.h rename to drivers/staging/fsl-mc/bus/dpcon-cmd.h index 536b2ef13507819455c83554c1d31a453f32419e..d0a5e194c5e1d1e8b51054db6dfae65332330b3e 100644 --- a/drivers/staging/fsl-mc/include/dpcon-cmd.h +++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2015 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h index d098a6d8f6bced916d68ae010d67ce25adecd2eb..7cb514963c26485947fb3c3830648bb2a51486b7 100644 --- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h +++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2016 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. 
* - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -33,25 +33,32 @@ #define _FSL_DPMCP_CMD_H /* Minimal supported DPMCP Version */ -#define DPMCP_MIN_VER_MAJOR 3 -#define DPMCP_MIN_VER_MINOR 0 +#define DPMCP_MIN_VER_MAJOR 3 +#define DPMCP_MIN_VER_MINOR 0 + +/* Command versioning */ +#define DPMCP_CMD_BASE_VERSION 1 +#define DPMCP_CMD_ID_OFFSET 4 + +#define DPMCP_CMD(id) ((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION) /* Command IDs */ -#define DPMCP_CMDID_CLOSE 0x800 -#define DPMCP_CMDID_OPEN 0x80b -#define DPMCP_CMDID_CREATE 0x90b -#define DPMCP_CMDID_DESTROY 0x900 - -#define DPMCP_CMDID_GET_ATTR 0x004 -#define DPMCP_CMDID_RESET 0x005 - -#define DPMCP_CMDID_SET_IRQ 0x010 -#define DPMCP_CMDID_GET_IRQ 0x011 -#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 -#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 -#define DPMCP_CMDID_SET_IRQ_MASK 0x014 -#define DPMCP_CMDID_GET_IRQ_MASK 0x015 -#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 +#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800) +#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b) +#define DPMCP_CMDID_CREATE DPMCP_CMD(0x90b) +#define DPMCP_CMDID_DESTROY DPMCP_CMD(0x98b) +#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b) + +#define DPMCP_CMDID_GET_ATTR DPMCP_CMD(0x004) +#define DPMCP_CMDID_RESET DPMCP_CMD(0x005) + +#define DPMCP_CMDID_SET_IRQ DPMCP_CMD(0x010) +#define DPMCP_CMDID_GET_IRQ DPMCP_CMD(0x011) +#define DPMCP_CMDID_SET_IRQ_ENABLE DPMCP_CMD(0x012) +#define DPMCP_CMDID_GET_IRQ_ENABLE DPMCP_CMD(0x013) +#define DPMCP_CMDID_SET_IRQ_MASK DPMCP_CMD(0x014) +#define DPMCP_CMDID_GET_IRQ_MASK DPMCP_CMD(0x015) +#define DPMCP_CMDID_GET_IRQ_STATUS DPMCP_CMD(0x016) struct dpmcp_cmd_open { __le32 dpmcp_id; @@ -61,6 +68,10 @@ struct dpmcp_cmd_create { __le32 portal_id; }; +struct dpmcp_cmd_destroy { + __le32 object_id; +}; + struct dpmcp_cmd_set_irq { /* cmd word 0 */ u8 irq_index; diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c index 55766f78a528675e6f54f111a6d080b52d31296e..e4d16519bcb40a32b3d298ffc9f314d0aa5302cc 100644 --- a/drivers/staging/fsl-mc/bus/dpmcp.c +++ b/drivers/staging/fsl-mc/bus/dpmcp.c @@ -1,4 +1,5 @@ -/* Copyright 2013-2016 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -106,28 +106,29 @@ int dpmcp_close(struct fsl_mc_io *mc_io, /** * dpmcp_create() - Create the DPMCP object. * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @cfg: Configuration structure - * @token: Returned token; use in subsequent API calls + * @obj_id: Returned object id; use in subsequent API calls * * Create the DPMCP object, allocate required resources and * perform required initialization. * * The object can be created either by declaring it in the * DPL file, or by calling this function. 
- * This function returns a unique authentication token, - * associated with the specific object ID and the specific MC - * portal; this token must be used in all subsequent calls to - * this specific object. For objects that are created using the - * DPL file, call dpmcp_open function to get an authentication - * token first. + + * This function accepts an authentication token of a parent + * container that this object should be assigned to and returns + * an object id. This object_id will be used in all subsequent calls to + * this specific object. * * Return: '0' on Success; Error code otherwise. */ int dpmcp_create(struct fsl_mc_io *mc_io, + u16 dprc_token, u32 cmd_flags, const struct dpmcp_cfg *cfg, - u16 *token) + u32 *obj_id) { struct mc_command cmd = { 0 }; struct dpmcp_cmd_create *cmd_params; @@ -136,7 +137,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io, /* prepare command */ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, - cmd_flags, 0); + cmd_flags, dprc_token); cmd_params = (struct dpmcp_cmd_create *)cmd.params; cmd_params->portal_id = cpu_to_le32(cfg->portal_id); @@ -146,7 +147,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io, return err; /* retrieve response parameters */ - *token = mc_cmd_hdr_read_token(&cmd); + *obj_id = mc_cmd_read_object_id(&cmd); return 0; } @@ -154,20 +155,25 @@ int dpmcp_create(struct fsl_mc_io *mc_io, /** * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @token: Token of DPMCP object + * @obj_id: ID of DPMCP object * * Return: '0' on Success; error code otherwise. */ int dpmcp_destroy(struct fsl_mc_io *mc_io, + u16 dprc_token, u32 cmd_flags, - u16 token) + u32 obj_id) { struct mc_command cmd = { 0 }; + struct dpmcp_cmd_destroy *cmd_params; /* prepare command */ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, - cmd_flags, token); + cmd_flags, dprc_token); + cmd_params = (struct dpmcp_cmd_destroy *)cmd.params; + cmd_params->object_id = cpu_to_le32(obj_id); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -497,8 +503,38 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io, /* retrieve response parameters */ rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params; attr->id = le32_to_cpu(rsp_params->id); - attr->version.major = le16_to_cpu(rsp_params->version_major); - attr->version.minor = le16_to_cpu(rsp_params->version_minor); + + return 0; +} + +/** + * dpmcp_get_api_version - Get Data Path Management Command Portal API version + * @mc_io: Pointer to Mc portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of Data Path Management Command Portal API + * @minor_ver: Minor version of Data Path Management Command Portal API + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpmcp_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver) +{ + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION, + cmd_flags, 0); + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + mc_cmd_read_api_version(&cmd, major_ver, minor_ver); return 0; } diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h index fe79d4d9293dec18df7511c6bdc900a84a3f9fc8..98a100d543f66b3e3c2dcaf53e5079a155b3ca28 100644 --- a/drivers/staging/fsl-mc/bus/dpmcp.h +++ b/drivers/staging/fsl-mc/bus/dpmcp.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2015 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -32,23 +32,24 @@ #ifndef __FSL_DPMCP_H #define __FSL_DPMCP_H -/* Data Path Management Command Portal API +/* + * Data Path Management Command Portal API * Contains initialization APIs and runtime control APIs for DPMCP */ struct fsl_mc_io; int dpmcp_open(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, + u32 cmd_flags, int dpmcp_id, - uint16_t *token); + u16 *token); /* Get portal ID from pool */ #define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) int dpmcp_close(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token); + u32 cmd_flags, + u16 token); /** * struct dpmcp_cfg - Structure representing DPMCP configuration @@ -59,18 +60,20 @@ struct dpmcp_cfg { int portal_id; }; -int dpmcp_create(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - const struct dpmcp_cfg *cfg, - uint16_t *token); +int dpmcp_create(struct fsl_mc_io *mc_io, + u16 dprc_token, + u32 cmd_flags, + const struct dpmcp_cfg *cfg, + u32 *obj_id); int dpmcp_destroy(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token); + u16 dprc_token, + u32 cmd_flags, + u32 obj_id); int dpmcp_reset(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token); + u32 cmd_flags, + u16 token); /* IRQ */ /* IRQ Index */ @@ -85,75 +88,65 @@ int dpmcp_reset(struct fsl_mc_io *mc_io, * @irq_num: A user defined number associated with this IRQ */ struct dpmcp_irq_cfg { - uint64_t paddr; - uint32_t val; - int irq_num; + u64 paddr; + u32 val; + int irq_num; }; -int dpmcp_set_irq(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token, - uint8_t irq_index, - struct dpmcp_irq_cfg *irq_cfg); - -int dpmcp_get_irq(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token, - uint8_t irq_index, - int *type, - struct dpmcp_irq_cfg *irq_cfg); - -int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token, - uint8_t irq_index, - uint8_t en); - -int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token, - uint8_t irq_index, - uint8_t *en); - -int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token, - uint8_t irq_index, - uint32_t mask); - -int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, - 
uint32_t cmd_flags, - uint16_t token, - uint8_t irq_index, - uint32_t *mask); - -int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token, - uint8_t irq_index, - uint32_t *status); +int dpmcp_set_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + struct dpmcp_irq_cfg *irq_cfg); + +int dpmcp_get_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + int *type, + struct dpmcp_irq_cfg *irq_cfg); + +int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en); + +int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en); + +int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask); + +int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask); + +int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status); /** * struct dpmcp_attr - Structure representing DPMCP attributes * @id: DPMCP object ID - * @version: DPMCP version */ struct dpmcp_attr { int id; - /** - * struct version - Structure representing DPMCP version - * @major: DPMCP major version - * @minor: DPMCP minor version - */ - struct { - uint16_t major; - uint16_t minor; - } version; }; -int dpmcp_get_attributes(struct fsl_mc_io *mc_io, - uint32_t cmd_flags, - uint16_t token, - struct dpmcp_attr *attr); +int dpmcp_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpmcp_attr *attr); #endif /* __FSL_DPMCP_H */ diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h index a7b77d58c8cdae39367ed8a442676b0c0615f078..cdddfb80eecc6b9767611fd575b1fccfdbe6606a 100644 --- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h +++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h @@ -12,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -41,13 +40,14 @@ #ifndef __FSL_DPMNG_CMD_H #define __FSL_DPMNG_CMD_H -/* Command IDs */ -#define DPMNG_CMDID_GET_CONT_ID 0x830 -#define DPMNG_CMDID_GET_VERSION 0x831 +/* Command versioning */ +#define DPMNG_CMD_BASE_VERSION 1 +#define DPMNG_CMD_ID_OFFSET 4 -struct dpmng_rsp_get_container_id { - __le32 container_id; -}; +#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831) struct dpmng_rsp_get_version { __le32 revision; diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c index 96b1d67756fa49f6a56dc1a68e9209ae68f05e53..ad5d5bbec529ca972c6a4da746b62b6c00be1b28 100644 --- a/drivers/staging/fsl-mc/bus/dpmng.c +++ b/drivers/staging/fsl-mc/bus/dpmng.c @@ -1,4 +1,5 @@ -/* Copyright 2013-2016 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. 
* - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_io, } EXPORT_SYMBOL(mc_get_version); -/** - * dpmng_get_container_id() - Get container ID associated with a given portal. - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @container_id: Requested container ID - * - * Return: '0' on Success; Error code otherwise. - */ -int dpmng_get_container_id(struct fsl_mc_io *mc_io, - u32 cmd_flags, - int *container_id) -{ - struct mc_command cmd = { 0 }; - struct dpmng_rsp_get_container_id *rsp_params; - int err; - - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID, - cmd_flags, - 0); - - /* send command to mc*/ - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - /* retrieve response parameters */ - rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params; - *container_id = le32_to_cpu(rsp_params->container_id); - - return 0; -} - diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h index 009d65673155e1f0b8ca833259e3d7f894235feb..588b8cafdbc7da218e1c707e424503721d1bd3fc 100644 --- a/drivers/staging/fsl-mc/bus/dprc-cmd.h +++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h @@ -12,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -42,48 +41,56 @@ #define _FSL_DPRC_CMD_H /* Minimal supported DPRC Version */ -#define DPRC_MIN_VER_MAJOR 5 +#define DPRC_MIN_VER_MAJOR 6 #define DPRC_MIN_VER_MINOR 0 +/* Command versioning */ +#define DPRC_CMD_BASE_VERSION 1 +#define DPRC_CMD_ID_OFFSET 4 + +#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION) + /* Command IDs */ -#define DPRC_CMDID_CLOSE 0x800 -#define DPRC_CMDID_OPEN 0x805 -#define DPRC_CMDID_CREATE 0x905 - -#define DPRC_CMDID_GET_ATTR 0x004 -#define DPRC_CMDID_RESET_CONT 0x005 - -#define DPRC_CMDID_SET_IRQ 0x010 -#define DPRC_CMDID_GET_IRQ 0x011 -#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 -#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 -#define DPRC_CMDID_SET_IRQ_MASK 0x014 -#define DPRC_CMDID_GET_IRQ_MASK 0x015 -#define DPRC_CMDID_GET_IRQ_STATUS 0x016 -#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 - -#define DPRC_CMDID_CREATE_CONT 0x151 -#define DPRC_CMDID_DESTROY_CONT 0x152 -#define DPRC_CMDID_SET_RES_QUOTA 0x155 -#define DPRC_CMDID_GET_RES_QUOTA 0x156 -#define DPRC_CMDID_ASSIGN 0x157 -#define DPRC_CMDID_UNASSIGN 0x158 -#define DPRC_CMDID_GET_OBJ_COUNT 0x159 -#define DPRC_CMDID_GET_OBJ 0x15A -#define DPRC_CMDID_GET_RES_COUNT 0x15B -#define DPRC_CMDID_GET_RES_IDS 0x15C -#define DPRC_CMDID_GET_OBJ_REG 0x15E -#define DPRC_CMDID_SET_OBJ_IRQ 0x15F -#define DPRC_CMDID_GET_OBJ_IRQ 0x160 -#define DPRC_CMDID_SET_OBJ_LABEL 0x161 -#define DPRC_CMDID_GET_OBJ_DESC 0x162 - -#define DPRC_CMDID_CONNECT 0x167 -#define DPRC_CMDID_DISCONNECT 0x168 -#define DPRC_CMDID_GET_POOL 0x169 -#define DPRC_CMDID_GET_POOL_COUNT 0x16A - -#define DPRC_CMDID_GET_CONNECTION 0x16C +#define DPRC_CMDID_CLOSE DPRC_CMD(0x800) +#define DPRC_CMDID_OPEN DPRC_CMD(0x805) +#define DPRC_CMDID_CREATE 
DPRC_CMD(0x905) +#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05) + +#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004) +#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005) + +#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010) +#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011) +#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012) +#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013) +#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014) +#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015) +#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016) +#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017) + +#define DPRC_CMDID_CREATE_CONT DPRC_CMD(0x151) +#define DPRC_CMDID_DESTROY_CONT DPRC_CMD(0x152) +#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830) +#define DPRC_CMDID_SET_RES_QUOTA DPRC_CMD(0x155) +#define DPRC_CMDID_GET_RES_QUOTA DPRC_CMD(0x156) +#define DPRC_CMDID_ASSIGN DPRC_CMD(0x157) +#define DPRC_CMDID_UNASSIGN DPRC_CMD(0x158) +#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159) +#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A) +#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B) +#define DPRC_CMDID_GET_RES_IDS DPRC_CMD(0x15C) +#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E) +#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F) +#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160) +#define DPRC_CMDID_SET_OBJ_LABEL DPRC_CMD(0x161) +#define DPRC_CMDID_GET_OBJ_DESC DPRC_CMD(0x162) + +#define DPRC_CMDID_CONNECT DPRC_CMD(0x167) +#define DPRC_CMDID_DISCONNECT DPRC_CMD(0x168) +#define DPRC_CMDID_GET_POOL DPRC_CMD(0x169) +#define DPRC_CMDID_GET_POOL_COUNT DPRC_CMD(0x16A) + +#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C) struct dprc_cmd_open { __le32 container_id; @@ -199,9 +206,6 @@ struct dprc_rsp_get_attributes { /* response word 1 */ __le32 options; __le32 portal_id; - /* response word 2 */ - __le16 version_major; - __le16 version_minor; }; struct dprc_cmd_set_res_quota { diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c index c5ee4639682b58de9f5273fd68f8c9f242604ac0..4e416d89b736ff47c28886e449a986445bef4b83 100644 --- a/drivers/staging/fsl-mc/bus/dprc-driver.c +++ b/drivers/staging/fsl-mc/bus/dprc-driver.c @@ -1,7 +1,7 @@ /* * Freescale data path resource container (DPRC) driver * - * Copyright (C) 2014 Freescale Semiconductor, Inc. + * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. 
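The DPRC_CMD()/DPMNG_CMD() macros introduced above change what a command ID is: the upper 12 bits carry the command number and the low 4 bits carry a base version. A worked expansion, using only values defined in this patch:

/*
 * Illustration, not part of the patch: with DPRC_CMD_BASE_VERSION = 1 and
 * DPRC_CMD_ID_OFFSET = 4,
 *
 *   DPRC_CMDID_OPEN  = DPRC_CMD(0x805) = (0x805 << 4) | 1 = 0x8051
 *   DPRC_CMDID_CLOSE = DPRC_CMD(0x800) = (0x800 << 4) | 1 = 0x8001
 *
 * Because the version is pre-packed here, mc_encode_cmd_header() can copy
 * cmd_id into the header verbatim; the MC_CMD_HDR_CMDID_SHIFT/MASK handling
 * is dropped from mc-cmd.h further down in this patch.
 */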
* Author: German Rivera * * This file is licensed under the terms of the GNU General Public @@ -505,7 +505,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev) dprc_irq0_handler, dprc_irq0_handler_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, - "FSL MC DPRC irq0", + dev_name(&mc_dev->dev), &mc_dev->dev); if (error < 0) { dev_err(&mc_dev->dev, @@ -597,6 +597,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); bool mc_io_created = false; bool msi_domain_set = false; + u16 major_ver, minor_ver; if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) return -EINVAL; @@ -669,13 +670,21 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) goto error_cleanup_open; } - if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR || - (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR && - mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) { + error = dprc_get_api_version(mc_dev->mc_io, 0, + &major_ver, + &minor_ver); + if (error < 0) { + dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n", + error); + goto error_cleanup_open; + } + + if (major_ver < DPRC_MIN_VER_MAJOR || + (major_ver == DPRC_MIN_VER_MAJOR && + minor_ver < DPRC_MIN_VER_MINOR)) { dev_err(&mc_dev->dev, "ERROR: DPRC version %d.%d not supported\n", - mc_bus->dprc_attr.version.major, - mc_bus->dprc_attr.version.minor); + major_ver, minor_ver); error = -ENOTSUPP; goto error_cleanup_open; } diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c index 9fea3def604193467249bece1be216a34e9e8d86..572edd4c066e2872ec64568914cd8f7ad37b52f2 100644 --- a/drivers/staging/fsl-mc/bus/dprc.c +++ b/drivers/staging/fsl-mc/bus/dprc.c @@ -1,4 +1,5 @@ -/* Copyright 2013-2016 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -565,8 +565,6 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io, attr->icid = le16_to_cpu(rsp_params->icid); attr->options = le32_to_cpu(rsp_params->options); attr->portal_id = le32_to_cpu(rsp_params->portal_id); - attr->version.major = le16_to_cpu(rsp_params->version_major); - attr->version.minor = le16_to_cpu(rsp_params->version_minor); return 0; } @@ -1386,3 +1384,66 @@ int dprc_get_connection(struct fsl_mc_io *mc_io, return 0; } + +/** + * dprc_get_api_version - Get Data Path Resource Container API version + * @mc_io: Pointer to Mc portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of Data Path Resource Container API + * @minor_ver: Minor version of Data Path Resource Container API + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dprc_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver) +{ + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION, + cmd_flags, 0); + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + mc_cmd_read_api_version(&cmd, major_ver, minor_ver); + + return 0; +} + +/** + * dprc_get_container_id - Get container ID associated with a given portal. + * @mc_io: Pointer to Mc portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @container_id: Requested container id + * + * Return: '0' on Success; Error code otherwise. + */ +int dprc_get_container_id(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int *container_id) +{ + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID, + cmd_flags, + 0); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *container_id = (int)mc_cmd_read_object_id(&cmd); + + return 0; +} diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c index e93ab53bae6763bcf5bec35ddb75013e3950acd9..ce07096c3b1fb098508e8aa7acffa67cfe68f2d2 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c @@ -1,7 +1,7 @@ /* - * Freescale MC object device allocator driver + * fsl-mc object allocator driver * - * Copyright (C) 2013 Freescale Semiconductor, Inc. + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -12,9 +12,9 @@ #include #include "../include/mc-bus.h" #include "../include/mc-sys.h" -#include "../include/dpbp-cmd.h" -#include "../include/dpcon-cmd.h" +#include "dpbp-cmd.h" +#include "dpcon-cmd.h" #include "fsl-mc-private.h" #define FSL_MC_IS_ALLOCATABLE(_obj_type) \ @@ -23,15 +23,12 @@ strcmp(_obj_type, "dpcon") == 0) /** - * fsl_mc_resource_pool_add_device - add allocatable device to a resource - * pool of a given MC bus + * fsl_mc_resource_pool_add_device - add allocatable object to a resource + * pool of a given fsl-mc bus * - * @mc_bus: pointer to the MC bus - * @pool_type: MC bus pool type - * @mc_dev: Pointer to allocatable MC object device - * - * It adds an allocatable MC object device to a container's resource pool of - * the given resource type + * @mc_bus: pointer to the fsl-mc bus + * @pool_type: pool type + * @mc_dev: pointer to allocatable fsl-mc device */ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus *mc_bus, @@ -95,10 +92,10 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus * fsl_mc_resource_pool_remove_device - remove an allocatable device from a * resource pool * - * @mc_dev: Pointer to allocatable MC object device + * @mc_dev: pointer to allocatable fsl-mc device * - * It permanently removes an allocatable MC object device from the resource - * pool, the device is currently in, as long as it is in the pool's free list. + * It permanently removes an allocatable fsl-mc device from the resource + * pool. It's an error if the device is in use. 
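dprc_get_api_version() and dprc_get_container_id() added above replace the removed dprc_attr version fields and dpmng_get_container_id(); dprc_probe() earlier in this patch uses the former to gate on DPRC_MIN_VER_MAJOR/MINOR. The comparison it performs, factored into a hypothetical helper purely for illustration:

/* Mirrors the version check in dprc_probe(); not part of this patch. */
static bool example_dprc_api_supported(u16 major, u16 minor)
{
	if (major < DPRC_MIN_VER_MAJOR)
		return false;
	if (major == DPRC_MIN_VER_MAJOR && minor < DPRC_MIN_VER_MINOR)
		return false;
	return true;
}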
*/ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device *mc_dev) @@ -255,17 +252,18 @@ void fsl_mc_resource_free(struct fsl_mc_resource *resource) EXPORT_SYMBOL_GPL(fsl_mc_resource_free); /** - * fsl_mc_object_allocate - Allocates a MC object device of the given - * pool type from a given MC bus + * fsl_mc_object_allocate - Allocates an fsl-mc object of the given + * pool type from a given fsl-mc bus instance * - * @mc_dev: MC device for which the MC object device is to be allocated - * @pool_type: MC bus resource pool type - * @new_mc_dev: Pointer to area where the pointer to the allocated - * MC object device is to be returned + * @mc_dev: fsl-mc device which is used in conjunction with the + * allocated object + * @pool_type: pool type + * @new_mc_dev: pointer to area where the pointer to the allocated device + * is to be returned * - * This function allocates a MC object device from the device's parent DPRC, - * from the corresponding MC bus' pool of allocatable MC object devices of - * the given resource type. mc_dev cannot be a DPRC itself. + * Allocatable objects are always used in conjunction with some functional + * device. This function allocates an object of the specified type from + * the DPRC containing the functional device. * * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC * portals are allocated using fsl_mc_portal_allocate(), instead of @@ -312,10 +310,9 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, EXPORT_SYMBOL_GPL(fsl_mc_object_allocate); /** - * fsl_mc_object_free - Returns an allocatable MC object device to the - * corresponding resource pool of a given MC bus. - * - * @mc_adev: Pointer to the MC object device + * fsl_mc_object_free - Returns an fsl-mc object to the resource + * pool where it came from. + * @mc_adev: Pointer to the fsl-mc device */ void fsl_mc_object_free(struct fsl_mc_device *mc_adev) { @@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev) EXPORT_SYMBOL_GPL(fsl_mc_object_free); /* - * Initialize the interrupt pool associated with a MC bus. - * It allocates a block of IRQs from the GIC-ITS + * A DPRC and the devices in the DPRC all share the same GIC-ITS device + * ID. A block of IRQs is pre-allocated and maintained in a pool + * from which devices can allocate them when needed. + */ + +/* + * Initialize the interrupt pool associated with an fsl-mc bus. + * It allocates a block of IRQs from the GIC-ITS. */ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, unsigned int irq_count) @@ -395,7 +398,7 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); /** - * Teardown the interrupt pool associated with an MC bus. + * Teardown the interrupt pool associated with an fsl-mc bus. * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. */ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) @@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); /** - * It allocates the IRQs required by a given MC object device. The - * IRQs are allocated from the interrupt pool associated with the - * MC bus that contains the device, if the device is not a DPRC device. - * Otherwise, the IRQs are allocated from the interrupt pool associated - * with the MC bus that represents the DPRC device itself. + * Allocate the IRQs required by a given fsl-mc device. 
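The comment block above spells out that a DPRC and everything inside it share one GIC-ITS device ID, with interrupts pre-allocated into a per-bus pool. For orientation, a bound fsl-mc driver draws from and returns to that pool roughly as follows (a hedged sketch, not part of this patch; the irqs[] field is assumed from the fsl-mc headers):

/* Hypothetical probe-side sketch of IRQ pool usage. */
static int example_setup_irqs(struct fsl_mc_device *mc_dev)
{
	int err;

	err = fsl_mc_allocate_irqs(mc_dev);
	if (err)
		return err;

	/* request handlers for the entries in mc_dev->irqs[] here */

	return 0;
}

On teardown (or on any later error path) the driver calls fsl_mc_free_irqs(mc_dev) to return the interrupts to the pool.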
*/ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) { @@ -495,8 +494,7 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); /* - * It frees the IRQs that were allocated for a MC object device, by - * returning them to the corresponding interrupt pool. + * Frees the IRQs that were allocated for an fsl-mc device. */ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) { @@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev) return error; dev_dbg(&mc_dev->dev, - "Allocatable MC object device bound to fsl_mc_allocator driver"); + "Allocatable fsl-mc device bound to fsl_mc_allocator driver"); return 0; } @@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev) } dev_dbg(&mc_dev->dev, - "Allocatable MC object device unbound from fsl_mc_allocator driver"); + "Allocatable fsl-mc device unbound from fsl_mc_allocator driver"); return 0; } diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c index 44f64b6f0fc90654e78d9a15ab4230ab066c9c84..5ac373c0c7161e32f74a314108d3245fe8c44e73 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c @@ -1,7 +1,7 @@ /* * Freescale Management Complex (MC) bus driver * - * Copyright (C) 2014 Freescale Semiconductor, Inc. + * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. * Author: German Rivera * * This file is licensed under the terms of the GNU General Public @@ -9,6 +9,8 @@ * warranty of any kind, whether express or implied. */ +#define pr_fmt(fmt) "fsl-mc: " fmt + #include #include #include @@ -34,7 +36,7 @@ static struct kmem_cache *mc_dev_cache; /** * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device - * @root_mc_bus_dev: MC object device representing the root DPRC + * @root_mc_bus_dev: fsl-mc device representing the root DPRC * @num_translation_ranges: number of entries in addr_translation_ranges * @translation_ranges: array of bus to system address translation ranges */ @@ -62,8 +64,8 @@ struct fsl_mc_addr_translation_range { /** * fsl_mc_bus_match - device to driver matching callback - * @dev: the MC object device structure to match against - * @drv: the device driver to search for matching MC object device id + * @dev: the fsl-mc device to match against + * @drv: the device driver to search for matching fsl-mc object type * structures * * Returns 1 on success, 0 otherwise. @@ -91,7 +93,7 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv) /* * Traverse the match_id table of the given driver, trying to find - * a matching for the given MC object device. + * a matching for the given device. 
*/ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { if (id->vendor == mc_dev->obj_desc.vendor && @@ -164,8 +166,7 @@ static int fsl_mc_driver_probe(struct device *dev) error = mc_drv->probe(mc_dev); if (error < 0) { - dev_err(dev, "MC object device probe callback failed: %d\n", - error); + dev_err(dev, "%s failed: %d\n", __func__, error); return error; } @@ -183,9 +184,7 @@ static int fsl_mc_driver_remove(struct device *dev) error = mc_drv->remove(mc_dev); if (error < 0) { - dev_err(dev, - "MC object device remove callback failed: %d\n", - error); + dev_err(dev, "%s failed: %d\n", __func__, error); return error; } @@ -232,8 +231,6 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver, return error; } - pr_info("MC object device driver %s registered\n", - mc_driver->driver.name); return 0; } EXPORT_SYMBOL_GPL(__fsl_mc_driver_register); @@ -315,21 +312,6 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io, return error; } -static int get_dprc_version(struct fsl_mc_io *mc_io, - int container_id, u16 *major, u16 *minor) -{ - struct dprc_attributes attr; - int error; - - error = get_dprc_attr(mc_io, container_id, &attr); - if (error == 0) { - *major = attr.version.major; - *minor = attr.version.minor; - } - - return error; -} - static int translate_mc_addr(struct fsl_mc_device *mc_dev, enum dprc_region_type mc_region_type, u64 mc_offset, phys_addr_t *phys_addr) @@ -452,7 +434,7 @@ bool fsl_mc_is_root_dprc(struct device *dev) } /** - * Add a newly discovered MC object device to be visible in Linux + * Add a newly discovered fsl-mc device to be visible in Linux */ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, struct fsl_mc_io *mc_io, @@ -533,8 +515,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, goto error_cleanup_dev; } else { /* - * A non-DPRC MC object device has to be a child of another - * MC object (specifically a DPRC object) + * A non-DPRC object has to be a child of a DPRC, use the + * parent's ICID and interrupt domain. 
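fsl_mc_bus_match() above walks the driver's match_id_table, comparing each entry's vendor (and object type) against the device's obj_desc. As a rough sketch of what such a table looks like in a client driver (the struct fsl_mc_device_id layout and FSL_MC_VENDOR_FREESCALE are assumed from the fsl-mc headers, not quoted from this patch):

/* Illustrative match table; a zero vendor terminates the walk. */
static const struct fsl_mc_device_id example_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};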
*/ mc_dev->icid = parent_mc_dev->icid; mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; @@ -572,8 +554,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, } (void)get_device(&mc_dev->dev); - dev_dbg(parent_dev, "Added MC object device %s\n", - dev_name(&mc_dev->dev)); + dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev)); *new_mc_dev = mc_dev; return 0; @@ -590,10 +571,10 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, EXPORT_SYMBOL_GPL(fsl_mc_device_add); /** - * fsl_mc_device_remove - Remove a MC object device from being visible to + * fsl_mc_device_remove - Remove an fsl-mc device from being visible to * Linux * - * @mc_dev: Pointer to a MC object device object + * @mc_dev: Pointer to an fsl-mc device */ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev) { @@ -749,8 +730,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) struct mc_version mc_version; struct resource res; - dev_info(&pdev->dev, "Root MC bus device probed"); - mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; @@ -783,8 +762,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) goto error_cleanup_mc_io; } - dev_info(&pdev->dev, - "Freescale Management Complex Firmware version: %u.%u.%u\n", + dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n", mc_version.major, mc_version.minor, mc_version.revision); error = get_mc_addr_translation_ranges(&pdev->dev, @@ -793,7 +771,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) if (error < 0) goto error_cleanup_mc_io; - error = dpmng_get_container_id(mc_io, 0, &container_id); + error = dprc_get_container_id(mc_io, 0, &container_id); if (error < 0) { dev_err(&pdev->dev, "dpmng_get_container_id() failed: %d\n", error); @@ -801,8 +779,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) } memset(&obj_desc, 0, sizeof(struct dprc_obj_desc)); - error = get_dprc_version(mc_io, container_id, - &obj_desc.ver_major, &obj_desc.ver_minor); + error = dprc_get_api_version(mc_io, 0, + &obj_desc.ver_major, + &obj_desc.ver_minor); if (error < 0) goto error_cleanup_mc_io; @@ -840,7 +819,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev) fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); mc->root_mc_bus_dev->mc_io = NULL; - dev_info(&pdev->dev, "Root MC bus device removed"); return 0; } @@ -875,12 +853,10 @@ static int __init fsl_mc_bus_driver_init(void) error = bus_register(&fsl_mc_bus_type); if (error < 0) { - pr_err("fsl-mc bus type registration failed: %d\n", error); + pr_err("bus type registration failed: %d\n", error); goto error_cleanup_cache; } - pr_info("fsl-mc bus type registered\n"); - error = platform_driver_register(&fsl_mc_bus_driver); if (error < 0) { pr_err("platform_driver_register() failed: %d\n", error); diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c index 3d46b1b1fa18e9c4a7adfac680b3a9db9ced48ca..7975c6e6fee327c563c099b6bef1bc5c35e9189b 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c @@ -1,7 +1,7 @@ /* * Freescale Management Complex (MC) bus driver MSI support * - * Copyright (C) 2015 Freescale Semiconductor, Inc. + * Copyright (C) 2015-2016 Freescale Semiconductor, Inc. 
* Author: German Rivera * * This file is licensed under the terms of the GNU General Public diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h index d459c2673f39690f9013778faa6514d7aedd7a6e..5c49c9d2df6ac914cda929cd48df7000d1c8440a 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h +++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h @@ -10,6 +10,9 @@ #ifndef _FSL_MC_PRIVATE_H_ #define _FSL_MC_PRIVATE_H_ +#include "../include/mc.h" +#include "../include/mc-bus.h" + int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc, struct fsl_mc_io *mc_io, struct device *parent_dev, diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c index 7a6ac640752fdb8f1bb46dc623047cd292ad88a0..6b1cd574644f80bae411037efecaa9fbb2cdb3d6 100644 --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c @@ -1,7 +1,7 @@ /* * Freescale Management Complex (MC) bus driver MSI support * - * Copyright (C) 2015 Freescale Semiconductor, Inc. + * Copyright (C) 2015-2016 Freescale Semiconductor, Inc. * Author: German Rivera * * This file is licensed under the terms of the GNU General Public @@ -19,7 +19,7 @@ #include "../include/mc-bus.h" static struct irq_chip its_msi_irq_chip = { - .name = "fsl-mc-bus-msi", + .name = "ITS-fMSI", .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_eoi = irq_chip_eoi_parent, diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c index 798c965fe2033da18de82686e76492d4bec69736..d66b87f0903b031b8835a7a53028c44a3a9e248c 100644 --- a/drivers/staging/fsl-mc/bus/mc-io.c +++ b/drivers/staging/fsl-mc/bus/mc-io.c @@ -1,4 +1,5 @@ -/* Copyright 2013-2016 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -11,7 +12,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c index 285917c7c8e43088045c21fc04765463f7985b42..4d82802b384da8aed0d958540239c6f5ddd1ba2d 100644 --- a/drivers/staging/fsl-mc/bus/mc-sys.c +++ b/drivers/staging/fsl-mc/bus/mc-sys.c @@ -1,4 +1,5 @@ -/* Copyright 2013-2014 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * I/O services to send MC commands to the MC hardware * @@ -13,7 +14,6 @@ * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. 
* - * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any @@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd) struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; u16 cmd_id = le16_to_cpu(hdr->cmd_id); - return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT; + return cmd_id; } static int mc_status_to_error(enum mc_cmd_status status) @@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io, if (time_after_eq(jiffies, jiffies_until_timeout)) { dev_dbg(mc_io->dev, - "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", + "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n", mc_io->portal_phys_addr, (unsigned int)mc_cmd_hdr_read_token(cmd), (unsigned int)mc_cmd_hdr_read_cmdid(cmd)); @@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io, timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; if (timeout_usecs == 0) { dev_dbg(mc_io->dev, - "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", + "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n", mc_io->portal_phys_addr, (unsigned int)mc_cmd_hdr_read_token(cmd), (unsigned int)mc_cmd_hdr_read_cmdid(cmd)); @@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) if (status != MC_CMD_STATUS_OK) { dev_dbg(mc_io->dev, - "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", + "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n", mc_io->portal_phys_addr, (unsigned int)mc_cmd_hdr_read_token(cmd), (unsigned int)mc_cmd_hdr_read_cmdid(cmd), diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h index e14e85a5d6dfd982c08f02d110939921b5a12f30..bf34b1e0e730f5c0b5b8bdafab269003067cdde3 100644 --- a/drivers/staging/fsl-mc/include/dpbp.h +++ b/drivers/staging/fsl-mc/include/dpbp.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2015 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -32,7 +33,8 @@ #ifndef __FSL_DPBP_H #define __FSL_DPBP_H -/* Data Path Buffer Pool API +/* + * Data Path Buffer Pool API * Contains initialization APIs and runtime control APIs for DPBP */ @@ -44,8 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io, u16 *token); int dpbp_close(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token); + u32 cmd_flags, + u16 token); /** * struct dpbp_cfg - Structure representing DPBP configuration @@ -55,14 +57,16 @@ struct dpbp_cfg { u32 options; }; -int dpbp_create(struct fsl_mc_io *mc_io, - u32 cmd_flags, - const struct dpbp_cfg *cfg, - u16 *token); +int dpbp_create(struct fsl_mc_io *mc_io, + u16 dprc_token, + u32 cmd_flags, + const struct dpbp_cfg *cfg, + u32 *obj_id); int dpbp_destroy(struct fsl_mc_io *mc_io, + u16 dprc_token, u32 cmd_flags, - u16 token); + u32 obj_id); int dpbp_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, @@ -88,85 +92,75 @@ int dpbp_reset(struct fsl_mc_io *mc_io, * @irq_num: A user defined number associated with this IRQ */ struct dpbp_irq_cfg { - u64 addr; - u32 val; - int irq_num; + u64 addr; + u32 val; + int irq_num; }; -int dpbp_set_irq(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - struct dpbp_irq_cfg *irq_cfg); - -int dpbp_get_irq(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - int *type, - struct dpbp_irq_cfg *irq_cfg); - -int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 en); - -int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 *en); +int dpbp_set_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + struct dpbp_irq_cfg *irq_cfg); + +int dpbp_get_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + int *type, + struct dpbp_irq_cfg *irq_cfg); + +int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en); + +int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en); int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 mask); + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask); int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *mask); - -int dpbp_get_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *status); - -int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 status); + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask); + +int dpbp_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status); + +int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status); /** * struct dpbp_attr - Structure representing DPBP attributes * @id: DPBP object ID - * @version: DPBP version * @bpid: Hardware buffer pool ID; should be used as an argument in * acquire/release operations on buffers */ struct dpbp_attr { int id; - /** - * struct version - Structure representing DPBP version - * @major: DPBP major version - * @minor: DPBP minor version - */ - struct { - u16 major; - u16 minor; - } version; u16 bpid; }; -int dpbp_get_attributes(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dpbp_attr *attr); +int 
dpbp_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpbp_attr *attr); /** * DPBP notifications options @@ -196,24 +190,29 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io, * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values */ struct dpbp_notification_cfg { - u32 depletion_entry; - u32 depletion_exit; - u32 surplus_entry; - u32 surplus_exit; - u64 message_iova; - u64 message_ctx; - u16 options; + u32 depletion_entry; + u32 depletion_exit; + u32 surplus_entry; + u32 surplus_exit; + u64 message_iova; + u64 message_ctx; + u16 options; }; -int dpbp_set_notifications(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dpbp_notification_cfg *cfg); +int dpbp_set_notifications(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpbp_notification_cfg *cfg); + +int dpbp_get_notifications(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpbp_notification_cfg *cfg); -int dpbp_get_notifications(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dpbp_notification_cfg *cfg); +int dpbp_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver); /** @} */ diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h index e5cfd017f9a59038892521b82d0f37b5e83e6fae..7d8e255da578ae5286550f89430d004d3cf715a6 100644 --- a/drivers/staging/fsl-mc/include/dpmng.h +++ b/drivers/staging/fsl-mc/include/dpmng.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2015 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -32,7 +33,8 @@ #ifndef __FSL_DPMNG_H #define __FSL_DPMNG_H -/* Management Complex General API +/* + * Management Complex General API * Contains general API for the Management Complex firmware */ @@ -58,12 +60,12 @@ struct mc_version { u32 revision; }; -int mc_get_version(struct fsl_mc_io *mc_io, - u32 cmd_flags, - struct mc_version *mc_ver_info); +int mc_get_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + struct mc_version *mc_ver_info); -int dpmng_get_container_id(struct fsl_mc_io *mc_io, - u32 cmd_flags, - int *container_id); +int dpmng_get_container_id(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int *container_id); #endif /* __FSL_DPMNG_H */ diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h index 593b2bbe7f714379f767a79aa38ec7fb4a830e5a..f9ea769ccfab159a3ccd301b2c43c03ed6a919bd 100644 --- a/drivers/staging/fsl-mc/include/dprc.h +++ b/drivers/staging/fsl-mc/include/dprc.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2015 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,7 +35,8 @@ #include "mc-cmd.h" -/* Data Path Resource Container API +/* + * Data Path Resource Container API * Contains DPRC API for managing and querying DPAA resources */ @@ -70,12 +72,14 @@ int dprc_close(struct fsl_mc_io *mc_io, * and can be retrieved using dprc_get_attributes() */ -/* Spawn Policy Option allowed - Indicates that the new container is allowed +/* + * Spawn Policy Option allowed - Indicates that the new container is allowed * to spawn and have its own child containers. 
*/ #define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001 -/* General Container allocation policy - Indicates that the new container is +/* + * General Container allocation policy - Indicates that the new container is * allowed to allocate requested resources from its parent container; if not * set, the container is only allowed to use resources in its own pools; Note * that this is a container's global policy, but the parent container may @@ -83,12 +87,14 @@ int dprc_close(struct fsl_mc_io *mc_io, */ #define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002 -/* Object initialization allowed - software context associated with this +/* + * Object initialization allowed - software context associated with this * container is allowed to invoke object initialization operations. */ #define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004 -/* Topology change allowed - software context associated with this +/* + * Topology change allowed - software context associated with this * container is allowed to invoke topology operations, such as attach/detach * of network objects. */ @@ -116,17 +122,17 @@ struct dprc_cfg { char label[16]; }; -int dprc_create_container(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dprc_cfg *cfg, - int *child_container_id, - u64 *child_portal_offset); +int dprc_create_container(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dprc_cfg *cfg, + int *child_container_id, + u64 *child_portal_offset); -int dprc_destroy_container(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int child_container_id); +int dprc_destroy_container(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int child_container_id); int dprc_reset_container(struct fsl_mc_io *mc_io, u32 cmd_flags, @@ -139,7 +145,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io, #define DPRC_IRQ_INDEX 0 /* Number of dprc's IRQs */ -#define DPRC_NUM_OF_IRQS 1 +#define DPRC_NUM_OF_IRQS 1 /* DPRC IRQ events */ @@ -151,12 +157,14 @@ int dprc_reset_container(struct fsl_mc_io *mc_io, #define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 /* IRQ event - Indicates that resources removed from the container */ #define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 -/* IRQ event - Indicates that one of the descendant containers that opened by +/* + * IRQ event - Indicates that one of the descendant containers that opened by * this container is destroyed */ #define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 -/* IRQ event - Indicates that on one of the container's opened object is +/* + * IRQ event - Indicates that on one of the container's opened object is * destroyed */ #define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 @@ -171,59 +179,59 @@ int dprc_reset_container(struct fsl_mc_io *mc_io, * @irq_num: A user defined number associated with this IRQ */ struct dprc_irq_cfg { - phys_addr_t paddr; - u32 val; - int irq_num; + phys_addr_t paddr; + u32 val; + int irq_num; }; -int dprc_set_irq(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - struct dprc_irq_cfg *irq_cfg); - -int dprc_get_irq(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - int *type, - struct dprc_irq_cfg *irq_cfg); - -int dprc_set_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 en); - -int dprc_get_irq_enable(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u8 *en); - -int dprc_set_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 mask); - -int dprc_get_irq_mask(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 
irq_index, - u32 *mask); - -int dprc_get_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 *status); - -int dprc_clear_irq_status(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - u8 irq_index, - u32 status); +int dprc_set_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + struct dprc_irq_cfg *irq_cfg); + +int dprc_get_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + int *type, + struct dprc_irq_cfg *irq_cfg); + +int dprc_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en); + +int dprc_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en); + +int dprc_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask); + +int dprc_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask); + +int dprc_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status); + +int dprc_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status); /** * struct dprc_attributes - Container attributes @@ -231,63 +239,56 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io, * @icid: Container's ICID * @portal_id: Container's portal ID * @options: Container's options as set at container's creation - * @version: DPRC version */ struct dprc_attributes { int container_id; u16 icid; int portal_id; u64 options; - /** - * struct version - DPRC version - * @major: DPRC major version - * @minor: DPRC minor version - */ - struct { - u16 major; - u16 minor; - } version; }; -int dprc_get_attributes(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - struct dprc_attributes *attributes); +int dprc_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dprc_attributes *attributes); int dprc_set_res_quota(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int child_container_id, - char *type, - u16 quota); + u32 cmd_flags, + u16 token, + int child_container_id, + char *type, + u16 quota); int dprc_get_res_quota(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int child_container_id, - char *type, - u16 *quota); + u32 cmd_flags, + u16 token, + int child_container_id, + char *type, + u16 *quota); /* Resource request options */ -/* Explicit resource ID request - The requested objects/resources +/* + * Explicit resource ID request - The requested objects/resources * are explicit and sequential (in case of resources). * The base ID is given at res_req at base_align field */ -#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 +#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 -/* Aligned resources request - Relevant only for resources +/* + * Aligned resources request - Relevant only for resources * request (and not objects). Indicates that resources base ID should be * sequential and aligned to the value given at dprc_res_req base_align field */ -#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 +#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 -/* Plugged Flag - Relevant only for object assignment request. +/* + * Plugged Flag - Relevant only for object assignment request. * Indicates that after all objects assigned. An interrupt will be invoked at * the relevant GPP. The assigned object will be marked as plugged. 
* plugged objects can't be assigned from their container */ -#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 +#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 /** * struct dprc_res_req - Resource request descriptor, to be used in assignment @@ -312,33 +313,33 @@ struct dprc_res_req { int id_base_align; }; -int dprc_assign(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int container_id, - struct dprc_res_req *res_req); - -int dprc_unassign(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int child_container_id, - struct dprc_res_req *res_req); - -int dprc_get_pool_count(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int *pool_count); - -int dprc_get_pool(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int pool_index, - char *type); +int dprc_assign(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int container_id, + struct dprc_res_req *res_req); + +int dprc_unassign(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int child_container_id, + struct dprc_res_req *res_req); + +int dprc_get_pool_count(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *pool_count); + +int dprc_get_pool(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int pool_index, + char *type); int dprc_get_obj_count(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int *obj_count); + u32 cmd_flags, + u16 token, + int *obj_count); /* Objects Attributes Flags */ @@ -353,7 +354,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io, * masters; * user is responsible for proper memory handling through IOMMU configuration. */ -#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 +#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 /** * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() @@ -381,41 +382,41 @@ struct dprc_obj_desc { u16 flags; }; -int dprc_get_obj(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - int obj_index, - struct dprc_obj_desc *obj_desc); - -int dprc_get_obj_desc(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - char *obj_type, - int obj_id, - struct dprc_obj_desc *obj_desc); - -int dprc_set_obj_irq(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - char *obj_type, - int obj_id, - u8 irq_index, - struct dprc_irq_cfg *irq_cfg); - -int dprc_get_obj_irq(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - char *obj_type, - int obj_id, - u8 irq_index, - int *type, - struct dprc_irq_cfg *irq_cfg); - -int dprc_get_res_count(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - char *type, - int *res_count); +int dprc_get_obj(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int obj_index, + struct dprc_obj_desc *obj_desc); + +int dprc_get_obj_desc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + char *obj_type, + int obj_id, + struct dprc_obj_desc *obj_desc); + +int dprc_set_obj_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + char *obj_type, + int obj_id, + u8 irq_index, + struct dprc_irq_cfg *irq_cfg); + +int dprc_get_obj_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + char *obj_type, + int obj_id, + u8 irq_index, + int *type, + struct dprc_irq_cfg *irq_cfg); + +int dprc_get_res_count(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + char *type, + int *res_count); /** * enum dprc_iter_status - Iteration status @@ -444,11 +445,11 @@ struct dprc_res_ids_range_desc { enum dprc_iter_status iter_status; }; -int dprc_get_res_ids(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - char *type, - struct dprc_res_ids_range_desc *range_desc); +int 
dprc_get_res_ids(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + char *type, + struct dprc_res_ids_range_desc *range_desc); /* Region flags */ /* Cacheable - Indicates that region should be mapped as cacheable */ @@ -481,20 +482,20 @@ struct dprc_region_desc { enum dprc_region_type type; }; -int dprc_get_obj_region(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - char *obj_type, - int obj_id, - u8 region_index, - struct dprc_region_desc *region_desc); - -int dprc_set_obj_label(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - char *obj_type, - int obj_id, - char *label); +int dprc_get_obj_region(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + char *obj_type, + int obj_id, + u8 region_index, + struct dprc_region_desc *region_desc); + +int dprc_set_obj_label(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + char *obj_type, + int obj_id, + char *label); /** * struct dprc_endpoint - Endpoint description for link connect/disconnect @@ -521,24 +522,33 @@ struct dprc_connection_cfg { u32 max_rate; }; -int dprc_connect(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - const struct dprc_endpoint *endpoint1, - const struct dprc_endpoint *endpoint2, +int dprc_connect(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dprc_endpoint *endpoint1, + const struct dprc_endpoint *endpoint2, const struct dprc_connection_cfg *cfg); -int dprc_disconnect(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - const struct dprc_endpoint *endpoint); - -int dprc_get_connection(struct fsl_mc_io *mc_io, - u32 cmd_flags, - u16 token, - const struct dprc_endpoint *endpoint1, - struct dprc_endpoint *endpoint2, - int *state); +int dprc_disconnect(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dprc_endpoint *endpoint); + +int dprc_get_connection(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dprc_endpoint *endpoint1, + struct dprc_endpoint *endpoint2, + int *state); + +int dprc_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver); + +int dprc_get_container_id(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int *container_id); #endif /* _FSL_DPRC_H */ diff --git a/drivers/staging/fsl-mc/include/mc-bus.h b/drivers/staging/fsl-mc/include/mc-bus.h index 170684a57ca238f15c5d427002204565edc084e4..42700de94d590e8563033b431c6adf7c423164c7 100644 --- a/drivers/staging/fsl-mc/include/mc-bus.h +++ b/drivers/staging/fsl-mc/include/mc-bus.h @@ -1,7 +1,7 @@ /* * Freescale Management Complex (MC) bus declarations * - * Copyright (C) 2014 Freescale Semiconductor, Inc. + * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. * Author: German Rivera * * This file is licensed under the terms of the GNU General Public @@ -42,8 +42,8 @@ struct msi_domain_info; */ struct fsl_mc_resource_pool { enum fsl_mc_pool_type type; - int16_t max_count; - int16_t free_count; + int max_count; + int free_count; struct mutex mutex; /* serializes access to free_list */ struct list_head free_list; struct fsl_mc_bus *mc_bus; diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h index 5decb9890c31d727f968818f1175934e2fde91dc..2e08aa31b08402e97b1eb1937b3e4030896bdbf1 100644 --- a/drivers/staging/fsl-mc/include/mc-cmd.h +++ b/drivers/staging/fsl-mc/include/mc-cmd.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2015 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -48,6 +49,15 @@ struct mc_command { u64 params[MC_CMD_NUM_OF_PARAMS]; }; +struct mc_rsp_create { + __le32 object_id; +}; + +struct mc_rsp_api_ver { + __le16 major_ver; + __le16 minor_ver; +}; + enum mc_cmd_status { MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ @@ -72,11 +82,6 @@ enum mc_cmd_status { /* Command completion flag */ #define MC_CMD_FLAG_INTR_DIS 0x01 -#define MC_CMD_HDR_CMDID_MASK 0xFFF0 -#define MC_CMD_HDR_CMDID_SHIFT 4 -#define MC_CMD_HDR_TOKEN_MASK 0xFFC0 -#define MC_CMD_HDR_TOKEN_SHIFT 6 - static inline u64 mc_encode_cmd_header(u16 cmd_id, u32 cmd_flags, u16 token) @@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u16 cmd_id, u64 header = 0; struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header; - hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) & - MC_CMD_HDR_CMDID_MASK); - hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) & - MC_CMD_HDR_TOKEN_MASK); + hdr->cmd_id = cpu_to_le16(cmd_id); + hdr->token = cpu_to_le16(token); hdr->status = MC_CMD_STATUS_READY; if (cmd_flags & MC_CMD_FLAG_PRI) hdr->flags_hw = MC_CMD_FLAG_PRI; @@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd) struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; u16 token = le16_to_cpu(hdr->token); - return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT; + return token; +} + +static inline u32 mc_cmd_read_object_id(struct mc_command *cmd) +{ + struct mc_rsp_create *rsp_params; + + rsp_params = (struct mc_rsp_create *)cmd->params; + return le32_to_cpu(rsp_params->object_id); +} + +static inline void mc_cmd_read_api_version(struct mc_command *cmd, + u16 *major_ver, + u16 *minor_ver) +{ + struct mc_rsp_api_ver *rsp_params; + + rsp_params = (struct mc_rsp_api_ver *)cmd->params; + *major_ver = le16_to_cpu(rsp_params->major_ver); + *minor_ver = le16_to_cpu(rsp_params->minor_ver); } #endif /* __FSL_MC_CMD_H */ diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h index 89ad0cf54702099e921cfc579f14af9bcd5c9818..dca7f9084e05e8d628a23c74ed825f5a842f032c 100644 --- a/drivers/staging/fsl-mc/include/mc-sys.h +++ b/drivers/staging/fsl-mc/include/mc-sys.h @@ -1,4 +1,5 @@ -/* Copyright 2013-2014 Freescale Semiconductor Inc. +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. * * Interface of the I/O services to send MC commands to the MC hardware * diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h index f6e720e8446028fa7c6ea17b1e9f35e64b899573..1c46c0c2a895a12133480b08fe89cf3d7934d206 100644 --- a/drivers/staging/fsl-mc/include/mc.h +++ b/drivers/staging/fsl-mc/include/mc.h @@ -1,7 +1,7 @@ /* * Freescale Management Complex (MC) bus public interface * - * Copyright (C) 2014 Freescale Semiconductor, Inc. + * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. 
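The mc-cmd.h hunk above adds generic response layouts (struct mc_rsp_create, struct mc_rsp_api_ver) and the mc_cmd_read_object_id()/mc_cmd_read_api_version() accessors; together with the create-time dprc_token parameter this is what lets dpbp_create() and dpmcp_create() hand back a plain object ID instead of an opened token. A hedged sketch of the generic shape of such a wrapper (EXAMPLE_CMDID_CREATE is a placeholder, not a real define from this patch):

/* Illustrative create wrapper built on the helpers added above. */
static int example_obj_create(struct fsl_mc_io *mc_io, u16 dprc_token,
			      u32 cmd_flags, u32 *obj_id)
{
	struct mc_command cmd = { 0 };
	int err;

	/* EXAMPLE_CMDID_CREATE stands in for the object's real command ID */
	cmd.header = mc_encode_cmd_header(EXAMPLE_CMDID_CREATE,
					  cmd_flags, dprc_token);

	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* the new object's ID arrives in the first response word */
	*obj_id = mc_cmd_read_object_id(&cmd);

	return 0;
}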
* Author: German Rivera * * This file is licensed under the terms of the GNU General Public @@ -81,7 +81,7 @@ enum fsl_mc_pool_type { */ struct fsl_mc_resource { enum fsl_mc_pool_type type; - int32_t id; + s32 id; void *data; struct fsl_mc_resource_pool *parent_pool; struct list_head node; diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c index 49c718b91e55ab60115c965ed39b6cf6cdb83a3b..41a49c8194e50c2bdaf81c782613255ac1fe1bbd 100644 --- a/drivers/staging/fwserial/fwserial.c +++ b/drivers/staging/fwserial/fwserial.c @@ -1667,12 +1667,6 @@ static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt) pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); } -static inline void fill_unplug_req(struct fwserial_mgmt_pkt *pkt) -{ - pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG); - pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); -} - static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt) { pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK); diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h index 88414e5a70cc7a71385381c398bd1cc541cbdcdc..7ddeabc0e50a4ae4fa67b30ca5ce86ba44266d41 100644 --- a/drivers/staging/gdm724x/gdm_lte.h +++ b/drivers/staging/gdm724x/gdm_lte.h @@ -47,15 +47,15 @@ struct phy_dev { void *priv_dev; struct net_device *dev[MAX_NIC_TYPE]; int (*send_hci_func)(void *priv_dev, void *data, int len, - void (*cb)(void *cb_data), void *cb_data); + void (*cb)(void *cb_data), void *cb_data); int (*send_sdu_func)(void *priv_dev, void *data, int len, - unsigned int dftEpsId, unsigned int epsId, - void (*cb)(void *cb_data), void *cb_data, - int dev_idx, int nic_type); + unsigned int dftEpsId, unsigned int epsId, + void (*cb)(void *cb_data), void *cb_data, + int dev_idx, int nic_type); int (*rcv_func)(void *priv_dev, - int (*cb)(void *cb_data, void *data, int len, - int context), - void *cb_data, int context); + int (*cb)(void *cb_data, void *data, int len, + int context), + void *cb_data, int context); struct gdm_endian * (*get_endian)(void *priv_dev); }; diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h index 297438b4ddcbc39a0694e5dd63c09d2a86be2ccf..195c5902989f766a2083155b3255c6265b2975f6 100644 --- a/drivers/staging/gdm724x/gdm_tty.h +++ b/drivers/staging/gdm724x/gdm_tty.h @@ -17,7 +17,6 @@ #include #include - #define TTY_MAX_COUNT 2 #define MAX_ISSUE_NUM 3 diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h index 7cf979b3f826c6a1eade416347d986d69fdb70db..5ebd73157f5ab6cf6d6b7e066173a21d5002d629 100644 --- a/drivers/staging/gdm724x/netlink_k.h +++ b/drivers/staging/gdm724x/netlink_k.h @@ -18,7 +18,8 @@ #include struct sock *netlink_init(int unit, - void (*cb)(struct net_device *dev, u16 type, void *msg, int len)); + void (*cb)(struct net_device *dev, + u16 type, void *msg, int len)); int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len); #endif /* _NETLINK_K_H_ */ diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c index 70323aa11f243baf831fe9c4510c9a0993ab4f52..3fda0cd6bb42b812687233806b8c406073ceef29 100644 --- a/drivers/staging/greybus/arche-apb-ctrl.c +++ b/drivers/staging/greybus/arche-apb-ctrl.c @@ -183,7 +183,7 @@ static int standby_boot_seq(struct platform_device *pdev) * Pasted from WDM spec, * - A falling edge on POWEROFF_L is detected (a) * - WDM enters standby mode, but no output signals are changed - 
* */ + */ /* TODO: POWEROFF_L is input to WDM module */ apb->state = ARCHE_PLATFORM_STATE_STANDBY; @@ -285,8 +285,10 @@ static ssize_t state_store(struct device *dev, if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING) return count; - /* First we want to make sure we power off everything - * and then enter FW flashing state */ + /* + * First we want to make sure we power off everything + * and then enter FW flashing state + */ poweroff_seq(pdev); ret = fw_flashing_seq(pdev); } else { diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c index 34307ac3f2558b1e30794bd77c7c2b85ae4839e3..338c2d3ee8421335f7f6e3b89779246fb92a159f 100644 --- a/drivers/staging/greybus/arche-platform.c +++ b/drivers/staging/greybus/arche-platform.c @@ -186,6 +186,7 @@ int arche_platform_change_state(enum arche_platform_state state, exit: spin_unlock_irqrestore(&arche_pdata->wake_lock, flags); mutex_unlock(&arche_pdata->platform_state_mutex); + put_device(&pdev->dev); of_node_put(np); return ret; } @@ -456,7 +457,8 @@ static ssize_t state_store(struct device *dev, goto exit; /* First we want to make sure we power off everything - * and then activate back again */ + * and then activate back again + */ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff); arche_platform_poweroff_seq(arche_pdata); diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c index 8a0744b58a329aff24a620aca07feb4f00b1a32f..f8862c6d71026ccc5ab793da69da01dc875b51d3 100644 --- a/drivers/staging/greybus/audio_codec.c +++ b/drivers/staging/greybus/audio_codec.c @@ -405,7 +405,6 @@ static void gbcodec_shutdown(struct snd_pcm_substream *substream, params->state = GBAUDIO_CODEC_SHUTDOWN; mutex_unlock(&codec->lock); pm_relax(dai->dev); - return; } static int gbcodec_hw_params(struct snd_pcm_substream *substream, @@ -655,8 +654,10 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream) ret = gb_audio_apbridgea_shutdown_rx(data->connection, 0); params->state = GBAUDIO_CODEC_STOP; - } else + } else { ret = -EINVAL; + } + if (ret) dev_err_ratelimited(dai->dev, "%s:Error during %s %s stream:%d\n", diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h index ca027bd99ad70e36b0908004cfc20c3a875bd72d..62fd93939a1f2c98e698d3e6af1c90a0a11ce01e 100644 --- a/drivers/staging/greybus/audio_codec.h +++ b/drivers/staging/greybus/audio_codec.h @@ -158,7 +158,6 @@ struct gbaudio_module_info { int dev_id; /* check if it should be bundle_id/hd_cport_id */ int vid; int pid; - int slot; int type; int set_uevent; char vstr[NAME_SIZE]; diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h index c4ca09754a6aa080d2c2d49b3d0e3a8aa905d3a7..5ab8f5e0ed3fbb774d9e02a841a699f4b4ad54ef 100644 --- a/drivers/staging/greybus/audio_manager.h +++ b/drivers/staging/greybus/audio_manager.h @@ -18,10 +18,9 @@ struct gb_audio_manager_module_descriptor { char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN]; - int slot; int vid; int pid; - int cport; + int intf_id; unsigned int ip_devices; unsigned int op_devices; }; diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c index a10e96ad79c1f9f520c7d26d1fcc41b40f16b354..adc16977452d198bbfac63186427051083cc7a73 100644 --- a/drivers/staging/greybus/audio_manager_module.c +++ b/drivers/staging/greybus/audio_manager_module.c @@ -81,16 +81,6 @@ static ssize_t gb_audio_module_name_show( static struct 
gb_audio_manager_module_attribute gb_audio_module_name_attribute = __ATTR(name, 0664, gb_audio_module_name_show, NULL); -static ssize_t gb_audio_module_slot_show( - struct gb_audio_manager_module *module, - struct gb_audio_manager_module_attribute *attr, char *buf) -{ - return sprintf(buf, "%d", module->desc.slot); -} - -static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute = - __ATTR(slot, 0664, gb_audio_module_slot_show, NULL); - static ssize_t gb_audio_module_vid_show( struct gb_audio_manager_module *module, struct gb_audio_manager_module_attribute *attr, char *buf) @@ -111,16 +101,16 @@ static ssize_t gb_audio_module_pid_show( static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute = __ATTR(pid, 0664, gb_audio_module_pid_show, NULL); -static ssize_t gb_audio_module_cport_show( +static ssize_t gb_audio_module_intf_id_show( struct gb_audio_manager_module *module, struct gb_audio_manager_module_attribute *attr, char *buf) { - return sprintf(buf, "%d", module->desc.cport); + return sprintf(buf, "%d", module->desc.intf_id); } static struct gb_audio_manager_module_attribute - gb_audio_module_cport_attribute = - __ATTR(cport, 0664, gb_audio_module_cport_show, NULL); + gb_audio_module_intf_id_attribute = + __ATTR(intf_id, 0664, gb_audio_module_intf_id_show, NULL); static ssize_t gb_audio_module_ip_devices_show( struct gb_audio_manager_module *module, @@ -146,10 +136,9 @@ static struct gb_audio_manager_module_attribute static struct attribute *gb_audio_module_default_attrs[] = { &gb_audio_module_name_attribute.attr, - &gb_audio_module_slot_attribute.attr, &gb_audio_module_vid_attribute.attr, &gb_audio_module_pid_attribute.attr, - &gb_audio_module_cport_attribute.attr, + &gb_audio_module_intf_id_attribute.attr, &gb_audio_module_ip_devices_attribute.attr, &gb_audio_module_op_devices_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ @@ -164,29 +153,26 @@ static struct kobj_type gb_audio_module_type = { static void send_add_uevent(struct gb_audio_manager_module *module) { char name_string[128]; - char slot_string[64]; char vid_string[64]; char pid_string[64]; - char cport_string[64]; + char intf_id_string[64]; char ip_devices_string[64]; char op_devices_string[64]; char *envp[] = { name_string, - slot_string, vid_string, pid_string, - cport_string, + intf_id_string, ip_devices_string, op_devices_string, NULL }; snprintf(name_string, 128, "NAME=%s", module->desc.name); - snprintf(slot_string, 64, "SLOT=%d", module->desc.slot); snprintf(vid_string, 64, "VID=%d", module->desc.vid); snprintf(pid_string, 64, "PID=%d", module->desc.pid); - snprintf(cport_string, 64, "CPORT=%d", module->desc.cport); + snprintf(intf_id_string, 64, "INTF_ID=%d", module->desc.intf_id); snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X", module->desc.ip_devices); snprintf(op_devices_string, 64, "O/P DEVICES=0x%X", @@ -246,13 +232,12 @@ int gb_audio_manager_module_create( void gb_audio_manager_module_dump(struct gb_audio_manager_module *module) { - pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n", + pr_info("audio module #%d name=%s vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X\n", module->id, module->desc.name, - module->desc.slot, module->desc.vid, module->desc.pid, - module->desc.cport, + module->desc.intf_id, module->desc.ip_devices, module->desc.op_devices); } diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c index 
d8bf8591ff9ecfc5e7286de3120370d18974686f..34ebd147052f448187210a6de739204c49e89579 100644 --- a/drivers/staging/greybus/audio_manager_sysfs.c +++ b/drivers/staging/greybus/audio_manager_sysfs.c @@ -20,10 +20,9 @@ static ssize_t manager_sysfs_add_store( int num = sscanf(buf, "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s " - "slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X" - "o/p devices=0x%X", - desc.name, &desc.slot, &desc.vid, &desc.pid, - &desc.cport, &desc.ip_devices, &desc.op_devices); + "vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X", + desc.name, &desc.vid, &desc.pid, &desc.intf_id, + &desc.ip_devices, &desc.op_devices); if (num != 7) return -EINVAL; @@ -44,7 +43,7 @@ static ssize_t manager_sysfs_remove_store( { int id; - int num = sscanf(buf, "%d", &id); + int num = kstrtoint(buf, 10, &id); if (num != 1) return -EINVAL; @@ -65,16 +64,17 @@ static ssize_t manager_sysfs_dump_store( { int id; - int num = sscanf(buf, "%d", &id); + int num = kstrtoint(buf, 10, &id); if (num == 1) { num = gb_audio_manager_dump_module(id); if (num) return num; - } else if (!strncmp("all", buf, 3)) + } else if (!strncmp("all", buf, 3)) { gb_audio_manager_dump_all(); - else + } else { return -EINVAL; + } return count; } diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c index ae1c0fa8575281658ee2229457ab766b28945eef..17a9948b1ba1e2c26007f922242019fb63f6567c 100644 --- a/drivers/staging/greybus/audio_module.c +++ b/drivers/staging/greybus/audio_module.c @@ -207,10 +207,8 @@ static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule, struct gbaudio_data_connection *dai; dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL); - if (!dai) { - dev_err(gbmodule->dev, "DAI Malloc failure\n"); + if (!dai) return -ENOMEM; - } connection = gb_connection_create_offloaded(bundle, le16_to_cpu(cport_desc->id), @@ -345,10 +343,9 @@ static int gb_audio_probe(struct gb_bundle *bundle, dev_dbg(dev, "Inform set_event:%d to above layer\n", 1); /* prepare for the audio manager */ strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN); - desc.slot = 1; /* todo */ desc.vid = 2; /* todo */ desc.pid = 3; /* todo */ - desc.cport = gbmodule->dev_id; + desc.intf_id = gbmodule->dev_id; desc.op_devices = gbmodule->op_devices; desc.ip_devices = gbmodule->ip_devices; gbmodule->manager_id = gb_audio_manager_add(&desc); diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c index b6251691a33d456437159a241d73a89a5a0411f4..8b216ca99cf946ecf81f2a21b91517c506d4e9b4 100644 --- a/drivers/staging/greybus/audio_topology.c +++ b/drivers/staging/greybus/audio_topology.c @@ -114,6 +114,7 @@ static int gbaudio_map_widgetname(struct gbaudio_module_info *module, const char *name) { struct gbaudio_widget *widget; + list_for_each_entry(widget, &module->widget_list, list) { if (!strncmp(widget->name, name, NAME_SIZE)) return widget->id; @@ -1044,8 +1045,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module, control->texts = (const char * const *) gb_generate_enum_strings(module, gbenum); control->items = gbenum->items; - } else + } else { csize = sizeof(struct gb_audio_control); + } + *w_size += csize; curr = (void *)curr + csize; list_add(&control->list, &module->widget_ctl_list); @@ -1190,8 +1193,9 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module, control->texts = (const char * const *) gb_generate_enum_strings(module, gbenum); control->items = gbenum->items; - } 
else + } else { csize = sizeof(struct gb_audio_control); + } list_add(&control->list, &module->ctl_list); dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id, diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c index 491bdd720c0c24a09879300b63599d978428ae39..0ee291ca2c721aa7e306e4275b9a97eefbd4f0c1 100644 --- a/drivers/staging/greybus/camera.c +++ b/drivers/staging/greybus/camera.c @@ -289,6 +289,7 @@ static const int gb_camera_configure_streams_validate_response( for (i = 0; i < resp->num_streams; i++) { struct gb_camera_stream_config_response *cfg = &resp->config[i]; + if (cfg->padding) { gcam_err(gcam, "stream #%u padding != 0\n", i); return -EIO; @@ -796,7 +797,7 @@ static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams, if (gb_nstreams > GB_CAMERA_MAX_STREAMS) return -EINVAL; - gb_streams = kzalloc(gb_nstreams * sizeof(*gb_streams), GFP_KERNEL); + gb_streams = kcalloc(gb_nstreams, sizeof(*gb_streams), GFP_KERNEL); if (!gb_streams) return -ENOMEM; @@ -937,7 +938,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam, return ret; /* For each stream to configure parse width, height and format */ - streams = kzalloc(nstreams * sizeof(*streams), GFP_KERNEL); + streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL); if (!streams) return -ENOMEM; @@ -1091,7 +1092,7 @@ static ssize_t gb_camera_debugfs_read(struct file *file, char __user *buf, size_t len, loff_t *offset) { const struct gb_camera_debugfs_entry *op = file->private_data; - struct gb_camera *gcam = file->f_inode->i_private; + struct gb_camera *gcam = file_inode(file)->i_private; struct gb_camera_debugfs_buffer *buffer; ssize_t ret; @@ -1113,12 +1114,12 @@ static ssize_t gb_camera_debugfs_write(struct file *file, loff_t *offset) { const struct gb_camera_debugfs_entry *op = file->private_data; - struct gb_camera *gcam = file->f_inode->i_private; + struct gb_camera *gcam = file_inode(file)->i_private; ssize_t ret; char *kbuf; if (len > 1024) - return -EINVAL; + return -EINVAL; kbuf = kmalloc(len + 1, GFP_KERNEL); if (!kbuf) diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c index baab460eeaa3828f7a3f84490f4dda566b647c44..c1929dfa9b313a24a35a3b88bf6a42e6ae4270f3 100644 --- a/drivers/staging/greybus/es2.c +++ b/drivers/staging/greybus/es2.c @@ -175,10 +175,9 @@ static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd) u8 *data; int retval; - data = kmalloc(size, GFP_KERNEL); + data = kmemdup(req, size, GFP_KERNEL); if (!data) return -ENOMEM; - memcpy(data, req, size); retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), cmd, @@ -1034,7 +1033,7 @@ static struct arpc *arpc_alloc(void *payload, u16 size, u8 type) goto err_free_req; rpc->req->type = type; - rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size); + rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size); memcpy(rpc->req->data, payload, size); init_completion(&rpc->response_received); @@ -1250,7 +1249,7 @@ static int apb_log_poll(void *data) static ssize_t apb_log_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { - struct es2_ap_dev *es2 = f->f_inode->i_private; + struct es2_ap_dev *es2 = file_inode(f)->i_private; ssize_t ret; size_t copied; char *tmp_buf; @@ -1304,7 +1303,7 @@ static void usb_log_disable(struct es2_ap_dev *es2) static ssize_t apb_log_enable_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { - struct es2_ap_dev *es2 = f->f_inode->i_private; + struct es2_ap_dev *es2 = file_inode(f)->i_private; 
int enable = !IS_ERR_OR_NULL(es2->apb_log_task); char tmp_buf[3]; @@ -1317,7 +1316,7 @@ static ssize_t apb_log_enable_write(struct file *f, const char __user *buf, { int enable; ssize_t retval; - struct es2_ap_dev *es2 = f->f_inode->i_private; + struct es2_ap_dev *es2 = file_inode(f)->i_private; retval = kstrtoint_from_user(buf, count, 10, &enable); if (retval) diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c index 70dd9e5a1cf2da831d37a180f2fd18f25568222d..1a18ab1ff8aaefc4c7f802e74c66ed5cf213b4b9 100644 --- a/drivers/staging/greybus/log.c +++ b/drivers/staging/greybus/log.c @@ -55,8 +55,10 @@ static int gb_log_request_handler(struct gb_operation *op) /* Ensure the buffer is 0 terminated */ receive->msg[len - 1] = '\0'; - /* Print with dev_dbg() so that it can be easily turned off using - * dynamic debugging (and prevent any DoS) */ + /* + * Print with dev_dbg() so that it can be easily turned off using + * dynamic debugging (and prevent any DoS) + */ dev_dbg(dev, "%s", receive->msg); return 0; diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c index 5649ef1e379d14c31e120cfb49f00eba7ab77404..66b37ea29ef0641085bebe59c81ac9ed6e6ec3bb 100644 --- a/drivers/staging/greybus/sdio.c +++ b/drivers/staging/greybus/sdio.c @@ -191,9 +191,8 @@ static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event) state_changed = 1; } - if (event & GB_SDIO_WP) { + if (event & GB_SDIO_WP) host->read_only = true; - } if (state_changed) { dev_info(mmc_dev(host->mmc), "card %s now event\n", diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c index 550055ec27a5ce620f42b2c92662719ec0c32a65..8779270cadc1d4c86c21b8ac8ba08c96356fc564 100644 --- a/drivers/staging/greybus/svc.c +++ b/drivers/staging/greybus/svc.c @@ -757,7 +757,7 @@ static int gb_svc_version_request(struct gb_operation *op) static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, size_t len, loff_t *offset) { - struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private; + struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private; struct gb_svc *svc = pwrmon_rails->svc; int ret, desc; u32 value; @@ -780,7 +780,7 @@ static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, size_t len, loff_t *offset) { - struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private; + struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private; struct gb_svc *svc = pwrmon_rails->svc; int ret, desc; u32 value; @@ -803,7 +803,7 @@ static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf, size_t len, loff_t *offset) { - struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private; + struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private; struct gb_svc *svc = pwrmon_rails->svc; int ret, desc; u32 value; diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c index 2e68af7dea6d51ad8c3ae5657e0af9debaf4a285..29e6c1c12807bec896abb6648b41a7fb4475d480 100644 --- a/drivers/staging/greybus/timesync.c +++ b/drivers/staging/greybus/timesync.c @@ -807,11 +807,11 @@ static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state) return -EINVAL; mutex_lock(×ync_svc->mutex); - if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) { + if (timesync_svc->state != 
GB_TIMESYNC_STATE_INVALID) gb_timesync_set_state_atomic(timesync_svc, state); - } else { + else ret = -ENODEV; - } + mutex_unlock(×ync_svc->mutex); return ret; } @@ -921,7 +921,7 @@ EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous); static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf, size_t len, loff_t *offset, bool ktime) { - struct gb_timesync_svc *timesync_svc = file->f_inode->i_private; + struct gb_timesync_svc *timesync_svc = file_inode(file)->i_private; char *buf; ssize_t ret = 0; diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index 2633d2bfb1b4f86bfd34e0da52f1e647b105e301..6d39f4a04754cec83d3ff3c135dfb559acb5e62f 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -623,9 +623,6 @@ static int get_serial_info(struct gb_tty *gb_tty, { struct serial_struct tmp; - if (!info) - return -EINVAL; - memset(&tmp, 0, sizeof(tmp)); tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST; tmp.type = PORT_16550A; @@ -711,25 +708,20 @@ static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg) return retval; } -static int get_serial_usage(struct gb_tty *gb_tty, - struct serial_icounter_struct __user *count) +static int gb_tty_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) { - struct serial_icounter_struct icount; - int retval = 0; - - memset(&icount, 0, sizeof(icount)); - icount.dsr = gb_tty->iocount.dsr; - icount.rng = gb_tty->iocount.rng; - icount.dcd = gb_tty->iocount.dcd; - icount.frame = gb_tty->iocount.frame; - icount.overrun = gb_tty->iocount.overrun; - icount.parity = gb_tty->iocount.parity; - icount.brk = gb_tty->iocount.brk; + struct gb_tty *gb_tty = tty->driver_data; - if (copy_to_user(count, &icount, sizeof(icount)) > 0) - retval = -EFAULT; + icount->dsr = gb_tty->iocount.dsr; + icount->rng = gb_tty->iocount.rng; + icount->dcd = gb_tty->iocount.dcd; + icount->frame = gb_tty->iocount.frame; + icount->overrun = gb_tty->iocount.overrun; + icount->parity = gb_tty->iocount.parity; + icount->brk = gb_tty->iocount.brk; - return retval; + return 0; } static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd, @@ -746,9 +738,6 @@ static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd, (struct serial_struct __user *)arg); case TIOCMIWAIT: return wait_serial_change(gb_tty, arg); - case TIOCGICOUNT: - return get_serial_usage(gb_tty, - (struct serial_icounter_struct __user *)arg); } return -ENOIOCTLCMD; @@ -830,9 +819,10 @@ static const struct tty_operations gb_ops = { .set_termios = gb_tty_set_termios, .tiocmget = gb_tty_tiocmget, .tiocmset = gb_tty_tiocmset, + .get_icount = gb_tty_get_icount, }; -static struct tty_port_operations gb_port_ops = { +static const struct tty_port_operations gb_port_ops = { .dtr_rts = gb_tty_dtr_rts, .activate = gb_tty_port_activate, .shutdown = gb_tty_port_shutdown, diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c index 8ed4d395be58f5fd3d893ba6efbda0783a96022c..19b550fff04bf0c9b33c74ad09a2016b8f518fc4 100644 --- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c +++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c @@ -38,7 +38,7 @@ static u8 bits_magic[] = { static struct platform_device *firmware_pdev; static char *file = "xlinx_fpga_firmware.bit"; -module_param(file, charp, S_IRUGO); +module_param(file, charp, 0444); MODULE_PARM_DESC(file, "Xilinx FPGA firmware file."); static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize) diff --git 
a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c index ad7a0391369fcdd68bf89520e67bd4486ac93f56..76ff5de657812a5047105386fbdbde612de15d95 100644 --- a/drivers/staging/i4l/act2000/act2000_isa.c +++ b/drivers/staging/i4l/act2000/act2000_isa.c @@ -259,6 +259,7 @@ act2000_isa_receive(act2000_card *card) "act2000_isa_receive: Invalid CAPI msg\n"); { int i; __u8 *p; __u8 *t; __u8 tmp[30]; + for (i = 0, p = (__u8 *)&card->idat.isa.rcvhdr, t = tmp; i < 8; i++) t += sprintf(t, "%02x ", *(p++)); printk(KERN_WARNING "act2000_isa_receive: %s\n", tmp); diff --git a/drivers/staging/i4l/act2000/capi.c b/drivers/staging/i4l/act2000/capi.c index 62f56294853c15d4db97568308af610561e80807..61386a78fb9136054154e96689cb7a86ddcdff76 100644 --- a/drivers/staging/i4l/act2000/capi.c +++ b/drivers/staging/i4l/act2000/capi.c @@ -99,7 +99,7 @@ actcapi_chkhdr(act2000_card *card, actcapi_msghdr *hdr) for (i = 0; i < num_valid_imsg; i++) if ((hdr->cmd.cmd == valid_msg[i].cmd.cmd) && (hdr->cmd.subcmd == valid_msg[i].cmd.subcmd)) { - return (i ? 1 : 2); + return i ? 1 : 2; } return 0; } @@ -506,6 +506,7 @@ static int new_plci(act2000_card *card, __u16 plci) { int i; + for (i = 0; i < ACT2000_BCH; i++) if (card->bch[i].plci == 0x8000) { card->bch[i].plci = plci; @@ -518,6 +519,7 @@ static int find_plci(act2000_card *card, __u16 plci) { int i; + for (i = 0; i < ACT2000_BCH; i++) if (card->bch[i].plci == plci) return i; @@ -528,6 +530,7 @@ static int find_ncci(act2000_card *card, __u16 ncci) { int i; + for (i = 0; i < ACT2000_BCH; i++) if (card->bch[i].ncci == ncci) return i; @@ -538,6 +541,7 @@ static int find_dialing(act2000_card *card, __u16 callref) { int i; + for (i = 0; i < ACT2000_BCH; i++) if ((card->bch[i].callref == callref) && (card->bch[i].fsm_state == ACT2000_STATE_OCALL)) @@ -1088,6 +1092,7 @@ actcapi_debug_msg(struct sk_buff *skb, int direction) int l = msg->hdr.len - 12; int j; char *p = tmp; + for (j = 0; j < l; j++) p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]); printk(KERN_DEBUG " D = '%s'\n", tmp); diff --git a/drivers/staging/i4l/act2000/module.c b/drivers/staging/i4l/act2000/module.c index 99c9c0a1c63e3158284bd47ee8345b0028c649b9..6aa120319e52fc186ca2723995068c905eac3447 100644 --- a/drivers/staging/i4l/act2000/module.c +++ b/drivers/staging/i4l/act2000/module.c @@ -19,8 +19,7 @@ #include #include -static unsigned short act2000_isa_ports[] = -{ +static unsigned short act2000_isa_ports[] = { 0x0200, 0x0240, 0x0280, 0x02c0, 0x0300, 0x0340, 0x0380, 0xcfe0, 0xcfa0, 0xcf60, 0xcf20, 0xcee0, 0xcea0, 0xce60, }; @@ -95,7 +94,7 @@ act2000_find_msn(act2000_card *card, char *msn, int ia5) p = p->next; } if (!ia5) - return (1 << (eaz - '0')); + return 1 << (eaz - '0'); else return eaz; } @@ -111,10 +110,10 @@ act2000_find_eaz(act2000_card *card, char eaz) while (p) { if (p->eaz == eaz) - return (p->msn); + return p->msn; p = p->next; } - return ("\0"); + return "\0"; } /* @@ -293,7 +292,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c) if (ret) return ret; if (card->flags & ACT2000_FLAGS_RUNNING) - return (actcapi_manufacturer_req_msn(card)); + return actcapi_manufacturer_req_msn(card); return 0; case ACT2000_IOCTL_ADDCARD: if (copy_from_user(&cdef, arg, @@ -377,6 +376,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c) } if (card->ptype == ISDN_PTYPE_1TR6) { int i; + chan->eazmask = 0; for (i = 0; i < strlen(c->parm.num); i++) if (isdigit(c->parm.num[i])) @@ -512,7 +512,7 @@ if_command(isdn_ctrl *c) act2000_card *card = act2000_findcard(c->driver); if (card) 
- return (act2000_command(card, c)); + return act2000_command(card, c); printk(KERN_ERR "act2000: if_command %d called with invalid driverId %d!\n", c->command, c->driver); @@ -527,7 +527,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel) if (card) { if (!(card->flags & ACT2000_FLAGS_RUNNING)) return -ENODEV; - return (len); + return len; } printk(KERN_ERR "act2000: if_writecmd called with invalid driverId!\n"); @@ -542,7 +542,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel) if (card) { if (!(card->flags & ACT2000_FLAGS_RUNNING)) return -ENODEV; - return (act2000_readstatus(buf, len, card)); + return act2000_readstatus(buf, len, card); } printk(KERN_ERR "act2000: if_readstatus called with invalid driverId!\n"); @@ -557,7 +557,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb) if (card) { if (!(card->flags & ACT2000_FLAGS_RUNNING)) return -ENODEV; - return (act2000_sendbuf(card, channel, ack, skb)); + return act2000_sendbuf(card, channel, ack, skb); } printk(KERN_ERR "act2000: if_sendbuf called with invalid driverId!\n"); @@ -574,6 +574,7 @@ act2000_alloccard(int bus, int port, int irq, char *id) { int i; act2000_card *card; + if (!(card = kzalloc(sizeof(act2000_card), GFP_KERNEL))) { printk(KERN_WARNING "act2000: (%s) Could not allocate card-struct.\n", id); @@ -776,7 +777,7 @@ act2000_addcard(int bus, int port, int irq, char *id) failed++; } } - return (added - failed); + return added - failed; } #define DRIVERNAME "IBM Active 2000 ISDN driver" @@ -795,6 +796,7 @@ static void __exit act2000_exit(void) { act2000_card *card = cards; act2000_card *last; + while (card) { unregister_card(card); del_timer_sync(&card->ptimer); diff --git a/drivers/staging/i4l/icn/icn.c b/drivers/staging/i4l/icn/icn.c index 514bfc2c5b53b84bda9177619736c57d6a388654..3750ba38adc50e4cff68dffb8413942d4adbd5be 100644 --- a/drivers/staging/i4l/icn/icn.c +++ b/drivers/staging/i4l/icn/icn.c @@ -411,8 +411,7 @@ typedef struct icn_stat { int action; } icn_stat; /* *INDENT-OFF* */ -static icn_stat icn_stat_table[] = -{ +static icn_stat icn_stat_table[] = { {"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */ {"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */ /* diff --git a/drivers/staging/i4l/icn/icn.h b/drivers/staging/i4l/icn/icn.h index f8f2e76d34bf30d8e398d577ba025384330ac316..07e2e0196527829160be63f4377de529bcb3b309 100644 --- a/drivers/staging/i4l/icn/icn.h +++ b/drivers/staging/i4l/icn/icn.h @@ -54,7 +54,7 @@ typedef struct icn_cdef { /* some useful macros for debugging */ #ifdef ICN_DEBUG_PORT -#define OUTB_P(v, p) {printk(KERN_DEBUG "icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);} +#define OUTB_P(v, p) {pr_debug("icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);} #else #define OUTB_P outb #endif @@ -186,8 +186,7 @@ typedef icn_dev *icn_devptr; #ifdef __KERNEL__ static icn_card *cards = (icn_card *) 0; -static u_char chan2bank[] = -{0, 4, 8, 12}; /* for icn_map_channel() */ +static u_char chan2bank[] = {0, 4, 8, 12}; /* for icn_map_channel() */ static icn_dev dev; diff --git a/drivers/staging/i4l/pcbit/callbacks.c b/drivers/staging/i4l/pcbit/callbacks.c index efb6d6a3639ae4755fecd8b5606cfdd8d6e3e7ee..212ab0b229d4a877790cb3cde62aa2a43324492d 100644 --- a/drivers/staging/i4l/pcbit/callbacks.c +++ b/drivers/staging/i4l/pcbit/callbacks.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include diff --git a/drivers/staging/i4l/pcbit/capi.c b/drivers/staging/i4l/pcbit/capi.c index 
373f90feda5a9bcc8c244735ec3c67ffda565561..a6c4e00dc726683926290c46e73310ad294ad222 100644 --- a/drivers/staging/i4l/pcbit/capi.c +++ b/drivers/staging/i4l/pcbit/capi.c @@ -27,7 +27,6 @@ * encode our number in CallerPN and ConnectedPN */ -#include #include #include @@ -36,8 +35,8 @@ #include -#include -#include +#include +#include #include diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c index d417df5efb5fc2fbdb2f7978895eb4af07ee0f04..89b0b5b94ce505a708a420d681c1e44bc20eb282 100644 --- a/drivers/staging/i4l/pcbit/drv.c +++ b/drivers/staging/i4l/pcbit/drv.c @@ -27,12 +27,11 @@ #include #include #include -#include #include #include -#include -#include +#include +#include #include #include "pcbit.h" diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c index 6d291d548423cecda2966c03936027a698da3665..5980d1b5da95d370c939f79d6fe3d9bbdc65e8d2 100644 --- a/drivers/staging/i4l/pcbit/edss1.c +++ b/drivers/staging/i4l/pcbit/edss1.c @@ -23,7 +23,7 @@ #include #include -#include +#include #include diff --git a/drivers/staging/i4l/pcbit/layer2.c b/drivers/staging/i4l/pcbit/layer2.c index a136c72547e5c740c8c2bf891c35685810e49982..0592bf6ee9c9eef6e6c95e4e3fe29ea8e3f78b2b 100644 --- a/drivers/staging/i4l/pcbit/layer2.c +++ b/drivers/staging/i4l/pcbit/layer2.c @@ -36,7 +36,7 @@ #include -#include +#include #include "pcbit.h" diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 deleted file mode 100644 index 470f7ad9c0730b26d44f3bcca559408c0bc8c38c..0000000000000000000000000000000000000000 --- a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 +++ /dev/null @@ -1,6 +0,0 @@ -What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate -KernelVersion: 2.6.37 -Contact: linux-iio@vger.kernel.org -Description: - This property causes an internal calibration of the als gain trim - value which is later used in calculating illuminance in lux. diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO index 93a896883e37b5b4a8233daab76f527629015cdb..4922402e2e98da7b72160dd93a3b1d38f28c4bd0 100644 --- a/drivers/staging/iio/TODO +++ b/drivers/staging/iio/TODO @@ -1,76 +1,8 @@ -2009 8/18 - -Core: -1) Get reviews -2) Additional testing -3) Ensure all desirable features present by adding more devices. - Major changes not expected except in response to comments - -Max1363 core: -1) Possibly add sysfs exports of constant useful to userspace. -Would be nice -2) Support hardware generated interrupts -3) Expand device set. Lots of other maxim adc's have very - similar interfaces. - -MXS LRADC driver: -This is a classic MFD device as it combines the following subdevices - - touchscreen controller (input subsystem related device) - - general purpose ADC channels - - battery voltage monitor (power subsystem related device) - - die temperature monitor (thermal management) - -At least the battery voltage and die temperature feature is required in-kernel -by a driver of the SoC's battery charging unit to avoid any damage to the -silicon and the battery. - -TSL2561 -Would be nice -1) Open question of userspace vs kernel space balance when -converting to useful light measurements from device ones. -2) Add sysfs elements necessary to allow device agnostic -unit conversion. 
- -LIS3L02DQ core - -LIS3L02DQ ring - -KXSD9 -Currently minimal driver, would be nice to add: -1) Support for all chip generated interrupts (events), -basically get support up to level of lis3l02dq driver. - -Ring buffer core - -SCA3000 -Would be nice -1) Testing on devices other than sca3000-e05 - -Trigger core support -1) Discussion of approach. Is it general enough? - -Ring Buffer: -1) Discussion of approach. -There are probably better ways of doing this. The -intention is to allow for more than one software ring -buffer implementation as different users will have -different requirements. This one suits mid range -frequencies (100Hz - 4kHz). -2) Lots of testing - -GPIO trigger -1) Add control over the type of interrupt etc. This will -necessitate a header that is also visible from arch board -files. (avoided at the moment to keep the driver set -contained in staging). +2016 10/09 ADI Drivers: CC the device-drivers-devel@blackfin.uclinux.org mailing list when e-mailing the normal IIO list (see below). -Documentation -1) Lots of cleanup and expansion. -2) Some device require individual docs. - Contact: Jonathan Cameron . Mailing list: linux-iio@vger.kernel.org diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig index 1c994b57c7d2082edca68c1634fe3fe5afa5cdca..c6b0f5eae7abf778c3fcf8cbcc0fbb9c821c82aa 100644 --- a/drivers/staging/iio/accel/Kconfig +++ b/drivers/staging/iio/accel/Kconfig @@ -51,14 +51,4 @@ config ADIS16240 To compile this driver as a module, say M here: the module will be called adis16240. -config SCA3000 - depends on IIO_BUFFER - depends on SPI - tristate "VTI SCA3000 series accelerometers" - help - Say Y here to build support for the VTI SCA3000 series of SPI - accelerometers. These devices use a hardware ring buffer. - - To compile this driver as a module, say M here: the module will be - called sca3000. endmenu diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile index 1810a434a755048d34fcd87f7073feef9b024272..febb137b60c4f111f1a4fbe7c86b8d5492b6f947 100644 --- a/drivers/staging/iio/accel/Makefile +++ b/drivers/staging/iio/accel/Makefile @@ -13,6 +13,3 @@ obj-$(CONFIG_ADIS16209) += adis16209.o adis16240-y := adis16240_core.o obj-$(CONFIG_ADIS16240) += adis16240.o - -sca3000-y := sca3000_core.o sca3000_ring.o -obj-$(CONFIG_SCA3000) += sca3000.o diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h deleted file mode 100644 index 4dcc8575cbe38eaf81322424f50dddfe74dc8fd7..0000000000000000000000000000000000000000 --- a/drivers/staging/iio/accel/sca3000.h +++ /dev/null @@ -1,279 +0,0 @@ -/* - * sca3000.c -- support VTI sca3000 series accelerometers - * via SPI - * - * Copyright (c) 2007 Jonathan Cameron - * - * Partly based upon tle62x0.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Initial mode is direct measurement. - * - * Untested things - * - * Temperature reading (the e05 I'm testing with doesn't have a sensor) - * - * Free fall detection mode - supported but untested as I'm not droping my - * dubious wire rig far enough to test it. - * - * Unsupported as yet - * - * Time stamping of data from ring. Various ideas on how to do this but none - * are remotely simple. Suggestions welcome. 
- * - * Individual enabling disabling of channels going into ring buffer - * - * Overflow handling (this is signaled for all but 8 bit ring buffer mode.) - * - * Motion detector using AND combinations of signals. - * - * Note: Be very careful about not touching an register bytes marked - * as reserved on the data sheet. They really mean it as changing convents of - * some will cause the device to lock up. - * - * Known issues - on rare occasions the interrupts lock up. Not sure why as yet. - * Can probably alleviate this by reading the interrupt register on start, but - * that is really just brushing the problem under the carpet. - */ -#ifndef _SCA3000 -#define _SCA3000 - -#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02) -#define SCA3000_READ_REG(a) ((a) << 2) - -#define SCA3000_REG_ADDR_REVID 0x00 -#define SCA3000_REVID_MAJOR_MASK 0xf0 -#define SCA3000_REVID_MINOR_MASK 0x0f - -#define SCA3000_REG_ADDR_STATUS 0x02 -#define SCA3000_LOCKED 0x20 -#define SCA3000_EEPROM_CS_ERROR 0x02 -#define SCA3000_SPI_FRAME_ERROR 0x01 - -/* All reads done using register decrement so no need to directly access LSBs */ -#define SCA3000_REG_ADDR_X_MSB 0x05 -#define SCA3000_REG_ADDR_Y_MSB 0x07 -#define SCA3000_REG_ADDR_Z_MSB 0x09 - -#define SCA3000_REG_ADDR_RING_OUT 0x0f - -/* Temp read untested - the e05 doesn't have the sensor */ -#define SCA3000_REG_ADDR_TEMP_MSB 0x13 - -#define SCA3000_REG_ADDR_MODE 0x14 -#define SCA3000_MODE_PROT_MASK 0x28 - -#define SCA3000_RING_BUF_ENABLE 0x80 -#define SCA3000_RING_BUF_8BIT 0x40 -/* - * Free fall detection triggers an interrupt if the acceleration - * is below a threshold for equivalent of 25cm drop - */ -#define SCA3000_FREE_FALL_DETECT 0x10 -#define SCA3000_MEAS_MODE_NORMAL 0x00 -#define SCA3000_MEAS_MODE_OP_1 0x01 -#define SCA3000_MEAS_MODE_OP_2 0x02 - -/* - * In motion detection mode the accelerations are band pass filtered - * (approx 1 - 25Hz) and then a programmable threshold used to trigger - * and interrupt. - */ -#define SCA3000_MEAS_MODE_MOT_DET 0x03 - -#define SCA3000_REG_ADDR_BUF_COUNT 0x15 - -#define SCA3000_REG_ADDR_INT_STATUS 0x16 - -#define SCA3000_INT_STATUS_THREE_QUARTERS 0x80 -#define SCA3000_INT_STATUS_HALF 0x40 - -#define SCA3000_INT_STATUS_FREE_FALL 0x08 -#define SCA3000_INT_STATUS_Y_TRIGGER 0x04 -#define SCA3000_INT_STATUS_X_TRIGGER 0x02 -#define SCA3000_INT_STATUS_Z_TRIGGER 0x01 - -/* Used to allow access to multiplexed registers */ -#define SCA3000_REG_ADDR_CTRL_SEL 0x18 -/* Only available for SCA3000-D03 and SCA3000-D01 */ -#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01 -#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02 -#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03 -#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04 -#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05 -/* - * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device - * will not function - */ -#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B -#define SCA3000_OUT_CTRL_PROT_MASK 0xE0 -#define SCA3000_OUT_CTRL_BUF_X_EN 0x10 -#define SCA3000_OUT_CTRL_BUF_Y_EN 0x08 -#define SCA3000_OUT_CTRL_BUF_Z_EN 0x04 -#define SCA3000_OUT_CTRL_BUF_DIV_MASK 0x03 -#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02 -#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01 - -/* - * Control which motion detector interrupts are on. - * For now only OR combinations are supported. 
- */ -#define SCA3000_MD_CTRL_PROT_MASK 0xC0 -#define SCA3000_MD_CTRL_OR_Y 0x01 -#define SCA3000_MD_CTRL_OR_X 0x02 -#define SCA3000_MD_CTRL_OR_Z 0x04 -/* Currently unsupported */ -#define SCA3000_MD_CTRL_AND_Y 0x08 -#define SCA3000_MD_CTRL_AND_X 0x10 -#define SAC3000_MD_CTRL_AND_Z 0x20 - -/* - * Some control registers of complex access methods requiring this register to - * be used to remove a lock. - */ -#define SCA3000_REG_ADDR_UNLOCK 0x1e - -#define SCA3000_REG_ADDR_INT_MASK 0x21 -#define SCA3000_INT_MASK_PROT_MASK 0x1C - -#define SCA3000_INT_MASK_RING_THREE_QUARTER 0x80 -#define SCA3000_INT_MASK_RING_HALF 0x40 - -#define SCA3000_INT_MASK_ALL_INTS 0x02 -#define SCA3000_INT_MASK_ACTIVE_HIGH 0x01 -#define SCA3000_INT_MASK_ACTIVE_LOW 0x00 - -/* Values of multiplexed registers (write to ctrl_data after select) */ -#define SCA3000_REG_ADDR_CTRL_DATA 0x22 - -/* - * Measurement modes available on some sca3000 series chips. Code assumes others - * may become available in the future. - * - * Bypass - Bypass the low-pass filter in the signal channel so as to increase - * signal bandwidth. - * - * Narrow - Narrow low-pass filtering of the signal channel and half output - * data rate by decimation. - * - * Wide - Widen low-pass filtering of signal channel to increase bandwidth - */ -#define SCA3000_OP_MODE_BYPASS 0x01 -#define SCA3000_OP_MODE_NARROW 0x02 -#define SCA3000_OP_MODE_WIDE 0x04 -#define SCA3000_MAX_TX 6 -#define SCA3000_MAX_RX 2 - -/** - * struct sca3000_state - device instance state information - * @us: the associated spi device - * @info: chip variant information - * @interrupt_handler_ws: event interrupt handler for all events - * @last_timestamp: the timestamp of the last event - * @mo_det_use_count: reference counter for the motion detection unit - * @lock: lock used to protect elements of sca3000_state - * and the underlying device state. - * @bpse: number of bits per scan element - * @tx: dma-able transmit buffer - * @rx: dma-able receive buffer - **/ -struct sca3000_state { - struct spi_device *us; - const struct sca3000_chip_info *info; - struct work_struct interrupt_handler_ws; - s64 last_timestamp; - int mo_det_use_count; - struct mutex lock; - int bpse; - /* Can these share a cacheline ? */ - u8 rx[2] ____cacheline_aligned; - u8 tx[6] ____cacheline_aligned; -}; - -/** - * struct sca3000_chip_info - model dependent parameters - * @scale: scale * 10^-6 - * @temp_output: some devices have temperature sensors. - * @measurement_mode_freq: normal mode sampling frequency - * @option_mode_1: first optional mode. Not all models have one - * @option_mode_1_freq: option mode 1 sampling frequency - * @option_mode_2: second optional mode. Not all chips have one - * @option_mode_2_freq: option mode 2 sampling frequency - * - * This structure is used to hold information about the functionality of a given - * sca3000 variant. - **/ -struct sca3000_chip_info { - unsigned int scale; - bool temp_output; - int measurement_mode_freq; - int option_mode_1; - int option_mode_1_freq; - int option_mode_2; - int option_mode_2_freq; - int mot_det_mult_xz[6]; - int mot_det_mult_y[7]; -}; - -int sca3000_read_data_short(struct sca3000_state *st, - u8 reg_address_high, - int len); - -/** - * sca3000_write_reg() write a single register - * @address: address of register on chip - * @val: value to be written to register - * - * The main lock must be held. 
- **/ -int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val); - -#ifdef CONFIG_IIO_BUFFER -/** - * sca3000_register_ring_funcs() setup the ring state change functions - **/ -void sca3000_register_ring_funcs(struct iio_dev *indio_dev); - -/** - * sca3000_configure_ring() - allocate and configure ring buffer - * @indio_dev: iio-core device whose ring is to be configured - * - * The hardware ring buffer needs far fewer ring buffer functions than - * a software one as a lot of things are handled automatically. - * This function also tells the iio core that our device supports a - * hardware ring buffer mode. - **/ -int sca3000_configure_ring(struct iio_dev *indio_dev); - -/** - * sca3000_unconfigure_ring() - deallocate the ring buffer - * @indio_dev: iio-core device whose ring we are freeing - **/ -void sca3000_unconfigure_ring(struct iio_dev *indio_dev); - -/** - * sca3000_ring_int_process() handles ring related event pushing and escalation - * @val: the event code - **/ -void sca3000_ring_int_process(u8 val, struct iio_buffer *ring); - -#else -static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev) -{ -} - -static inline -int sca3000_register_ring_access_and_init(struct iio_dev *indio_dev) -{ - return 0; -} - -static inline void sca3000_ring_int_process(u8 val, void *ring) -{ -} - -#endif -#endif /* _SCA3000 */ diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c deleted file mode 100644 index 564b36d4f6486c7c0dad7a18cca4b0bf0155c4a9..0000000000000000000000000000000000000000 --- a/drivers/staging/iio/accel/sca3000_core.c +++ /dev/null @@ -1,1210 +0,0 @@ -/* - * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * Copyright (c) 2009 Jonathan Cameron - * - * See industrialio/accels/sca3000.h for comments. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sca3000.h" - -enum sca3000_variant { - d01, - e02, - e04, - e05, -}; - -/* - * Note where option modes are not defined, the chip simply does not - * support any. - * Other chips in the sca3000 series use i2c and are not included here. - * - * Some of these devices are only listed in the family data sheet and - * do not actually appear to be available. 
- */ -static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = { - [d01] = { - .scale = 7357, - .temp_output = true, - .measurement_mode_freq = 250, - .option_mode_1 = SCA3000_OP_MODE_BYPASS, - .option_mode_1_freq = 250, - .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300}, - .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750}, - }, - [e02] = { - .scale = 9810, - .measurement_mode_freq = 125, - .option_mode_1 = SCA3000_OP_MODE_NARROW, - .option_mode_1_freq = 63, - .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050}, - .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700}, - }, - [e04] = { - .scale = 19620, - .measurement_mode_freq = 100, - .option_mode_1 = SCA3000_OP_MODE_NARROW, - .option_mode_1_freq = 50, - .option_mode_2 = SCA3000_OP_MODE_WIDE, - .option_mode_2_freq = 400, - .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100}, - .mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000}, - }, - [e05] = { - .scale = 61313, - .measurement_mode_freq = 200, - .option_mode_1 = SCA3000_OP_MODE_NARROW, - .option_mode_1_freq = 50, - .option_mode_2 = SCA3000_OP_MODE_WIDE, - .option_mode_2_freq = 400, - .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900}, - .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600}, - }, -}; - -int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val) -{ - st->tx[0] = SCA3000_WRITE_REG(address); - st->tx[1] = val; - return spi_write(st->us, st->tx, 2); -} - -int sca3000_read_data_short(struct sca3000_state *st, - u8 reg_address_high, - int len) -{ - struct spi_transfer xfer[2] = { - { - .len = 1, - .tx_buf = st->tx, - }, { - .len = len, - .rx_buf = st->rx, - } - }; - st->tx[0] = SCA3000_READ_REG(reg_address_high); - - return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); -} - -/** - * sca3000_reg_lock_on() test if the ctrl register lock is on - * - * Lock must be held. - **/ -static int sca3000_reg_lock_on(struct sca3000_state *st) -{ - int ret; - - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_STATUS, 1); - if (ret < 0) - return ret; - - return !(st->rx[0] & SCA3000_LOCKED); -} - -/** - * __sca3000_unlock_reg_lock() unlock the control registers - * - * Note the device does not appear to support doing this in a single transfer. - * This should only ever be used as part of ctrl reg read. - * Lock must be held before calling this - **/ -static int __sca3000_unlock_reg_lock(struct sca3000_state *st) -{ - struct spi_transfer xfer[3] = { - { - .len = 2, - .cs_change = 1, - .tx_buf = st->tx, - }, { - .len = 2, - .cs_change = 1, - .tx_buf = st->tx + 2, - }, { - .len = 2, - .tx_buf = st->tx + 4, - }, - }; - st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK); - st->tx[1] = 0x00; - st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK); - st->tx[3] = 0x50; - st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK); - st->tx[5] = 0xA0; - - return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); -} - -/** - * sca3000_write_ctrl_reg() write to a lock protect ctrl register - * @sel: selects which registers we wish to write to - * @val: the value to be written - * - * Certain control registers are protected against overwriting by the lock - * register and use a shared write address. This function allows writing of - * these registers. - * Lock must be held. 
- **/ -static int sca3000_write_ctrl_reg(struct sca3000_state *st, - u8 sel, - uint8_t val) -{ - int ret; - - ret = sca3000_reg_lock_on(st); - if (ret < 0) - goto error_ret; - if (ret) { - ret = __sca3000_unlock_reg_lock(st); - if (ret) - goto error_ret; - } - - /* Set the control select register */ - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, sel); - if (ret) - goto error_ret; - - /* Write the actual value into the register */ - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_DATA, val); - -error_ret: - return ret; -} - -/** - * sca3000_read_ctrl_reg() read from lock protected control register. - * - * Lock must be held. - **/ -static int sca3000_read_ctrl_reg(struct sca3000_state *st, - u8 ctrl_reg) -{ - int ret; - - ret = sca3000_reg_lock_on(st); - if (ret < 0) - goto error_ret; - if (ret) { - ret = __sca3000_unlock_reg_lock(st); - if (ret) - goto error_ret; - } - /* Set the control select register */ - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, ctrl_reg); - if (ret) - goto error_ret; - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1); - if (ret) - goto error_ret; - return st->rx[0]; -error_ret: - return ret; -} - -/** - * sca3000_show_rev() - sysfs interface to read the chip revision number - **/ -static ssize_t sca3000_show_rev(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - int len = 0, ret; - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1); - if (ret < 0) - goto error_ret; - len += sprintf(buf + len, - "major=%d, minor=%d\n", - st->rx[0] & SCA3000_REVID_MAJOR_MASK, - st->rx[0] & SCA3000_REVID_MINOR_MASK); -error_ret: - mutex_unlock(&st->lock); - - return ret ? ret : len; -} - -/** - * sca3000_show_available_measurement_modes() display available modes - * - * This is all read from chip specific data in the driver. Not all - * of the sca3000 series support modes other than normal. 
- **/ -static ssize_t -sca3000_show_available_measurement_modes(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - int len = 0; - - len += sprintf(buf + len, "0 - normal mode"); - switch (st->info->option_mode_1) { - case SCA3000_OP_MODE_NARROW: - len += sprintf(buf + len, ", 1 - narrow mode"); - break; - case SCA3000_OP_MODE_BYPASS: - len += sprintf(buf + len, ", 1 - bypass mode"); - break; - } - switch (st->info->option_mode_2) { - case SCA3000_OP_MODE_WIDE: - len += sprintf(buf + len, ", 2 - wide mode"); - break; - } - /* always supported */ - len += sprintf(buf + len, " 3 - motion detection\n"); - - return len; -} - -/** - * sca3000_show_measurement_mode() sysfs read of current mode - **/ -static ssize_t -sca3000_show_measurement_mode(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - int len = 0, ret; - - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - if (ret) - goto error_ret; - /* mask bottom 2 bits - only ones that are relevant */ - st->rx[0] &= 0x03; - switch (st->rx[0]) { - case SCA3000_MEAS_MODE_NORMAL: - len += sprintf(buf + len, "0 - normal mode\n"); - break; - case SCA3000_MEAS_MODE_MOT_DET: - len += sprintf(buf + len, "3 - motion detection\n"); - break; - case SCA3000_MEAS_MODE_OP_1: - switch (st->info->option_mode_1) { - case SCA3000_OP_MODE_NARROW: - len += sprintf(buf + len, "1 - narrow mode\n"); - break; - case SCA3000_OP_MODE_BYPASS: - len += sprintf(buf + len, "1 - bypass mode\n"); - break; - } - break; - case SCA3000_MEAS_MODE_OP_2: - switch (st->info->option_mode_2) { - case SCA3000_OP_MODE_WIDE: - len += sprintf(buf + len, "2 - wide mode\n"); - break; - } - break; - } - -error_ret: - mutex_unlock(&st->lock); - - return ret ? 
ret : len; -} - -/** - * sca3000_store_measurement_mode() set the current mode - **/ -static ssize_t -sca3000_store_measurement_mode(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - int ret; - u8 mask = 0x03; - u8 val; - - mutex_lock(&st->lock); - ret = kstrtou8(buf, 10, &val); - if (ret) - goto error_ret; - if (val > 3) { - ret = -EINVAL; - goto error_ret; - } - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - if (ret) - goto error_ret; - st->rx[0] &= ~mask; - st->rx[0] |= (val & mask); - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, st->rx[0]); - if (ret) - goto error_ret; - mutex_unlock(&st->lock); - - return len; - -error_ret: - mutex_unlock(&st->lock); - - return ret; -} - -/* - * Not even vaguely standard attributes so defined here rather than - * in the relevant IIO core headers - */ -static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO, - sca3000_show_available_measurement_modes, - NULL, 0); - -static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR, - sca3000_show_measurement_mode, - sca3000_store_measurement_mode, - 0); - -/* More standard attributes */ - -static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0); - -static const struct iio_event_spec sca3000_event = { - .type = IIO_EV_TYPE_MAG, - .dir = IIO_EV_DIR_RISING, - .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), -}; - -#define SCA3000_CHAN(index, mod) \ - { \ - .type = IIO_ACCEL, \ - .modified = 1, \ - .channel2 = mod, \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\ - .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\ - .address = index, \ - .scan_index = index, \ - .scan_type = { \ - .sign = 's', \ - .realbits = 11, \ - .storagebits = 16, \ - .shift = 5, \ - }, \ - .event_spec = &sca3000_event, \ - .num_event_specs = 1, \ - } - -static const struct iio_chan_spec sca3000_channels[] = { - SCA3000_CHAN(0, IIO_MOD_X), - SCA3000_CHAN(1, IIO_MOD_Y), - SCA3000_CHAN(2, IIO_MOD_Z), -}; - -static const struct iio_chan_spec sca3000_channels_with_temp[] = { - SCA3000_CHAN(0, IIO_MOD_X), - SCA3000_CHAN(1, IIO_MOD_Y), - SCA3000_CHAN(2, IIO_MOD_Z), - { - .type = IIO_TEMP, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | - BIT(IIO_CHAN_INFO_OFFSET), - /* No buffer support */ - .scan_index = -1, - }, -}; - -static u8 sca3000_addresses[3][3] = { - [0] = {SCA3000_REG_ADDR_X_MSB, SCA3000_REG_CTRL_SEL_MD_X_TH, - SCA3000_MD_CTRL_OR_X}, - [1] = {SCA3000_REG_ADDR_Y_MSB, SCA3000_REG_CTRL_SEL_MD_Y_TH, - SCA3000_MD_CTRL_OR_Y}, - [2] = {SCA3000_REG_ADDR_Z_MSB, SCA3000_REG_CTRL_SEL_MD_Z_TH, - SCA3000_MD_CTRL_OR_Z}, -}; - -/** - * __sca3000_get_base_freq() obtain mode specific base frequency - * - * lock must be held - **/ -static inline int __sca3000_get_base_freq(struct sca3000_state *st, - const struct sca3000_chip_info *info, - int *base_freq) -{ - int ret; - - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - if (ret) - goto error_ret; - switch (0x03 & st->rx[0]) { - case SCA3000_MEAS_MODE_NORMAL: - *base_freq = info->measurement_mode_freq; - break; - case SCA3000_MEAS_MODE_OP_1: - *base_freq = info->option_mode_1_freq; - break; - case SCA3000_MEAS_MODE_OP_2: - *base_freq = info->option_mode_2_freq; - break; - default: - ret = -EINVAL; - } -error_ret: - return ret; -} - -/** - * read_raw handler for 
IIO_CHAN_INFO_SAMP_FREQ - * - * lock must be held - **/ -static int read_raw_samp_freq(struct sca3000_state *st, int *val) -{ - int ret; - - ret = __sca3000_get_base_freq(st, st->info, val); - if (ret) - return ret; - - ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL); - if (ret < 0) - return ret; - - if (*val > 0) { - ret &= SCA3000_OUT_CTRL_BUF_DIV_MASK; - switch (ret) { - case SCA3000_OUT_CTRL_BUF_DIV_2: - *val /= 2; - break; - case SCA3000_OUT_CTRL_BUF_DIV_4: - *val /= 4; - break; - } - } - - return 0; -} - -/** - * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ - * - * lock must be held - **/ -static int write_raw_samp_freq(struct sca3000_state *st, int val) -{ - int ret, base_freq, ctrlval; - - ret = __sca3000_get_base_freq(st, st->info, &base_freq); - if (ret) - return ret; - - ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL); - if (ret < 0) - return ret; - - ctrlval = ret & ~SCA3000_OUT_CTRL_BUF_DIV_MASK; - - if (val == base_freq / 2) - ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2; - if (val == base_freq / 4) - ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4; - else if (val != base_freq) - return -EINVAL; - - return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, - ctrlval); -} - -static int sca3000_read_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int *val, - int *val2, - long mask) -{ - struct sca3000_state *st = iio_priv(indio_dev); - int ret; - u8 address; - - switch (mask) { - case IIO_CHAN_INFO_RAW: - mutex_lock(&st->lock); - if (chan->type == IIO_ACCEL) { - if (st->mo_det_use_count) { - mutex_unlock(&st->lock); - return -EBUSY; - } - address = sca3000_addresses[chan->address][0]; - ret = sca3000_read_data_short(st, address, 2); - if (ret < 0) { - mutex_unlock(&st->lock); - return ret; - } - *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF; - *val = ((*val) << (sizeof(*val) * 8 - 13)) >> - (sizeof(*val) * 8 - 13); - } else { - /* get the temperature when available */ - ret = sca3000_read_data_short(st, - SCA3000_REG_ADDR_TEMP_MSB, - 2); - if (ret < 0) { - mutex_unlock(&st->lock); - return ret; - } - *val = ((st->rx[0] & 0x3F) << 3) | - ((st->rx[1] & 0xE0) >> 5); - } - mutex_unlock(&st->lock); - return IIO_VAL_INT; - case IIO_CHAN_INFO_SCALE: - *val = 0; - if (chan->type == IIO_ACCEL) - *val2 = st->info->scale; - else /* temperature */ - *val2 = 555556; - return IIO_VAL_INT_PLUS_MICRO; - case IIO_CHAN_INFO_OFFSET: - *val = -214; - *val2 = 600000; - return IIO_VAL_INT_PLUS_MICRO; - case IIO_CHAN_INFO_SAMP_FREQ: - mutex_lock(&st->lock); - ret = read_raw_samp_freq(st, val); - mutex_unlock(&st->lock); - return ret ? ret : IIO_VAL_INT; - default: - return -EINVAL; - } -} - -static int sca3000_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int val, int val2, long mask) -{ - struct sca3000_state *st = iio_priv(indio_dev); - int ret; - - switch (mask) { - case IIO_CHAN_INFO_SAMP_FREQ: - if (val2) - return -EINVAL; - mutex_lock(&st->lock); - ret = write_raw_samp_freq(st, val); - mutex_unlock(&st->lock); - return ret; - default: - return -EINVAL; - } - - return ret; -} - -/** - * sca3000_read_av_freq() sysfs function to get available frequencies - * - * The later modes are only relevant to the ring buffer - and depend on current - * mode. Note that data sheet gives rather wide tolerances for these so integer - * division will give good enough answer and not all chips have them specified - * at all. 
- **/ -static ssize_t sca3000_read_av_freq(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - int len = 0, ret, val; - - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - val = st->rx[0]; - mutex_unlock(&st->lock); - if (ret) - goto error_ret; - - switch (val & 0x03) { - case SCA3000_MEAS_MODE_NORMAL: - len += sprintf(buf + len, "%d %d %d\n", - st->info->measurement_mode_freq, - st->info->measurement_mode_freq / 2, - st->info->measurement_mode_freq / 4); - break; - case SCA3000_MEAS_MODE_OP_1: - len += sprintf(buf + len, "%d %d %d\n", - st->info->option_mode_1_freq, - st->info->option_mode_1_freq / 2, - st->info->option_mode_1_freq / 4); - break; - case SCA3000_MEAS_MODE_OP_2: - len += sprintf(buf + len, "%d %d %d\n", - st->info->option_mode_2_freq, - st->info->option_mode_2_freq / 2, - st->info->option_mode_2_freq / 4); - break; - } - return len; -error_ret: - return ret; -} - -/* - * Should only really be registered if ring buffer support is compiled in. - * Does no harm however and doing it right would add a fair bit of complexity - */ -static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq); - -/** - * sca3000_read_thresh() - query of a threshold - **/ -static int sca3000_read_thresh(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - enum iio_event_type type, - enum iio_event_direction dir, - enum iio_event_info info, - int *val, int *val2) -{ - int ret, i; - struct sca3000_state *st = iio_priv(indio_dev); - int num = chan->channel2; - - mutex_lock(&st->lock); - ret = sca3000_read_ctrl_reg(st, sca3000_addresses[num][1]); - mutex_unlock(&st->lock); - if (ret < 0) - return ret; - *val = 0; - if (num == 1) - for_each_set_bit(i, (unsigned long *)&ret, - ARRAY_SIZE(st->info->mot_det_mult_y)) - *val += st->info->mot_det_mult_y[i]; - else - for_each_set_bit(i, (unsigned long *)&ret, - ARRAY_SIZE(st->info->mot_det_mult_xz)) - *val += st->info->mot_det_mult_xz[i]; - - return IIO_VAL_INT; -} - -/** - * sca3000_write_thresh() control of threshold - **/ -static int sca3000_write_thresh(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - enum iio_event_type type, - enum iio_event_direction dir, - enum iio_event_info info, - int val, int val2) -{ - struct sca3000_state *st = iio_priv(indio_dev); - int num = chan->channel2; - int ret; - int i; - u8 nonlinear = 0; - - if (num == 1) { - i = ARRAY_SIZE(st->info->mot_det_mult_y); - while (i > 0) - if (val >= st->info->mot_det_mult_y[--i]) { - nonlinear |= (1 << i); - val -= st->info->mot_det_mult_y[i]; - } - } else { - i = ARRAY_SIZE(st->info->mot_det_mult_xz); - while (i > 0) - if (val >= st->info->mot_det_mult_xz[--i]) { - nonlinear |= (1 << i); - val -= st->info->mot_det_mult_xz[i]; - } - } - - mutex_lock(&st->lock); - ret = sca3000_write_ctrl_reg(st, sca3000_addresses[num][1], nonlinear); - mutex_unlock(&st->lock); - - return ret; -} - -static struct attribute *sca3000_attributes[] = { - &iio_dev_attr_revision.dev_attr.attr, - &iio_dev_attr_measurement_mode_available.dev_attr.attr, - &iio_dev_attr_measurement_mode.dev_attr.attr, - &iio_dev_attr_sampling_frequency_available.dev_attr.attr, - NULL, -}; - -static const struct attribute_group sca3000_attribute_group = { - .attrs = sca3000_attributes, -}; - -/** - * sca3000_event_handler() - handling ring and non ring events - * - * Ring related interrupt handler. 
Depending on event, push to - * the ring buffer event chrdev or the event one. - * - * This function is complicated by the fact that the devices can signify ring - * and non ring events via the same interrupt line and they can only - * be distinguished via a read of the relevant status register. - **/ -static irqreturn_t sca3000_event_handler(int irq, void *private) -{ - struct iio_dev *indio_dev = private; - struct sca3000_state *st = iio_priv(indio_dev); - int ret, val; - s64 last_timestamp = iio_get_time_ns(indio_dev); - - /* - * Could lead if badly timed to an extra read of status reg, - * but ensures no interrupt is missed. - */ - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1); - val = st->rx[0]; - mutex_unlock(&st->lock); - if (ret) - goto done; - - sca3000_ring_int_process(val, indio_dev->buffer); - - if (val & SCA3000_INT_STATUS_FREE_FALL) - iio_push_event(indio_dev, - IIO_MOD_EVENT_CODE(IIO_ACCEL, - 0, - IIO_MOD_X_AND_Y_AND_Z, - IIO_EV_TYPE_MAG, - IIO_EV_DIR_FALLING), - last_timestamp); - - if (val & SCA3000_INT_STATUS_Y_TRIGGER) - iio_push_event(indio_dev, - IIO_MOD_EVENT_CODE(IIO_ACCEL, - 0, - IIO_MOD_Y, - IIO_EV_TYPE_MAG, - IIO_EV_DIR_RISING), - last_timestamp); - - if (val & SCA3000_INT_STATUS_X_TRIGGER) - iio_push_event(indio_dev, - IIO_MOD_EVENT_CODE(IIO_ACCEL, - 0, - IIO_MOD_X, - IIO_EV_TYPE_MAG, - IIO_EV_DIR_RISING), - last_timestamp); - - if (val & SCA3000_INT_STATUS_Z_TRIGGER) - iio_push_event(indio_dev, - IIO_MOD_EVENT_CODE(IIO_ACCEL, - 0, - IIO_MOD_Z, - IIO_EV_TYPE_MAG, - IIO_EV_DIR_RISING), - last_timestamp); - -done: - return IRQ_HANDLED; -} - -/** - * sca3000_read_event_config() what events are enabled - **/ -static int sca3000_read_event_config(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - enum iio_event_type type, - enum iio_event_direction dir) -{ - struct sca3000_state *st = iio_priv(indio_dev); - int ret; - u8 protect_mask = 0x03; - int num = chan->channel2; - - /* read current value of mode register */ - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - if (ret) - goto error_ret; - - if ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET) { - ret = 0; - } else { - ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL); - if (ret < 0) - goto error_ret; - /* only supporting logical or's for now */ - ret = !!(ret & sca3000_addresses[num][2]); - } -error_ret: - mutex_unlock(&st->lock); - - return ret; -} - -/** - * sca3000_query_free_fall_mode() is free fall mode enabled - **/ -static ssize_t sca3000_query_free_fall_mode(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - int ret; - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - int val; - - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - val = st->rx[0]; - mutex_unlock(&st->lock); - if (ret < 0) - return ret; - return sprintf(buf, "%d\n", !!(val & SCA3000_FREE_FALL_DETECT)); -} - -/** - * sca3000_set_free_fall_mode() simple on off control for free fall int - * - * In these chips the free fall detector should send an interrupt if - * the device falls more than 25cm. This has not been tested due - * to fragile wiring. 
- **/ -static ssize_t sca3000_set_free_fall_mode(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - u8 val; - int ret; - u8 protect_mask = SCA3000_FREE_FALL_DETECT; - - mutex_lock(&st->lock); - ret = kstrtou8(buf, 10, &val); - if (ret) - goto error_ret; - - /* read current value of mode register */ - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - if (ret) - goto error_ret; - - /* if off and should be on */ - if (val && !(st->rx[0] & protect_mask)) - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, - (st->rx[0] | SCA3000_FREE_FALL_DETECT)); - /* if on and should be off */ - else if (!val && (st->rx[0] & protect_mask)) - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, - (st->rx[0] & ~protect_mask)); -error_ret: - mutex_unlock(&st->lock); - - return ret ? ret : len; -} - -/** - * sca3000_write_event_config() simple on off control for motion detector - * - * This is a per axis control, but enabling any will result in the - * motion detector unit being enabled. - * N.B. enabling motion detector stops normal data acquisition. - * There is a complexity in knowing which mode to return to when - * this mode is disabled. Currently normal mode is assumed. - **/ -static int sca3000_write_event_config(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - enum iio_event_type type, - enum iio_event_direction dir, - int state) -{ - struct sca3000_state *st = iio_priv(indio_dev); - int ret, ctrlval; - u8 protect_mask = 0x03; - int num = chan->channel2; - - mutex_lock(&st->lock); - /* - * First read the motion detector config to find out if - * this axis is on - */ - ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL); - if (ret < 0) - goto exit_point; - ctrlval = ret; - /* if off and should be on */ - if (state && !(ctrlval & sca3000_addresses[num][2])) { - ret = sca3000_write_ctrl_reg(st, - SCA3000_REG_CTRL_SEL_MD_CTRL, - ctrlval | - sca3000_addresses[num][2]); - if (ret) - goto exit_point; - st->mo_det_use_count++; - } else if (!state && (ctrlval & sca3000_addresses[num][2])) { - ret = sca3000_write_ctrl_reg(st, - SCA3000_REG_CTRL_SEL_MD_CTRL, - ctrlval & - ~(sca3000_addresses[num][2])); - if (ret) - goto exit_point; - st->mo_det_use_count--; - } - - /* read current value of mode register */ - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - if (ret) - goto exit_point; - /* if off and should be on */ - if ((st->mo_det_use_count) && - ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET)) - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, - (st->rx[0] & ~protect_mask) - | SCA3000_MEAS_MODE_MOT_DET); - /* if on and should be off */ - else if (!(st->mo_det_use_count) && - ((st->rx[0] & protect_mask) == SCA3000_MEAS_MODE_MOT_DET)) - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, - (st->rx[0] & ~protect_mask)); -exit_point: - mutex_unlock(&st->lock); - - return ret; -} - -/* Free fall detector related event attribute */ -static IIO_DEVICE_ATTR_NAMED(accel_xayaz_mag_falling_en, - in_accel_x & y & z_mag_falling_en, - S_IRUGO | S_IWUSR, - sca3000_query_free_fall_mode, - sca3000_set_free_fall_mode, - 0); - -static IIO_CONST_ATTR_NAMED(accel_xayaz_mag_falling_period, - in_accel_x & y & z_mag_falling_period, - "0.226"); - -static struct attribute *sca3000_event_attributes[] = { - &iio_dev_attr_accel_xayaz_mag_falling_en.dev_attr.attr, - &iio_const_attr_accel_xayaz_mag_falling_period.dev_attr.attr, - NULL, -}; - 
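[Editor's illustration, not part of the patch.] The free-fall and per-axis motion-detection functionality above is exposed through the IIO event interface: the enable attribute defined by IIO_DEVICE_ATTR_NAMED() lands in the driver's "events" attribute group, and the companion period attribute is a read-only constant ("0.226" seconds). As a rough sketch of how userspace could toggle that enable bit, the snippet below opens the sysfs file and writes "1"; the device index and the exact attribute file name are assumptions inferred from the definitions above and should be checked against the registered IIO device.

/*
 * Hedged illustration only: enable the SCA3000 free-fall event from
 * userspace.  The iio:device0 index and the attribute spelling are
 * assumptions; look in the "events" directory of the device the
 * driver actually registered.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr =
		"/sys/bus/iio/devices/iio:device0/events/in_accel_x&y&z_mag_falling_en";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* write "0" to disable again */
		perror("write");
	close(fd);
	return 0;
}

Disabling the event writes "0" to the same file; the falling period cannot be changed because it is defined as an IIO_CONST_ATTR_NAMED above.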
-static struct attribute_group sca3000_event_attribute_group = { - .attrs = sca3000_event_attributes, - .name = "events", -}; - -/** - * sca3000_clean_setup() get the device into a predictable state - * - * Devices use flash memory to store many of the register values - * and hence can come up in somewhat unpredictable states. - * Hence reset everything on driver load. - **/ -static int sca3000_clean_setup(struct sca3000_state *st) -{ - int ret; - - mutex_lock(&st->lock); - /* Ensure all interrupts have been acknowledged */ - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1); - if (ret) - goto error_ret; - - /* Turn off all motion detection channels */ - ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL); - if (ret < 0) - goto error_ret; - ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL, - ret & SCA3000_MD_CTRL_PROT_MASK); - if (ret) - goto error_ret; - - /* Disable ring buffer */ - ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL); - if (ret < 0) - goto error_ret; - ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, - (ret & SCA3000_OUT_CTRL_PROT_MASK) - | SCA3000_OUT_CTRL_BUF_X_EN - | SCA3000_OUT_CTRL_BUF_Y_EN - | SCA3000_OUT_CTRL_BUF_Z_EN - | SCA3000_OUT_CTRL_BUF_DIV_4); - if (ret) - goto error_ret; - /* Enable interrupts, relevant to mode and set up as active low */ - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1); - if (ret) - goto error_ret; - ret = sca3000_write_reg(st, - SCA3000_REG_ADDR_INT_MASK, - (ret & SCA3000_INT_MASK_PROT_MASK) - | SCA3000_INT_MASK_ACTIVE_LOW); - if (ret) - goto error_ret; - /* - * Select normal measurement mode, free fall off, ring off - * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5 - * as that occurs in one of the example on the datasheet - */ - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - if (ret) - goto error_ret; - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, - (st->rx[0] & SCA3000_MODE_PROT_MASK)); - st->bpse = 11; - -error_ret: - mutex_unlock(&st->lock); - return ret; -} - -static const struct iio_info sca3000_info = { - .attrs = &sca3000_attribute_group, - .read_raw = &sca3000_read_raw, - .write_raw = &sca3000_write_raw, - .event_attrs = &sca3000_event_attribute_group, - .read_event_value = &sca3000_read_thresh, - .write_event_value = &sca3000_write_thresh, - .read_event_config = &sca3000_read_event_config, - .write_event_config = &sca3000_write_event_config, - .driver_module = THIS_MODULE, -}; - -static int sca3000_probe(struct spi_device *spi) -{ - int ret; - struct sca3000_state *st; - struct iio_dev *indio_dev; - - indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); - if (!indio_dev) - return -ENOMEM; - - st = iio_priv(indio_dev); - spi_set_drvdata(spi, indio_dev); - st->us = spi; - mutex_init(&st->lock); - st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi) - ->driver_data]; - - indio_dev->dev.parent = &spi->dev; - indio_dev->name = spi_get_device_id(spi)->name; - indio_dev->info = &sca3000_info; - if (st->info->temp_output) { - indio_dev->channels = sca3000_channels_with_temp; - indio_dev->num_channels = - ARRAY_SIZE(sca3000_channels_with_temp); - } else { - indio_dev->channels = sca3000_channels; - indio_dev->num_channels = ARRAY_SIZE(sca3000_channels); - } - indio_dev->modes = INDIO_DIRECT_MODE; - - sca3000_configure_ring(indio_dev); - ret = iio_device_register(indio_dev); - if (ret < 0) - return ret; - - if (spi->irq) { - ret = request_threaded_irq(spi->irq, - NULL, - &sca3000_event_handler, - 
IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "sca3000", - indio_dev); - if (ret) - goto error_unregister_dev; - } - sca3000_register_ring_funcs(indio_dev); - ret = sca3000_clean_setup(st); - if (ret) - goto error_free_irq; - return 0; - -error_free_irq: - if (spi->irq) - free_irq(spi->irq, indio_dev); -error_unregister_dev: - iio_device_unregister(indio_dev); - return ret; -} - -static int sca3000_stop_all_interrupts(struct sca3000_state *st) -{ - int ret; - - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1); - if (ret) - goto error_ret; - ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK, - (st->rx[0] & - ~(SCA3000_INT_MASK_RING_THREE_QUARTER | - SCA3000_INT_MASK_RING_HALF | - SCA3000_INT_MASK_ALL_INTS))); -error_ret: - mutex_unlock(&st->lock); - return ret; -} - -static int sca3000_remove(struct spi_device *spi) -{ - struct iio_dev *indio_dev = spi_get_drvdata(spi); - struct sca3000_state *st = iio_priv(indio_dev); - - /* Must ensure no interrupts can be generated after this! */ - sca3000_stop_all_interrupts(st); - if (spi->irq) - free_irq(spi->irq, indio_dev); - iio_device_unregister(indio_dev); - sca3000_unconfigure_ring(indio_dev); - - return 0; -} - -static const struct spi_device_id sca3000_id[] = { - {"sca3000_d01", d01}, - {"sca3000_e02", e02}, - {"sca3000_e04", e04}, - {"sca3000_e05", e05}, - {} -}; -MODULE_DEVICE_TABLE(spi, sca3000_id); - -static struct spi_driver sca3000_driver = { - .driver = { - .name = "sca3000", - }, - .probe = sca3000_probe, - .remove = sca3000_remove, - .id_table = sca3000_id, -}; -module_spi_driver(sca3000_driver); - -MODULE_AUTHOR("Jonathan Cameron "); -MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c deleted file mode 100644 index d1cb9b9cf22b21ed551f88dcf17f745a0cf23065..0000000000000000000000000000000000000000 --- a/drivers/staging/iio/accel/sca3000_ring.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * Copyright (c) 2009 Jonathan Cameron - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include "../ring_hw.h" -#include "sca3000.h" - -/* RFC / future work - * - * The internal ring buffer doesn't actually change what it holds depending - * on which signals are enabled etc, merely whether you can read them. - * As such the scan mode selection is somewhat different than for a software - * ring buffer and changing it actually covers any data already in the buffer. - * Currently scan elements aren't configured so it doesn't matter. 
- */ - -static int sca3000_read_data(struct sca3000_state *st, - u8 reg_address_high, - u8 **rx_p, - int len) -{ - int ret; - struct spi_transfer xfer[2] = { - { - .len = 1, - .tx_buf = st->tx, - }, { - .len = len, - } - }; - *rx_p = kmalloc(len, GFP_KERNEL); - if (!*rx_p) { - ret = -ENOMEM; - goto error_ret; - } - xfer[1].rx_buf = *rx_p; - st->tx[0] = SCA3000_READ_REG(reg_address_high); - ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); - if (ret) { - dev_err(get_device(&st->us->dev), "problem reading register"); - goto error_free_rx; - } - - return 0; -error_free_rx: - kfree(*rx_p); -error_ret: - return ret; -} - -/** - * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring - * @r: the ring - * @count: number of samples to try and pull - * @data: output the actual samples pulled from the hw ring - * - * Currently does not provide timestamps. As the hardware doesn't add them they - * can only be inferred approximately from ring buffer events such as 50% full - * and knowledge of when buffer was last emptied. This is left to userspace. - **/ -static int sca3000_read_first_n_hw_rb(struct iio_buffer *r, - size_t count, char __user *buf) -{ - struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r); - struct iio_dev *indio_dev = hw_ring->private; - struct sca3000_state *st = iio_priv(indio_dev); - u8 *rx; - int ret, i, num_available, num_read = 0; - int bytes_per_sample = 1; - - if (st->bpse == 11) - bytes_per_sample = 2; - - mutex_lock(&st->lock); - if (count % bytes_per_sample) { - ret = -EINVAL; - goto error_ret; - } - - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1); - if (ret) - goto error_ret; - num_available = st->rx[0]; - /* - * num_available is the total number of samples available - * i.e. number of time points * number of channels. - */ - if (count > num_available * bytes_per_sample) - num_read = num_available * bytes_per_sample; - else - num_read = count; - - ret = sca3000_read_data(st, - SCA3000_REG_ADDR_RING_OUT, - &rx, num_read); - if (ret) - goto error_ret; - - for (i = 0; i < num_read / sizeof(u16); i++) - *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i); - - if (copy_to_user(buf, rx, num_read)) - ret = -EFAULT; - kfree(rx); - r->stufftoread = 0; -error_ret: - mutex_unlock(&st->lock); - - return ret ? ret : num_read; -} - -static size_t sca3000_ring_buf_data_available(struct iio_buffer *r) -{ - return r->stufftoread ? 
r->watermark : 0; -} - -/** - * sca3000_query_ring_int() is the hardware ring status interrupt enabled - **/ -static ssize_t sca3000_query_ring_int(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); - int ret, val; - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1); - val = st->rx[0]; - mutex_unlock(&st->lock); - if (ret) - return ret; - - return sprintf(buf, "%d\n", !!(val & this_attr->address)); -} - -/** - * sca3000_set_ring_int() set state of ring status interrupt - **/ -static ssize_t sca3000_set_ring_int(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); - u8 val; - int ret; - - mutex_lock(&st->lock); - ret = kstrtou8(buf, 10, &val); - if (ret) - goto error_ret; - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1); - if (ret) - goto error_ret; - if (val) - ret = sca3000_write_reg(st, - SCA3000_REG_ADDR_INT_MASK, - st->rx[0] | this_attr->address); - else - ret = sca3000_write_reg(st, - SCA3000_REG_ADDR_INT_MASK, - st->rx[0] & ~this_attr->address); -error_ret: - mutex_unlock(&st->lock); - - return ret ? ret : len; -} - -static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR, - sca3000_query_ring_int, - sca3000_set_ring_int, - SCA3000_INT_MASK_RING_HALF); - -static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR, - sca3000_query_ring_int, - sca3000_set_ring_int, - SCA3000_INT_MASK_RING_THREE_QUARTER); - -static ssize_t sca3000_show_buffer_scale(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct sca3000_state *st = iio_priv(indio_dev); - - return sprintf(buf, "0.%06d\n", 4 * st->info->scale); -} - -static IIO_DEVICE_ATTR(in_accel_scale, - S_IRUGO, - sca3000_show_buffer_scale, - NULL, - 0); - -/* - * Ring buffer attributes - * This device is a bit unusual in that the sampling frequency and bpse - * only apply to the ring buffer. At all times full rate and accuracy - * is available via direct reading from registers. 
- */ -static const struct attribute *sca3000_ring_attributes[] = { - &iio_dev_attr_50_percent.dev_attr.attr, - &iio_dev_attr_75_percent.dev_attr.attr, - &iio_dev_attr_in_accel_scale.dev_attr.attr, - NULL, -}; - -static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev) -{ - struct iio_buffer *buf; - struct iio_hw_buffer *ring; - - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - return NULL; - - ring->private = indio_dev; - buf = &ring->buf; - buf->stufftoread = 0; - buf->length = 64; - buf->attrs = sca3000_ring_attributes; - iio_buffer_init(buf); - - return buf; -} - -static void sca3000_ring_release(struct iio_buffer *r) -{ - kfree(iio_to_hw_buf(r)); -} - -static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = { - .read_first_n = &sca3000_read_first_n_hw_rb, - .data_available = sca3000_ring_buf_data_available, - .release = sca3000_ring_release, - - .modes = INDIO_BUFFER_HARDWARE, -}; - -int sca3000_configure_ring(struct iio_dev *indio_dev) -{ - struct iio_buffer *buffer; - - buffer = sca3000_rb_allocate(indio_dev); - if (!buffer) - return -ENOMEM; - indio_dev->modes |= INDIO_BUFFER_HARDWARE; - - indio_dev->buffer->access = &sca3000_ring_access_funcs; - - iio_device_attach_buffer(indio_dev, buffer); - - return 0; -} - -void sca3000_unconfigure_ring(struct iio_dev *indio_dev) -{ - iio_buffer_put(indio_dev->buffer); -} - -static inline -int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state) -{ - struct sca3000_state *st = iio_priv(indio_dev); - int ret; - - mutex_lock(&st->lock); - ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1); - if (ret) - goto error_ret; - if (state) { - dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n"); - ret = sca3000_write_reg(st, - SCA3000_REG_ADDR_MODE, - (st->rx[0] | SCA3000_RING_BUF_ENABLE)); - } else - ret = sca3000_write_reg(st, - SCA3000_REG_ADDR_MODE, - (st->rx[0] & ~SCA3000_RING_BUF_ENABLE)); -error_ret: - mutex_unlock(&st->lock); - - return ret; -} - -/** - * sca3000_hw_ring_preenable() hw ring buffer preenable function - * - * Very simple enable function as the chip will allows normal reads - * during ring buffer operation so as long as it is indeed running - * before we notify the core, the precise ordering does not matter. - **/ -static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev) -{ - return __sca3000_hw_ring_state_set(indio_dev, 1); -} - -static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev) -{ - return __sca3000_hw_ring_state_set(indio_dev, 0); -} - -static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = { - .preenable = &sca3000_hw_ring_preenable, - .postdisable = &sca3000_hw_ring_postdisable, -}; - -void sca3000_register_ring_funcs(struct iio_dev *indio_dev) -{ - indio_dev->setup_ops = &sca3000_ring_setup_ops; -} - -/** - * sca3000_ring_int_process() ring specific interrupt handling. - * - * This is only split from the main interrupt handler so as to - * reduce the amount of code if the ring buffer is not enabled. 
- **/ -void sca3000_ring_int_process(u8 val, struct iio_buffer *ring) -{ - if (val & (SCA3000_INT_STATUS_THREE_QUARTERS | - SCA3000_INT_STATUS_HALF)) { - ring->stufftoread = true; - wake_up_interruptible(&ring->pollq); - } -} diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile index 3cdd83ccec8e23eb6e44419e1883240a825b26d8..ac09485923b6a22a44ba1fc5074055f8f0ab7b18 100644 --- a/drivers/staging/iio/adc/Makefile +++ b/drivers/staging/iio/adc/Makefile @@ -2,7 +2,6 @@ # Makefile for industrial I/O ADC drivers # -ad7606-y := ad7606_core.o ad7606_ring.o obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o obj-$(CONFIG_AD7606) += ad7606.o diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index 1cf6b79801a9889ffdd1786f36b475bfa529fe07..1fb68c01abd5914125f33bd71abe85e2b26aadd3 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -152,7 +152,8 @@ */ struct ad7192_state { - struct regulator *reg; + struct regulator *avdd; + struct regulator *dvdd; u16 int_vref_mv; u32 mclk; u32 f_order; @@ -322,57 +323,6 @@ static int ad7192_setup(struct ad7192_state *st, return ret; } -static ssize_t ad7192_read_frequency(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7192_state *st = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", st->mclk / - (st->f_order * 1024 * AD7192_MODE_RATE(st->mode))); -} - -static ssize_t ad7192_write_frequency(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7192_state *st = iio_priv(indio_dev); - unsigned long lval; - int div, ret; - - ret = kstrtoul(buf, 10, &lval); - if (ret) - return ret; - if (lval == 0) - return -EINVAL; - - ret = iio_device_claim_direct_mode(indio_dev); - if (ret) - return ret; - - div = st->mclk / (lval * st->f_order * 1024); - if (div < 1 || div > 1023) { - ret = -EINVAL; - goto out; - } - - st->mode &= ~AD7192_MODE_RATE(-1); - st->mode |= AD7192_MODE_RATE(div); - ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode); - -out: - iio_device_release_direct_mode(indio_dev); - - return ret ? 
ret : len; -} - -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, - ad7192_read_frequency, - ad7192_write_frequency); - static ssize_t ad7192_show_scale_available(struct device *dev, struct device_attribute *attr, char *buf) @@ -471,7 +421,6 @@ static IIO_DEVICE_ATTR(ac_excitation_en, S_IRUGO | S_IWUSR, AD7192_REG_MODE); static struct attribute *ad7192_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage_scale_available.dev_attr.attr, &iio_dev_attr_bridge_switch_en.dev_attr.attr, @@ -484,7 +433,6 @@ static const struct attribute_group ad7192_attribute_group = { }; static struct attribute *ad7195_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage_scale_available.dev_attr.attr, &iio_dev_attr_bridge_switch_en.dev_attr.attr, @@ -536,6 +484,10 @@ static int ad7192_read_raw(struct iio_dev *indio_dev, if (chan->type == IIO_TEMP) *val -= 273 * ad7192_get_temp_scale(unipolar); return IIO_VAL_INT; + case IIO_CHAN_INFO_SAMP_FREQ: + *val = st->mclk / + (st->f_order * 1024 * AD7192_MODE_RATE(st->mode)); + return IIO_VAL_INT; } return -EINVAL; @@ -548,7 +500,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev, long mask) { struct ad7192_state *st = iio_priv(indio_dev); - int ret, i; + int ret, i, div; unsigned int tmp; ret = iio_device_claim_direct_mode(indio_dev); @@ -572,6 +524,22 @@ static int ad7192_write_raw(struct iio_dev *indio_dev, break; } break; + case IIO_CHAN_INFO_SAMP_FREQ: + if (!val) { + ret = -EINVAL; + break; + } + + div = st->mclk / (val * st->f_order * 1024); + if (div < 1 || div > 1023) { + ret = -EINVAL; + break; + } + + st->mode &= ~AD7192_MODE_RATE(-1); + st->mode |= AD7192_MODE_RATE(div); + ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode); + break; default: ret = -EINVAL; } @@ -585,7 +553,14 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, long mask) { - return IIO_VAL_INT_PLUS_NANO; + switch (mask) { + case IIO_CHAN_INFO_SCALE: + return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_SAMP_FREQ: + return IIO_VAL_INT; + default: + return -EINVAL; + } } static const struct iio_info ad7192_info = { @@ -659,15 +634,30 @@ static int ad7192_probe(struct spi_device *spi) st = iio_priv(indio_dev); - st->reg = devm_regulator_get(&spi->dev, "vcc"); - if (!IS_ERR(st->reg)) { - ret = regulator_enable(st->reg); - if (ret) - return ret; + st->avdd = devm_regulator_get(&spi->dev, "avdd"); + if (IS_ERR(st->avdd)) + return PTR_ERR(st->avdd); - voltage_uv = regulator_get_voltage(st->reg); + ret = regulator_enable(st->avdd); + if (ret) { + dev_err(&spi->dev, "Failed to enable specified AVdd supply\n"); + return ret; + } + + st->dvdd = devm_regulator_get(&spi->dev, "dvdd"); + if (IS_ERR(st->dvdd)) { + ret = PTR_ERR(st->dvdd); + goto error_disable_avdd; } + ret = regulator_enable(st->dvdd); + if (ret) { + dev_err(&spi->dev, "Failed to enable specified DVdd supply\n"); + goto error_disable_avdd; + } + + voltage_uv = regulator_get_voltage(st->avdd); + if (pdata->vref_mv) st->int_vref_mv = pdata->vref_mv; else if (voltage_uv) @@ -701,7 +691,7 @@ static int ad7192_probe(struct spi_device *spi) ret = ad_sd_setup_buffer_and_trigger(indio_dev); if (ret) - goto error_disable_reg; + goto error_disable_dvdd; ret = ad7192_setup(st, pdata); if (ret) @@ -714,9 +704,10 @@ static int ad7192_probe(struct spi_device *spi) error_remove_trigger: 
ad_sd_cleanup_buffer_and_trigger(indio_dev); -error_disable_reg: - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); +error_disable_dvdd: + regulator_disable(st->dvdd); +error_disable_avdd: + regulator_disable(st->avdd); return ret; } @@ -729,8 +720,8 @@ static int ad7192_remove(struct spi_device *spi) iio_device_unregister(indio_dev); ad_sd_cleanup_buffer_and_trigger(indio_dev); - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); + regulator_disable(st->dvdd); + regulator_disable(st->avdd); return 0; } diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index b460dda7eb6554e2704f4eb67b33c3d1b0a2e4cb..ee679ac0368f86dd1671abf9aef3297401ca805a 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -777,7 +777,7 @@ static struct attribute *ad7280_event_attributes[] = { NULL, }; -static struct attribute_group ad7280_event_attrs_group = { +static const struct attribute_group ad7280_event_attrs_group = { .attrs = ad7280_event_attributes, }; diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606.c similarity index 50% rename from drivers/staging/iio/adc/ad7606_core.c rename to drivers/staging/iio/adc/ad7606.c index f79ee61851f6024994e9fe201c2ffde1755e3c0f..453190864b2ffc60b9b7515ce55ee53f2ad1c11e 100644 --- a/drivers/staging/iio/adc/ad7606_core.c +++ b/drivers/staging/iio/adc/ad7606.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -21,58 +21,109 @@ #include #include #include +#include +#include #include "ad7606.h" -int ad7606_reset(struct ad7606_state *st) +static int ad7606_reset(struct ad7606_state *st) { - if (gpio_is_valid(st->pdata->gpio_reset)) { - gpio_set_value(st->pdata->gpio_reset, 1); + if (st->gpio_reset) { + gpiod_set_value(st->gpio_reset, 1); ndelay(100); /* t_reset >= 100ns */ - gpio_set_value(st->pdata->gpio_reset, 0); + gpiod_set_value(st->gpio_reset, 0); return 0; } return -ENODEV; } +static int ad7606_read_samples(struct ad7606_state *st) +{ + unsigned int num = st->chip_info->num_channels; + u16 *data = st->data; + int ret; + + /* + * The frstdata signal is set to high while and after reading the sample + * of the first channel and low for all other channels. This can be used + * to check that the incoming data is correctly aligned. During normal + * operation the data should never become unaligned, but some glitch or + * electrostatic discharge might cause an extra read or clock cycle. + * Monitoring the frstdata signal allows to recover from such failure + * situations. + */ + + if (st->gpio_frstdata) { + ret = st->bops->read_block(st->dev, 1, data); + if (ret) + return ret; + + if (!gpiod_get_value(st->gpio_frstdata)) { + ad7606_reset(st); + return -EIO; + } + + data++; + num--; + } + + return st->bops->read_block(st->dev, num, data); +} + +static irqreturn_t ad7606_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct ad7606_state *st = iio_priv(pf->indio_dev); + + gpiod_set_value(st->gpio_convst, 1); + + return IRQ_HANDLED; +} + +/** + * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer + * @work_s: the work struct through which this was scheduled + * + * Currently there is no option in this driver to disable the saving of + * timestamps within the ring. + * I think the one copy of this at a time was to avoid problems if the + * trigger was set far too high and the reads then locked up the computer. 
+ **/ +static void ad7606_poll_bh_to_ring(struct work_struct *work_s) +{ + struct ad7606_state *st = container_of(work_s, struct ad7606_state, + poll_work); + struct iio_dev *indio_dev = iio_priv_to_dev(st); + int ret; + + ret = ad7606_read_samples(st); + if (ret == 0) + iio_push_to_buffers_with_timestamp(indio_dev, st->data, + iio_get_time_ns(indio_dev)); + + gpiod_set_value(st->gpio_convst, 0); + iio_trigger_notify_done(indio_dev->trig); +} + static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch) { struct ad7606_state *st = iio_priv(indio_dev); int ret; st->done = false; - gpio_set_value(st->pdata->gpio_convst, 1); + gpiod_set_value(st->gpio_convst, 1); ret = wait_event_interruptible(st->wq_data_avail, st->done); if (ret) goto error_ret; - if (gpio_is_valid(st->pdata->gpio_frstdata)) { - ret = st->bops->read_block(st->dev, 1, st->data); - if (ret) - goto error_ret; - if (!gpio_get_value(st->pdata->gpio_frstdata)) { - /* This should never happen */ - ad7606_reset(st); - ret = -EIO; - goto error_ret; - } - ret = st->bops->read_block(st->dev, - st->chip_info->num_channels - 1, &st->data[1]); - if (ret) - goto error_ret; - } else { - ret = st->bops->read_block(st->dev, - st->chip_info->num_channels, st->data); - if (ret) - goto error_ret; - } - - ret = st->data[ch]; + ret = ad7606_read_samples(st); + if (ret == 0) + ret = st->data[ch]; error_ret: - gpio_set_value(st->pdata->gpio_convst, 0); + gpiod_set_value(st->gpio_convst, 0); return ret; } @@ -103,6 +154,9 @@ static int ad7606_read_raw(struct iio_dev *indio_dev, *val = st->range * 2; *val2 = st->chip_info->channels[0].scan_type.realbits; return IIO_VAL_FRACTIONAL_LOG2; + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + *val = st->oversampling; + return IIO_VAL_INT; } return -EINVAL; } @@ -129,12 +183,11 @@ static ssize_t ad7606_store_range(struct device *dev, if (ret) return ret; - if (!(lval == 5000 || lval == 10000)) { - dev_err(dev, "range is not supported\n"); + if (!(lval == 5000 || lval == 10000)) return -EINVAL; - } + mutex_lock(&indio_dev->mlock); - gpio_set_value(st->pdata->gpio_range, lval == 10000); + gpiod_set_value(st->gpio_range, lval == 10000); st->range = lval; mutex_unlock(&indio_dev->mlock); @@ -145,19 +198,9 @@ static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR, ad7606_show_range, ad7606_store_range, 0); static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000"); -static ssize_t ad7606_show_oversampling_ratio(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7606_state *st = iio_priv(indio_dev); - - return sprintf(buf, "%u\n", st->oversampling); -} - static int ad7606_oversampling_get_index(unsigned int val) { - unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64}; + unsigned char supported[] = {1, 2, 4, 8, 16, 32, 64}; int i; for (i = 0; i < ARRAY_SIZE(supported); i++) @@ -167,44 +210,45 @@ static int ad7606_oversampling_get_index(unsigned int val) return -EINVAL; } -static ssize_t ad7606_store_oversampling_ratio(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static int ad7606_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, + int val2, + long mask) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7606_state *st = iio_priv(indio_dev); - unsigned long lval; + int values[3]; int ret; - ret = kstrtoul(buf, 10, &lval); - if (ret) - return ret; + switch (mask) { + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + if (val2) + return -EINVAL; + 
ret = ad7606_oversampling_get_index(val); + if (ret < 0) + return ret; - ret = ad7606_oversampling_get_index(lval); - if (ret < 0) { - dev_err(dev, "oversampling %lu is not supported\n", lval); - return ret; - } + values[0] = (ret >> 0) & 1; + values[1] = (ret >> 1) & 1; + values[2] = (ret >> 2) & 1; - mutex_lock(&indio_dev->mlock); - gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1); - gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1); - gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1); - st->oversampling = lval; - mutex_unlock(&indio_dev->mlock); + mutex_lock(&indio_dev->mlock); + gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc, + values); + st->oversampling = val; + mutex_unlock(&indio_dev->mlock); - return count; + return 0; + default: + return -EINVAL; + } } -static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR, - ad7606_show_oversampling_ratio, - ad7606_store_oversampling_ratio, 0); -static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64"); +static IIO_CONST_ATTR(oversampling_ratio_available, "1 2 4 8 16 32 64"); static struct attribute *ad7606_attributes_os_and_range[] = { &iio_dev_attr_in_voltage_range.dev_attr.attr, &iio_const_attr_in_voltage_range_available.dev_attr.attr, - &iio_dev_attr_oversampling_ratio.dev_attr.attr, &iio_const_attr_oversampling_ratio_available.dev_attr.attr, NULL, }; @@ -214,7 +258,6 @@ static const struct attribute_group ad7606_attribute_group_os_and_range = { }; static struct attribute *ad7606_attributes_os[] = { - &iio_dev_attr_oversampling_ratio.dev_attr.attr, &iio_const_attr_oversampling_ratio_available.dev_attr.attr, NULL, }; @@ -241,6 +284,8 @@ static const struct attribute_group ad7606_attribute_group_range = { .address = num, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\ + .info_mask_shared_by_all = \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ .scan_index = num, \ .scan_type = { \ .sign = 's', \ @@ -267,20 +312,14 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = { * More devices added in future */ [ID_AD7606_8] = { - .name = "ad7606", - .int_vref_mv = 2500, .channels = ad7606_channels, .num_channels = 9, }, [ID_AD7606_6] = { - .name = "ad7606-6", - .int_vref_mv = 2500, .channels = ad7606_channels, .num_channels = 7, }, [ID_AD7606_4] = { - .name = "ad7606-4", - .int_vref_mv = 2500, .channels = ad7606_channels, .num_channels = 5, }, @@ -288,119 +327,34 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = { static int ad7606_request_gpios(struct ad7606_state *st) { - struct gpio gpio_array[3] = { - [0] = { - .gpio = st->pdata->gpio_os0, - .flags = GPIOF_DIR_OUT | ((st->oversampling & 1) ? - GPIOF_INIT_HIGH : GPIOF_INIT_LOW), - .label = "AD7606_OS0", - }, - [1] = { - .gpio = st->pdata->gpio_os1, - .flags = GPIOF_DIR_OUT | ((st->oversampling & 2) ? - GPIOF_INIT_HIGH : GPIOF_INIT_LOW), - .label = "AD7606_OS1", - }, - [2] = { - .gpio = st->pdata->gpio_os2, - .flags = GPIOF_DIR_OUT | ((st->oversampling & 4) ? 
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW), - .label = "AD7606_OS2", - }, - }; - int ret; - - if (gpio_is_valid(st->pdata->gpio_convst)) { - ret = gpio_request_one(st->pdata->gpio_convst, - GPIOF_OUT_INIT_LOW, - "AD7606_CONVST"); - if (ret) { - dev_err(st->dev, "failed to request GPIO CONVST\n"); - goto error_ret; - } - } else { - ret = -EIO; - goto error_ret; - } - - if (gpio_is_valid(st->pdata->gpio_os0) && - gpio_is_valid(st->pdata->gpio_os1) && - gpio_is_valid(st->pdata->gpio_os2)) { - ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array)); - if (ret < 0) - goto error_free_convst; - } - - if (gpio_is_valid(st->pdata->gpio_reset)) { - ret = gpio_request_one(st->pdata->gpio_reset, - GPIOF_OUT_INIT_LOW, - "AD7606_RESET"); - if (ret < 0) - goto error_free_os; - } - - if (gpio_is_valid(st->pdata->gpio_range)) { - ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT | - ((st->range == 10000) ? GPIOF_INIT_HIGH : - GPIOF_INIT_LOW), "AD7606_RANGE"); - if (ret < 0) - goto error_free_reset; - } - if (gpio_is_valid(st->pdata->gpio_stby)) { - ret = gpio_request_one(st->pdata->gpio_stby, - GPIOF_OUT_INIT_HIGH, - "AD7606_STBY"); - if (ret < 0) - goto error_free_range; - } - - if (gpio_is_valid(st->pdata->gpio_frstdata)) { - ret = gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN, - "AD7606_FRSTDATA"); - if (ret < 0) - goto error_free_stby; - } - - return 0; - -error_free_stby: - if (gpio_is_valid(st->pdata->gpio_stby)) - gpio_free(st->pdata->gpio_stby); -error_free_range: - if (gpio_is_valid(st->pdata->gpio_range)) - gpio_free(st->pdata->gpio_range); -error_free_reset: - if (gpio_is_valid(st->pdata->gpio_reset)) - gpio_free(st->pdata->gpio_reset); -error_free_os: - if (gpio_is_valid(st->pdata->gpio_os0) && - gpio_is_valid(st->pdata->gpio_os1) && - gpio_is_valid(st->pdata->gpio_os2)) - gpio_free_array(gpio_array, ARRAY_SIZE(gpio_array)); -error_free_convst: - gpio_free(st->pdata->gpio_convst); -error_ret: - return ret; -} - -static void ad7606_free_gpios(struct ad7606_state *st) -{ - if (gpio_is_valid(st->pdata->gpio_frstdata)) - gpio_free(st->pdata->gpio_frstdata); - if (gpio_is_valid(st->pdata->gpio_stby)) - gpio_free(st->pdata->gpio_stby); - if (gpio_is_valid(st->pdata->gpio_range)) - gpio_free(st->pdata->gpio_range); - if (gpio_is_valid(st->pdata->gpio_reset)) - gpio_free(st->pdata->gpio_reset); - if (gpio_is_valid(st->pdata->gpio_os0) && - gpio_is_valid(st->pdata->gpio_os1) && - gpio_is_valid(st->pdata->gpio_os2)) { - gpio_free(st->pdata->gpio_os2); - gpio_free(st->pdata->gpio_os1); - gpio_free(st->pdata->gpio_os0); - } - gpio_free(st->pdata->gpio_convst); + struct device *dev = st->dev; + + st->gpio_convst = devm_gpiod_get(dev, "conversion-start", + GPIOD_OUT_LOW); + if (IS_ERR(st->gpio_convst)) + return PTR_ERR(st->gpio_convst); + + st->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(st->gpio_reset)) + return PTR_ERR(st->gpio_reset); + + st->gpio_range = devm_gpiod_get_optional(dev, "range", GPIOD_OUT_LOW); + if (IS_ERR(st->gpio_range)) + return PTR_ERR(st->gpio_range); + + st->gpio_standby = devm_gpiod_get_optional(dev, "standby", + GPIOD_OUT_HIGH); + if (IS_ERR(st->gpio_standby)) + return PTR_ERR(st->gpio_standby); + + st->gpio_frstdata = devm_gpiod_get_optional(dev, "first-data", + GPIOD_IN); + if (IS_ERR(st->gpio_frstdata)) + return PTR_ERR(st->gpio_frstdata); + + st->gpio_os = devm_gpiod_get_array_optional(dev, "oversampling-ratio", + GPIOD_OUT_LOW); + return PTR_ERR_OR_ZERO(st->gpio_os); } /** @@ -429,12 +383,14 @@ static const struct iio_info 
ad7606_info_no_os_or_range = { static const struct iio_info ad7606_info_os_and_range = { .driver_module = THIS_MODULE, .read_raw = &ad7606_read_raw, + .write_raw = &ad7606_write_raw, .attrs = &ad7606_attribute_group_os_and_range, }; static const struct iio_info ad7606_info_os = { .driver_module = THIS_MODULE, .read_raw = &ad7606_read_raw, + .write_raw = &ad7606_write_raw, .attrs = &ad7606_attribute_group_os, }; @@ -444,81 +400,73 @@ static const struct iio_info ad7606_info_range = { .attrs = &ad7606_attribute_group_range, }; -struct iio_dev *ad7606_probe(struct device *dev, int irq, - void __iomem *base_address, - unsigned int id, - const struct ad7606_bus_ops *bops) +int ad7606_probe(struct device *dev, int irq, void __iomem *base_address, + const char *name, unsigned int id, + const struct ad7606_bus_ops *bops) { - struct ad7606_platform_data *pdata = dev->platform_data; struct ad7606_state *st; int ret; struct iio_dev *indio_dev; indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); if (!indio_dev) - return ERR_PTR(-ENOMEM); + return -ENOMEM; st = iio_priv(indio_dev); st->dev = dev; st->bops = bops; st->base_address = base_address; - st->range = pdata->default_range == 10000 ? 10000 : 5000; + st->range = 5000; + st->oversampling = 1; + INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring); - ret = ad7606_oversampling_get_index(pdata->default_os); - if (ret < 0) { - dev_warn(dev, "oversampling %d is not supported\n", - pdata->default_os); - st->oversampling = 0; - } else { - st->oversampling = pdata->default_os; - } + st->reg = devm_regulator_get(dev, "avcc"); + if (IS_ERR(st->reg)) + return PTR_ERR(st->reg); - st->reg = devm_regulator_get(dev, "vcc"); - if (!IS_ERR(st->reg)) { - ret = regulator_enable(st->reg); - if (ret) - return ERR_PTR(ret); + ret = regulator_enable(st->reg); + if (ret) { + dev_err(dev, "Failed to enable specified AVcc supply\n"); + return ret; } - st->pdata = pdata; + ret = ad7606_request_gpios(st); + if (ret) + goto error_disable_reg; + st->chip_info = &ad7606_chip_info_tbl[id]; indio_dev->dev.parent = dev; - if (gpio_is_valid(st->pdata->gpio_os0) && - gpio_is_valid(st->pdata->gpio_os1) && - gpio_is_valid(st->pdata->gpio_os2)) { - if (gpio_is_valid(st->pdata->gpio_range)) + if (st->gpio_os) { + if (st->gpio_range) indio_dev->info = &ad7606_info_os_and_range; else indio_dev->info = &ad7606_info_os; } else { - if (gpio_is_valid(st->pdata->gpio_range)) + if (st->gpio_range) indio_dev->info = &ad7606_info_range; else indio_dev->info = &ad7606_info_no_os_or_range; } indio_dev->modes = INDIO_DIRECT_MODE; - indio_dev->name = st->chip_info->name; + indio_dev->name = name; indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = st->chip_info->num_channels; init_waitqueue_head(&st->wq_data_avail); - ret = ad7606_request_gpios(st); - if (ret) - goto error_disable_reg; - ret = ad7606_reset(st); if (ret) dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n"); - ret = request_irq(irq, ad7606_interrupt, - IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev); + ret = request_irq(irq, ad7606_interrupt, IRQF_TRIGGER_FALLING, name, + indio_dev); if (ret) - goto error_free_gpios; + goto error_disable_reg; - ret = ad7606_register_ring_funcs_and_init(indio_dev); + ret = iio_triggered_buffer_setup(indio_dev, &ad7606_trigger_handler, + NULL, NULL); if (ret) goto error_free_irq; @@ -526,35 +474,31 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq, if (ret) goto error_unregister_ring; - return indio_dev; + dev_set_drvdata(dev, indio_dev); + + return 0; 
error_unregister_ring: - ad7606_ring_cleanup(indio_dev); + iio_triggered_buffer_cleanup(indio_dev); error_free_irq: free_irq(irq, indio_dev); -error_free_gpios: - ad7606_free_gpios(st); - error_disable_reg: - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); - return ERR_PTR(ret); + regulator_disable(st->reg); + return ret; } EXPORT_SYMBOL_GPL(ad7606_probe); -int ad7606_remove(struct iio_dev *indio_dev, int irq) +int ad7606_remove(struct device *dev, int irq) { + struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); - ad7606_ring_cleanup(indio_dev); + iio_triggered_buffer_cleanup(indio_dev); free_irq(irq, indio_dev); - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); - - ad7606_free_gpios(st); + regulator_disable(st->reg); return 0; } @@ -567,10 +511,9 @@ static int ad7606_suspend(struct device *dev) struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); - if (gpio_is_valid(st->pdata->gpio_stby)) { - if (gpio_is_valid(st->pdata->gpio_range)) - gpio_set_value(st->pdata->gpio_range, 1); - gpio_set_value(st->pdata->gpio_stby, 0); + if (st->gpio_standby) { + gpiod_set_value(st->gpio_range, 1); + gpiod_set_value(st->gpio_standby, 0); } return 0; @@ -581,12 +524,9 @@ static int ad7606_resume(struct device *dev) struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); - if (gpio_is_valid(st->pdata->gpio_stby)) { - if (gpio_is_valid(st->pdata->gpio_range)) - gpio_set_value(st->pdata->gpio_range, - st->range == 10000); - - gpio_set_value(st->pdata->gpio_stby, 1); + if (st->gpio_standby) { + gpiod_set_value(st->gpio_range, st->range == 10000); + gpiod_set_value(st->gpio_standby, 1); ad7606_reset(st); } diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h index 39f50440d915379dc601f333819954bea620f8e7..746f9553d2bae2ee6405d0589059735a9b5bb489 100644 --- a/drivers/staging/iio/adc/ad7606.h +++ b/drivers/staging/iio/adc/ad7606.h @@ -9,48 +9,14 @@ #ifndef IIO_ADC_AD7606_H_ #define IIO_ADC_AD7606_H_ -/* - * TODO: struct ad7606_platform_data needs to go into include/linux/iio - */ - -/** - * struct ad7606_platform_data - platform/board specific information - * @default_os: default oversampling value {0, 2, 4, 8, 16, 32, 64} - * @default_range: default range +/-{5000, 10000} mVolt - * @gpio_convst: number of gpio connected to the CONVST pin - * @gpio_reset: gpio connected to the RESET pin, if not used set to -1 - * @gpio_range: gpio connected to the RANGE pin, if not used set to -1 - * @gpio_os0: gpio connected to the OS0 pin, if not used set to -1 - * @gpio_os1: gpio connected to the OS1 pin, if not used set to -1 - * @gpio_os2: gpio connected to the OS2 pin, if not used set to -1 - * @gpio_frstdata: gpio connected to the FRSTDAT pin, if not used set to -1 - * @gpio_stby: gpio connected to the STBY pin, if not used set to -1 - */ - -struct ad7606_platform_data { - unsigned int default_os; - unsigned int default_range; - unsigned int gpio_convst; - unsigned int gpio_reset; - unsigned int gpio_range; - unsigned int gpio_os0; - unsigned int gpio_os1; - unsigned int gpio_os2; - unsigned int gpio_frstdata; - unsigned int gpio_stby; -}; - /** * struct ad7606_chip_info - chip specific information * @name: identification string for chip - * @int_vref_mv: the internal reference voltage * @channels: channel specification * @num_channels: number of channels */ struct ad7606_chip_info { - const char *name; - u16 int_vref_mv; 
const struct iio_chan_spec *channels; unsigned int num_channels; }; @@ -62,7 +28,6 @@ struct ad7606_chip_info { struct ad7606_state { struct device *dev; const struct ad7606_chip_info *chip_info; - struct ad7606_platform_data *pdata; struct regulator *reg; struct work_struct poll_work; wait_queue_head_t wq_data_avail; @@ -72,12 +37,19 @@ struct ad7606_state { bool done; void __iomem *base_address; + struct gpio_desc *gpio_convst; + struct gpio_desc *gpio_reset; + struct gpio_desc *gpio_range; + struct gpio_desc *gpio_standby; + struct gpio_desc *gpio_frstdata; + struct gpio_descs *gpio_os; + /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. + * 8 * 16-bit samples + 64-bit timestamp */ - - unsigned short data[8] ____cacheline_aligned; + unsigned short data[12] ____cacheline_aligned; }; struct ad7606_bus_ops { @@ -85,11 +57,10 @@ struct ad7606_bus_ops { int (*read_block)(struct device *, int, void *); }; -struct iio_dev *ad7606_probe(struct device *dev, int irq, - void __iomem *base_address, unsigned int id, - const struct ad7606_bus_ops *bops); -int ad7606_remove(struct iio_dev *indio_dev, int irq); -int ad7606_reset(struct ad7606_state *st); +int ad7606_probe(struct device *dev, int irq, void __iomem *base_address, + const char *name, unsigned int id, + const struct ad7606_bus_ops *bops); +int ad7606_remove(struct device *dev, int irq); enum ad7606_supported_device_ids { ID_AD7606_8, @@ -97,9 +68,6 @@ enum ad7606_supported_device_ids { ID_AD7606_4 }; -int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev); -void ad7606_ring_cleanup(struct iio_dev *indio_dev); - #ifdef CONFIG_PM_SLEEP extern const struct dev_pm_ops ad7606_pm_ops; #define AD7606_PM_OPS (&ad7606_pm_ops) diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c index 84d23930fdde1b351381c314a3755babf2ef4b4c..cd6c410c04846f83a9e0910c4993a8b60877be08 100644 --- a/drivers/staging/iio/adc/ad7606_par.c +++ b/drivers/staging/iio/adc/ad7606_par.c @@ -49,8 +49,8 @@ static const struct ad7606_bus_ops ad7606_par8_bops = { static int ad7606_par_probe(struct platform_device *pdev) { + const struct platform_device_id *id = platform_get_device_id(pdev); struct resource *res; - struct iio_dev *indio_dev; void __iomem *addr; resource_size_t remap_size; int irq; @@ -68,26 +68,15 @@ static int ad7606_par_probe(struct platform_device *pdev) remap_size = resource_size(res); - indio_dev = ad7606_probe(&pdev->dev, irq, addr, - platform_get_device_id(pdev)->driver_data, - remap_size > 1 ? &ad7606_par16_bops : - &ad7606_par8_bops); - - if (IS_ERR(indio_dev)) - return PTR_ERR(indio_dev); - - platform_set_drvdata(pdev, indio_dev); - - return 0; + return ad7606_probe(&pdev->dev, irq, addr, + id->name, id->driver_data, + remap_size > 1 ? &ad7606_par16_bops : + &ad7606_par8_bops); } static int ad7606_par_remove(struct platform_device *pdev) { - struct iio_dev *indio_dev = platform_get_drvdata(pdev); - - ad7606_remove(indio_dev, platform_get_irq(pdev, 0)); - - return 0; + return ad7606_remove(&pdev->dev, platform_get_irq(pdev, 0)); } static const struct platform_device_id ad7606_driver_ids[] = { diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c deleted file mode 100644 index 0572df9aad85278c9242f045fc5e766f01891ef8..0000000000000000000000000000000000000000 --- a/drivers/staging/iio/adc/ad7606_ring.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2011-2012 Analog Devices Inc. - * - * Licensed under the GPL-2. 
- * - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "ad7606.h" - -/** - * ad7606_trigger_handler_th() th/bh of trigger launched polling to ring buffer - * - **/ -static irqreturn_t ad7606_trigger_handler_th_bh(int irq, void *p) -{ - struct iio_poll_func *pf = p; - struct ad7606_state *st = iio_priv(pf->indio_dev); - - gpio_set_value(st->pdata->gpio_convst, 1); - - return IRQ_HANDLED; -} - -/** - * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer - * @work_s: the work struct through which this was scheduled - * - * Currently there is no option in this driver to disable the saving of - * timestamps within the ring. - * I think the one copy of this at a time was to avoid problems if the - * trigger was set far too high and the reads then locked up the computer. - **/ -static void ad7606_poll_bh_to_ring(struct work_struct *work_s) -{ - struct ad7606_state *st = container_of(work_s, struct ad7606_state, - poll_work); - struct iio_dev *indio_dev = iio_priv_to_dev(st); - __u8 *buf; - int ret; - - buf = kzalloc(indio_dev->scan_bytes, GFP_KERNEL); - if (!buf) - return; - - if (gpio_is_valid(st->pdata->gpio_frstdata)) { - ret = st->bops->read_block(st->dev, 1, buf); - if (ret) - goto done; - if (!gpio_get_value(st->pdata->gpio_frstdata)) { - /* This should never happen. However - * some signal glitch caused by bad PCB desgin or - * electrostatic discharge, could cause an extra read - * or clock. This allows recovery. - */ - ad7606_reset(st); - goto done; - } - ret = st->bops->read_block(st->dev, - st->chip_info->num_channels - 1, buf + 2); - if (ret) - goto done; - } else { - ret = st->bops->read_block(st->dev, - st->chip_info->num_channels, buf); - if (ret) - goto done; - } - - iio_push_to_buffers_with_timestamp(indio_dev, buf, - iio_get_time_ns(indio_dev)); -done: - gpio_set_value(st->pdata->gpio_convst, 0); - iio_trigger_notify_done(indio_dev->trig); - kfree(buf); -} - -int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev) -{ - struct ad7606_state *st = iio_priv(indio_dev); - - INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring); - - return iio_triggered_buffer_setup(indio_dev, - &ad7606_trigger_handler_th_bh, &ad7606_trigger_handler_th_bh, - NULL); -} - -void ad7606_ring_cleanup(struct iio_dev *indio_dev) -{ - iio_triggered_buffer_cleanup(indio_dev); -} diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c index 9587fa86dc69589fc34aa0105584ef3b24add210..c9b1f26685f49fb0a2bf24e6de9fb4630d3dc659 100644 --- a/drivers/staging/iio/adc/ad7606_spi.c +++ b/drivers/staging/iio/adc/ad7606_spi.c @@ -42,25 +42,16 @@ static const struct ad7606_bus_ops ad7606_spi_bops = { static int ad7606_spi_probe(struct spi_device *spi) { - struct iio_dev *indio_dev; + const struct spi_device_id *id = spi_get_device_id(spi); - indio_dev = ad7606_probe(&spi->dev, spi->irq, NULL, - spi_get_device_id(spi)->driver_data, - &ad7606_spi_bops); - - if (IS_ERR(indio_dev)) - return PTR_ERR(indio_dev); - - spi_set_drvdata(spi, indio_dev); - - return 0; + return ad7606_probe(&spi->dev, spi->irq, NULL, + id->name, id->driver_data, + &ad7606_spi_bops); } static int ad7606_spi_remove(struct spi_device *spi) { - struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev); - - return ad7606_remove(indio_dev, spi->irq); + return ad7606_remove(&spi->dev, spi->irq); } static const struct spi_device_id ad7606_id[] = { diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c index 
c9a0c2aa602f7903425278742c40f3a1104a4d0c..e14960038d3e7946c59d8f9b23ec14c0f68ea566 100644 --- a/drivers/staging/iio/adc/ad7780.c +++ b/drivers/staging/iio/adc/ad7780.c @@ -173,14 +173,16 @@ static int ad7780_probe(struct spi_device *spi) ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info); - st->reg = devm_regulator_get(&spi->dev, "vcc"); - if (!IS_ERR(st->reg)) { - ret = regulator_enable(st->reg); - if (ret) - return ret; - - voltage_uv = regulator_get_voltage(st->reg); + st->reg = devm_regulator_get(&spi->dev, "avdd"); + if (IS_ERR(st->reg)) + return PTR_ERR(st->reg); + + ret = regulator_enable(st->reg); + if (ret) { + dev_err(&spi->dev, "Failed to enable specified AVdd supply\n"); + return ret; } + voltage_uv = regulator_get_voltage(st->reg); st->chip_info = &ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data]; @@ -222,8 +224,7 @@ static int ad7780_probe(struct spi_device *spi) error_cleanup_buffer_and_trigger: ad_sd_cleanup_buffer_and_trigger(indio_dev); error_disable_reg: - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); + regulator_disable(st->reg); return ret; } @@ -236,8 +237,7 @@ static int ad7780_remove(struct spi_device *spi) iio_device_unregister(indio_dev); ad_sd_cleanup_buffer_and_trigger(indio_dev); - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); + regulator_disable(st->reg); return 0; } diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c index 5e8115b0101159377c2c06f9669b7fe653276e97..72551f827382d6c17fd0d599b368ccf8930e7034 100644 --- a/drivers/staging/iio/adc/ad7816.c +++ b/drivers/staging/iio/adc/ad7816.c @@ -327,7 +327,7 @@ static struct attribute *ad7816_event_attributes[] = { NULL, }; -static struct attribute_group ad7816_event_attribute_group = { +static const struct attribute_group ad7816_event_attribute_group = { .attrs = ad7816_event_attributes, .name = "events", }; diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c index 3faffe59c9336768394533faffc66847e7b01071..a7d90c8bac5e54827eed43c0993121c2ea77f57c 100644 --- a/drivers/staging/iio/addac/adt7316.c +++ b/drivers/staging/iio/addac/adt7316.c @@ -2039,7 +2039,7 @@ static struct attribute *adt7316_event_attributes[] = { NULL, }; -static struct attribute_group adt7316_event_attribute_group = { +static const struct attribute_group adt7316_event_attribute_group = { .attrs = adt7316_event_attributes, .name = "events", }; @@ -2060,7 +2060,7 @@ static struct attribute *adt7516_event_attributes[] = { NULL, }; -static struct attribute_group adt7516_event_attribute_group = { +static const struct attribute_group adt7516_event_attribute_group = { .attrs = adt7516_event_attributes, .name = "events", }; diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c index 5578a077fcfbd2699209530fa9c1a1605bb0007c..6998c3ddfb6ab71c5d51518775f4ec02c56ecbbb 100644 --- a/drivers/staging/iio/cdc/ad7150.c +++ b/drivers/staging/iio/cdc/ad7150.c @@ -562,7 +562,7 @@ static struct attribute *ad7150_event_attributes[] = { NULL, }; -static struct attribute_group ad7150_event_attribute_group = { +static const struct attribute_group ad7150_event_attribute_group = { .attrs = ad7150_event_attributes, .name = "events", }; diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c index 485d0a5af53c8ac1f4b74abdec236254de56ffdc..b91b50f345bd9110d6d6552981d27062583f45d4 100644 --- a/drivers/staging/iio/cdc/ad7152.c +++ b/drivers/staging/iio/cdc/ad7152.c @@ -89,6 +89,7 @@ struct ad7152_chip_info { */ u8 filter_rate_setup; 
u8 setup[2]; + struct mutex state_lock; /* protect hardware state */ }; static inline ssize_t ad7152_start_calib(struct device *dev, @@ -115,10 +116,10 @@ static inline ssize_t ad7152_start_calib(struct device *dev, else regval |= AD7152_CONF_CH2EN; - mutex_lock(&indio_dev->mlock); + mutex_lock(&chip->state_lock); ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval); if (ret < 0) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&chip->state_lock); return ret; } @@ -126,14 +127,15 @@ static inline ssize_t ad7152_start_calib(struct device *dev, mdelay(20); ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG); if (ret < 0) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&chip->state_lock); return ret; } } while ((ret == regval) && timeout--); - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&chip->state_lock); return len; } + static ssize_t ad7152_start_offset_calib(struct device *dev, struct device_attribute *attr, const char *buf, @@ -142,6 +144,7 @@ static ssize_t ad7152_start_offset_calib(struct device *dev, return ad7152_start_calib(dev, attr, buf, len, AD7152_CONF_MODE_OFFS_CAL); } + static ssize_t ad7152_start_gain_calib(struct device *dev, struct device_attribute *attr, const char *buf, @@ -165,63 +168,12 @@ static const unsigned char ad7152_filter_rate_table[][2] = { {200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1}, }; -static ssize_t ad7152_show_filter_rate_setup(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7152_chip_info *chip = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", - ad7152_filter_rate_table[chip->filter_rate_setup][0]); -} - -static ssize_t ad7152_store_filter_rate_setup(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7152_chip_info *chip = iio_priv(indio_dev); - u8 data; - int ret, i; - - ret = kstrtou8(buf, 10, &data); - if (ret < 0) - return ret; - - for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++) - if (data >= ad7152_filter_rate_table[i][0]) - break; - - if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) - i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; - - mutex_lock(&indio_dev->mlock); - ret = i2c_smbus_write_byte_data(chip->client, - AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); - if (ret < 0) { - mutex_unlock(&indio_dev->mlock); - return ret; - } - - chip->filter_rate_setup = i; - mutex_unlock(&indio_dev->mlock); - - return len; -} - -static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, - ad7152_show_filter_rate_setup, - ad7152_store_filter_rate_setup); - static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17"); static IIO_CONST_ATTR(in_capacitance_scale_available, "0.000061050 0.000030525 0.000015263 0.000007631"); static struct attribute *ad7152_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr, &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr, &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr, @@ -247,6 +199,51 @@ static const int ad7152_scale_table[] = { 30525, 7631, 15263, 61050 }; +/** + * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ + * + * lock must be held + **/ +static int ad7152_read_raw_samp_freq(struct device *dev, int *val) +{ + struct ad7152_chip_info *chip = iio_priv(dev_to_iio_dev(dev)); + + *val = ad7152_filter_rate_table[chip->filter_rate_setup][0]; + + return 0; +} + +/** + * write_raw handler for 
IIO_CHAN_INFO_SAMP_FREQ + * + * lock must be held + **/ +static int ad7152_write_raw_samp_freq(struct device *dev, int val) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct ad7152_chip_info *chip = iio_priv(indio_dev); + int ret, i; + + for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++) + if (val >= ad7152_filter_rate_table[i][0]) + break; + + if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) + i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; + + mutex_lock(&chip->state_lock); + ret = i2c_smbus_write_byte_data(chip->client, + AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); + if (ret < 0) { + mutex_unlock(&chip->state_lock); + return ret; + } + + chip->filter_rate_setup = i; + mutex_unlock(&chip->state_lock); + + return ret; +} static int ad7152_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, @@ -256,7 +253,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev, struct ad7152_chip_info *chip = iio_priv(indio_dev); int ret, i; - mutex_lock(&indio_dev->mlock); + mutex_lock(&chip->state_lock); switch (mask) { case IIO_CHAN_INFO_CALIBSCALE: @@ -307,6 +304,17 @@ static int ad7152_write_raw(struct iio_dev *indio_dev, if (ret < 0) goto out; + ret = 0; + break; + case IIO_CHAN_INFO_SAMP_FREQ: + if (val2) { + ret = -EINVAL; + goto out; + } + ret = ad7152_write_raw_samp_freq(&indio_dev->dev, val); + if (ret < 0) + goto out; + ret = 0; break; default: @@ -314,9 +322,10 @@ static int ad7152_write_raw(struct iio_dev *indio_dev, } out: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&chip->state_lock); return ret; } + static int ad7152_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, @@ -326,7 +335,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev, int ret; u8 regval = 0; - mutex_lock(&indio_dev->mlock); + mutex_lock(&chip->state_lock); switch (mask) { case IIO_CHAN_INFO_RAW: @@ -403,11 +412,18 @@ static int ad7152_read_raw(struct iio_dev *indio_dev, ret = IIO_VAL_INT_PLUS_NANO; break; + case IIO_CHAN_INFO_SAMP_FREQ: + ret = ad7152_read_raw_samp_freq(&indio_dev->dev, val); + if (ret < 0) + goto out; + + ret = IIO_VAL_INT; + break; default: ret = -EINVAL; } out: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&chip->state_lock); return ret; } @@ -440,6 +456,7 @@ static const struct iio_chan_spec ad7152_channels[] = { BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), }, { .type = IIO_CAPACITANCE, .differential = 1, @@ -450,6 +467,7 @@ static const struct iio_chan_spec ad7152_channels[] = { BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), }, { .type = IIO_CAPACITANCE, .indexed = 1, @@ -458,6 +476,7 @@ static const struct iio_chan_spec ad7152_channels[] = { BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), }, { .type = IIO_CAPACITANCE, .differential = 1, @@ -468,8 +487,10 @@ static const struct iio_chan_spec ad7152_channels[] = { BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), } }; + /* * device probe and remove */ @@ -489,6 +510,7 @@ static int ad7152_probe(struct i2c_client *client, i2c_set_clientdata(client, indio_dev); chip->client = client; + mutex_init(&chip->state_lock); /* Establish that the iio_dev is a child of the 
i2c device */ indio_dev->name = id->name; diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c index 5771d4ee8ef106755b84a82cf8bbc7bc350b53f2..81f8b9ee11208f34d061e57dbebf3566835592bc 100644 --- a/drivers/staging/iio/cdc/ad7746.c +++ b/drivers/staging/iio/cdc/ad7746.c @@ -70,8 +70,10 @@ #define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0) /* Config Register Bit Designations (AD7746_REG_CFG) */ -#define AD7746_CONF_VTFS(x) ((x) << 6) -#define AD7746_CONF_CAPFS(x) ((x) << 3) +#define AD7746_CONF_VTFS_SHIFT 6 +#define AD7746_CONF_CAPFS_SHIFT 3 +#define AD7746_CONF_VTFS_MASK GENMASK(7, 6) +#define AD7746_CONF_CAPFS_MASK GENMASK(5, 3) #define AD7746_CONF_MODE_IDLE (0 << 0) #define AD7746_CONF_MODE_CONT_CONV (1 << 0) #define AD7746_CONF_MODE_SINGLE_CONV (2 << 0) @@ -122,7 +124,8 @@ static const struct iio_chan_spec ad7746_channels[] = { .indexed = 1, .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7746_REG_VT_DATA_HIGH << 8 | AD7746_VTSETUP_VTMD_EXT_VIN, }, @@ -132,7 +135,8 @@ static const struct iio_chan_spec ad7746_channels[] = { .channel = 1, .extend_name = "supply", .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7746_REG_VT_DATA_HIGH << 8 | AD7746_VTSETUP_VTMD_VDD_MON, }, @@ -159,7 +163,7 @@ static const struct iio_chan_spec ad7746_channels[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) | - BIT(IIO_CHAN_INFO_SCALE), + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7746_REG_CAP_DATA_HIGH << 8, }, [CIN1_DIFF] = { @@ -171,7 +175,7 @@ static const struct iio_chan_spec ad7746_channels[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) | - BIT(IIO_CHAN_INFO_SCALE), + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7746_REG_CAP_DATA_HIGH << 8 | AD7746_CAPSETUP_CAPDIFF }, @@ -182,7 +186,7 @@ static const struct iio_chan_spec ad7746_channels[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) | - BIT(IIO_CHAN_INFO_SCALE), + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7746_REG_CAP_DATA_HIGH << 8 | AD7746_CAPSETUP_CIN2, }, @@ -195,7 +199,7 @@ static const struct iio_chan_spec ad7746_channels[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) | - BIT(IIO_CHAN_INFO_SCALE), + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7746_REG_CAP_DATA_HIGH << 8 | AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2, } @@ -215,15 +219,16 @@ static int ad7746_select_channel(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { struct ad7746_chip_info *chip = iio_priv(indio_dev); - int ret, delay; + int ret, delay, idx; u8 vt_setup, cap_setup; switch (chan->type) { case IIO_CAPACITANCE: cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN; vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN; - delay = 
ad7746_cap_filter_rate_table[(chip->config >> 3) & - 0x7][1]; + idx = (chip->config & AD7746_CONF_CAPFS_MASK) >> + AD7746_CONF_CAPFS_SHIFT; + delay = ad7746_cap_filter_rate_table[idx][1]; if (chip->capdac_set != chan->channel) { ret = i2c_smbus_write_byte_data(chip->client, @@ -244,8 +249,9 @@ static int ad7746_select_channel(struct iio_dev *indio_dev, case IIO_TEMP: vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN; cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN; - delay = ad7746_cap_filter_rate_table[(chip->config >> 6) & - 0x3][1]; + idx = (chip->config & AD7746_CONF_VTFS_MASK) >> + AD7746_CONF_VTFS_SHIFT; + delay = ad7746_cap_filter_rate_table[idx][1]; break; default: return -EINVAL; @@ -355,101 +361,47 @@ static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration, static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration, S_IWUSR, NULL, ad7746_start_gain_calib, VIN); -static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev, - struct device_attribute *attr, - char *buf) +static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip, + int val) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7746_chip_info *chip = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[ - (chip->config >> 3) & 0x7][0]); -} - -static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7746_chip_info *chip = iio_priv(indio_dev); - u8 data; - int ret, i; - - ret = kstrtou8(buf, 10, &data); - if (ret < 0) - return ret; + int i; for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++) - if (data >= ad7746_cap_filter_rate_table[i][0]) + if (val >= ad7746_cap_filter_rate_table[i][0]) break; if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table)) i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1; - mutex_lock(&indio_dev->mlock); - chip->config &= ~AD7746_CONF_CAPFS(0x7); - chip->config |= AD7746_CONF_CAPFS(i); - mutex_unlock(&indio_dev->mlock); + chip->config &= ~AD7746_CONF_CAPFS_MASK; + chip->config |= i << AD7746_CONF_CAPFS_SHIFT; - return len; + return 0; } -static ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev, - struct device_attribute *attr, - char *buf) +static int ad7746_store_vt_filter_rate_setup(struct ad7746_chip_info *chip, + int val) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7746_chip_info *chip = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[ - (chip->config >> 6) & 0x3][0]); -} - -static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7746_chip_info *chip = iio_priv(indio_dev); - u8 data; - int ret, i; - - ret = kstrtou8(buf, 10, &data); - if (ret < 0) - return ret; + int i; for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++) - if (data >= ad7746_vt_filter_rate_table[i][0]) + if (val >= ad7746_vt_filter_rate_table[i][0]) break; if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table)) i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1; - mutex_lock(&indio_dev->mlock); - chip->config &= ~AD7746_CONF_VTFS(0x3); - chip->config |= AD7746_CONF_VTFS(i); - mutex_unlock(&indio_dev->mlock); + chip->config &= ~AD7746_CONF_VTFS_MASK; + chip->config |= i << AD7746_CONF_VTFS_SHIFT; - return len; + return 0; } -static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency, - S_IRUGO 
| S_IWUSR, ad7746_show_cap_filter_rate_setup, - ad7746_store_cap_filter_rate_setup, 0); - -static IIO_DEVICE_ATTR(in_voltage_sampling_frequency, - S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup, - ad7746_store_vt_filter_rate_setup, 0); - static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8"); static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available, "91 84 50 26 16 13 11 9"); static struct attribute *ad7746_attributes[] = { - &iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr, - &iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr, &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr, &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr, &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr, @@ -547,6 +499,23 @@ static int ad7746_write_raw(struct iio_dev *indio_dev, ret = 0; break; + case IIO_CHAN_INFO_SAMP_FREQ: + if (val2) { + ret = -EINVAL; + goto out; + } + + switch (chan->type) { + case IIO_CAPACITANCE: + ret = ad7746_store_cap_filter_rate_setup(chip, val); + break; + case IIO_VOLTAGE: + ret = ad7746_store_vt_filter_rate_setup(chip, val); + break; + default: + ret = -EINVAL; + } + break; default: ret = -EINVAL; } @@ -562,7 +531,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev, long mask) { struct ad7746_chip_info *chip = iio_priv(indio_dev); - int ret, delay; + int ret, delay, idx; u8 regval, reg; mutex_lock(&indio_dev->mlock); @@ -666,6 +635,24 @@ static int ad7746_read_raw(struct iio_dev *indio_dev, break; } + break; + case IIO_CHAN_INFO_SAMP_FREQ: + switch (chan->type) { + case IIO_CAPACITANCE: + idx = (chip->config & AD7746_CONF_CAPFS_MASK) >> + AD7746_CONF_CAPFS_SHIFT; + *val = ad7746_cap_filter_rate_table[idx][0]; + ret = IIO_VAL_INT; + break; + case IIO_VOLTAGE: + idx = (chip->config & AD7746_CONF_VTFS_MASK) >> + AD7746_CONF_VTFS_SHIFT; + *val = ad7746_vt_filter_rate_table[idx][0]; + ret = IIO_VAL_INT; + break; + default: + ret = -EINVAL; + } break; default: ret = -EINVAL; diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c index 358400b22d33356d606dead1a1de0b2fcd424ebd..a5b2f068168dc5664ab4f8a0cfca240cfd2cfb6e 100644 --- a/drivers/staging/iio/frequency/ad9832.c +++ b/drivers/staging/iio/frequency/ad9832.c @@ -204,7 +204,6 @@ static int ad9832_probe(struct spi_device *spi) struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev); struct iio_dev *indio_dev; struct ad9832_state *st; - struct regulator *reg; int ret; if (!pdata) { @@ -212,21 +211,35 @@ static int ad9832_probe(struct spi_device *spi) return -ENODEV; } - reg = devm_regulator_get(&spi->dev, "vcc"); - if (!IS_ERR(reg)) { - ret = regulator_enable(reg); - if (ret) - return ret; - } - indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); - if (!indio_dev) { - ret = -ENOMEM; - goto error_disable_reg; - } + if (!indio_dev) + return -ENOMEM; + spi_set_drvdata(spi, indio_dev); st = iio_priv(indio_dev); - st->reg = reg; + + st->avdd = devm_regulator_get(&spi->dev, "avdd"); + if (IS_ERR(st->avdd)) + return PTR_ERR(st->avdd); + + ret = regulator_enable(st->avdd); + if (ret) { + dev_err(&spi->dev, "Failed to enable specified AVDD supply\n"); + return ret; + } + + st->dvdd = devm_regulator_get(&spi->dev, "dvdd"); + if (IS_ERR(st->dvdd)) { + ret = PTR_ERR(st->dvdd); + goto error_disable_avdd; + } + + ret = regulator_enable(st->dvdd); + if (ret) { + dev_err(&spi->dev, "Failed to enable specified DVDD supply\n"); + goto error_disable_avdd; + } + st->mclk = pdata->mclk; st->spi = 
spi; @@ -277,42 +290,43 @@ static int ad9832_probe(struct spi_device *spi) ret = spi_sync(st->spi, &st->msg); if (ret) { dev_err(&spi->dev, "device init failed\n"); - goto error_disable_reg; + goto error_disable_dvdd; } ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0); if (ret) - goto error_disable_reg; + goto error_disable_dvdd; ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1); if (ret) - goto error_disable_reg; + goto error_disable_dvdd; ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0); if (ret) - goto error_disable_reg; + goto error_disable_dvdd; ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1); if (ret) - goto error_disable_reg; + goto error_disable_dvdd; ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2); if (ret) - goto error_disable_reg; + goto error_disable_dvdd; ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3); if (ret) - goto error_disable_reg; + goto error_disable_dvdd; ret = iio_device_register(indio_dev); if (ret) - goto error_disable_reg; + goto error_disable_dvdd; return 0; -error_disable_reg: - if (!IS_ERR(reg)) - regulator_disable(reg); +error_disable_dvdd: + regulator_disable(st->dvdd); +error_disable_avdd: + regulator_disable(st->avdd); return ret; } @@ -323,8 +337,8 @@ static int ad9832_remove(struct spi_device *spi) struct ad9832_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); + regulator_disable(st->dvdd); + regulator_disable(st->avdd); return 0; } diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h index d32323b46be68f70888de6de1852c1e15eaa387c..1b08b04482a4bb8b88f9c665d450be23af907c60 100644 --- a/drivers/staging/iio/frequency/ad9832.h +++ b/drivers/staging/iio/frequency/ad9832.h @@ -58,7 +58,8 @@ /** * struct ad9832_state - driver instance specific data * @spi: spi_device - * @reg: supply regulator + * @avdd: supply regulator for the analog section + * @dvdd: supply regulator for the digital section * @mclk: external master clock * @ctrl_fp: cached frequency/phase control word * @ctrl_ss: cached sync/selsrc control word @@ -76,7 +77,8 @@ struct ad9832_state { struct spi_device *spi; - struct regulator *reg; + struct regulator *avdd; + struct regulator *dvdd; unsigned long mclk; unsigned short ctrl_fp; unsigned short ctrl_ss; diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c index 6366216e4f3783cabbf8c2270c0f54caa9a5b448..19216af1dfc92e5bf22b14a60272272d9b123a06 100644 --- a/drivers/staging/iio/frequency/ad9834.c +++ b/drivers/staging/iio/frequency/ad9834.c @@ -329,11 +329,14 @@ static int ad9834_probe(struct spi_device *spi) return -ENODEV; } - reg = devm_regulator_get(&spi->dev, "vcc"); - if (!IS_ERR(reg)) { - ret = regulator_enable(reg); - if (ret) - return ret; + reg = devm_regulator_get(&spi->dev, "avdd"); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + ret = regulator_enable(reg); + if (ret) { + dev_err(&spi->dev, "Failed to enable specified AVDD supply\n"); + return ret; } indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); @@ -416,8 +419,7 @@ static int ad9834_probe(struct spi_device *spi) return 0; error_disable_reg: - if (!IS_ERR(reg)) - regulator_disable(reg); + regulator_disable(reg); return ret; } @@ -428,8 +430,7 @@ static int ad9834_remove(struct spi_device *spi) struct ad9834_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); + 
regulator_disable(st->reg); return 0; } diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 5eecf1cb1028892796872ad592daf3a5e4d89e3c..9447898439380b344c49f87c49916cb0050f4989 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -655,6 +655,7 @@ static void ad5933_work(struct work_struct *work) __be16 buf[2]; int val[2]; unsigned char status; + int ret; mutex_lock(&indio_dev->mlock); if (st->state == AD5933_CTRL_INIT_START_FREQ) { @@ -662,19 +663,22 @@ static void ad5933_work(struct work_struct *work) ad5933_cmd(st, AD5933_CTRL_START_SWEEP); st->state = AD5933_CTRL_START_SWEEP; schedule_delayed_work(&st->work, st->poll_time_jiffies); - mutex_unlock(&indio_dev->mlock); - return; + goto out; } - ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); + ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); + if (ret) + goto out; if (status & AD5933_STAT_DATA_VALID) { int scan_count = bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength); - ad5933_i2c_read(st->client, + ret = ad5933_i2c_read(st->client, test_bit(1, indio_dev->active_scan_mask) ? AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, scan_count * 2, (u8 *)buf); + if (ret) + goto out; if (scan_count == 2) { val[0] = be16_to_cpu(buf[0]); @@ -686,8 +690,7 @@ static void ad5933_work(struct work_struct *work) } else { /* no data available - try again later */ schedule_delayed_work(&st->work, st->poll_time_jiffies); - mutex_unlock(&indio_dev->mlock); - return; + goto out; } if (status & AD5933_STAT_SWEEP_DONE) { @@ -700,7 +703,7 @@ static void ad5933_work(struct work_struct *work) ad5933_cmd(st, AD5933_CTRL_INC_FREQ); schedule_delayed_work(&st->work, st->poll_time_jiffies); } - +out: mutex_unlock(&indio_dev->mlock); } @@ -723,13 +726,16 @@ static int ad5933_probe(struct i2c_client *client, if (!pdata) pdata = &ad5933_default_pdata; - st->reg = devm_regulator_get(&client->dev, "vcc"); - if (!IS_ERR(st->reg)) { - ret = regulator_enable(st->reg); - if (ret) - return ret; - voltage_uv = regulator_get_voltage(st->reg); + st->reg = devm_regulator_get(&client->dev, "vdd"); + if (IS_ERR(st->reg)) + return PTR_ERR(st->reg); + + ret = regulator_enable(st->reg); + if (ret) { + dev_err(&client->dev, "Failed to enable specified VDD supply\n"); + return ret; } + voltage_uv = regulator_get_voltage(st->reg); if (voltage_uv) st->vref_mv = voltage_uv / 1000; @@ -772,8 +778,7 @@ static int ad5933_probe(struct i2c_client *client, error_unreg_ring: iio_kfifo_free(indio_dev->buffer); error_disable_reg: - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); + regulator_disable(st->reg); return ret; } @@ -785,8 +790,7 @@ static int ad5933_remove(struct i2c_client *client) iio_device_unregister(indio_dev); iio_kfifo_free(indio_dev->buffer); - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); + regulator_disable(st->reg); return 0; } diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig index ca8d6e66c899dd1fb1858b7da52f07b09ed8506d..4fbf6298c0f34e1011483df5ec24ccbad9bc1db9 100644 --- a/drivers/staging/iio/light/Kconfig +++ b/drivers/staging/iio/light/Kconfig @@ -3,18 +3,6 @@ # menu "Light sensors" -config SENSORS_ISL29018 - tristate "ISL 29018 light and proximity sensor" - depends on I2C - select REGMAP_I2C - default n - help - If you say yes here you get support for ambient light sensing and - proximity infrared sensing from Intersil ISL29018. 
- This driver will provide the measurements of ambient light intensity - in lux, proximity infrared sensing and normal infrared sensing. - Data from sensor is accessible via sysfs. - config SENSORS_ISL29028 tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor" depends on I2C @@ -25,13 +13,6 @@ config SENSORS_ISL29028 Proximity value via iio. The ISL29028 provides the concurrent sensing of ambient light and proximity. -config TSL2583 - tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters" - depends on I2C - help - Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices. - Access ALS data via iio, sysfs. - config TSL2x7x tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors" depends on I2C diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile index 9960fdf7c15b5b9746bd233c04971d43d22b5264..f8693e9fdc94437843f2593066440c43564e5896 100644 --- a/drivers/staging/iio/light/Makefile +++ b/drivers/staging/iio/light/Makefile @@ -2,7 +2,5 @@ # Makefile for industrial I/O Light sensors # -obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o -obj-$(CONFIG_TSL2583) += tsl2583.o obj-$(CONFIG_TSL2x7x) += tsl2x7x_core.o diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c deleted file mode 100644 index 08f1583ee34e92a79e3cbda3fbb5c46abfd84af9..0000000000000000000000000000000000000000 --- a/drivers/staging/iio/light/tsl2583.c +++ /dev/null @@ -1,963 +0,0 @@ -/* - * Device driver for monitoring ambient light intensity (lux) - * within the TAOS tsl258x family of devices (tsl2580, tsl2581). - * - * Copyright (c) 2011, TAOS Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define TSL258X_MAX_DEVICE_REGS 32 - -/* Triton register offsets */ -#define TSL258X_REG_MAX 8 - -/* Device Registers and Masks */ -#define TSL258X_CNTRL 0x00 -#define TSL258X_ALS_TIME 0X01 -#define TSL258X_INTERRUPT 0x02 -#define TSL258X_GAIN 0x07 -#define TSL258X_REVID 0x11 -#define TSL258X_CHIPID 0x12 -#define TSL258X_ALS_CHAN0LO 0x14 -#define TSL258X_ALS_CHAN0HI 0x15 -#define TSL258X_ALS_CHAN1LO 0x16 -#define TSL258X_ALS_CHAN1HI 0x17 -#define TSL258X_TMR_LO 0x18 -#define TSL258X_TMR_HI 0x19 - -/* tsl2583 cmd reg masks */ -#define TSL258X_CMD_REG 0x80 -#define TSL258X_CMD_SPL_FN 0x60 -#define TSL258X_CMD_ALS_INT_CLR 0X01 - -/* tsl2583 cntrl reg masks */ -#define TSL258X_CNTL_ADC_ENBL 0x02 -#define TSL258X_CNTL_PWR_ON 0x01 - -/* tsl2583 status reg masks */ -#define TSL258X_STA_ADC_VALID 0x01 -#define TSL258X_STA_ADC_INTR 0x10 - -/* Lux calculation constants */ -#define TSL258X_LUX_CALC_OVER_FLOW 65535 - -enum { - TSL258X_CHIP_UNKNOWN = 0, - TSL258X_CHIP_WORKING = 1, - TSL258X_CHIP_SUSPENDED = 2 -}; - -/* Per-device data */ -struct taos_als_info { - u16 als_ch0; - u16 als_ch1; - u16 lux; -}; - -struct taos_settings { - int als_time; - int als_gain; - int als_gain_trim; - int als_cal_target; -}; - -struct tsl2583_chip { - struct mutex als_mutex; - struct i2c_client *client; - struct taos_als_info als_cur_info; - struct taos_settings taos_settings; - int als_time_scale; - int als_saturation; - int taos_chip_status; - u8 taos_config[8]; -}; - -/* - * Initial values for device - this values can/will be changed by driver. - * and applications as needed. - * These values are dynamic. - */ -static const u8 taos_config[8] = { - 0x00, 0xee, 0x00, 0x03, 0x00, 0xFF, 0xFF, 0x00 -}; /* cntrl atime intC Athl0 Athl1 Athh0 Athh1 gain */ - -struct taos_lux { - unsigned int ratio; - unsigned int ch0; - unsigned int ch1; -}; - -/* This structure is intentionally large to accommodate updates via sysfs. */ -/* Sized to 11 = max 10 segments + 1 termination segment */ -/* Assumption is one and only one type of glass used */ -static struct taos_lux taos_device_lux[11] = { - { 9830, 8520, 15729 }, - { 12452, 10807, 23344 }, - { 14746, 6383, 11705 }, - { 17695, 4063, 6554 }, -}; - -struct gainadj { - s16 ch0; - s16 ch1; -}; - -/* Index = (0 - 3) Used to validate the gain selection index */ -static const struct gainadj gainadj[] = { - { 1, 1 }, - { 8, 8 }, - { 16, 16 }, - { 107, 115 } -}; - -/* - * Provides initial operational parameter defaults. - * These defaults may be changed through the device's sysfs files. - */ -static void taos_defaults(struct tsl2583_chip *chip) -{ - /* Operational parameters */ - chip->taos_settings.als_time = 100; - /* must be a multiple of 50mS */ - chip->taos_settings.als_gain = 0; - /* this is actually an index into the gain table */ - /* assume clear glass as default */ - chip->taos_settings.als_gain_trim = 1000; - /* default gain trim to account for aperture effects */ - chip->taos_settings.als_cal_target = 130; - /* Known external ALS reading used for calibration */ -} - -/* - * Read a number of bytes starting at register (reg) location. - * Return 0, or i2c_smbus_write_byte ERROR code. 
- */ -static int -taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len) -{ - int i, ret; - - for (i = 0; i < len; i++) { - /* select register to write */ - ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | reg)); - if (ret < 0) { - dev_err(&client->dev, - "taos_i2c_read failed to write register %x\n", - reg); - return ret; - } - /* read the data */ - *val = i2c_smbus_read_byte(client); - val++; - reg++; - } - return 0; -} - -/* - * Reads and calculates current lux value. - * The raw ch0 and ch1 values of the ambient light sensed in the last - * integration cycle are read from the device. - * Time scale factor array values are adjusted based on the integration time. - * The raw values are multiplied by a scale factor, and device gain is obtained - * using gain index. Limit checks are done next, then the ratio of a multiple - * of ch1 value, to the ch0 value, is calculated. The array taos_device_lux[] - * declared above is then scanned to find the first ratio value that is just - * above the ratio we just calculated. The ch0 and ch1 multiplier constants in - * the array are then used along with the time scale factor array values, to - * calculate the lux. - */ -static int taos_get_lux(struct iio_dev *indio_dev) -{ - u16 ch0, ch1; /* separated ch0/ch1 data from device */ - u32 lux; /* raw lux calculated from device data */ - u64 lux64; - u32 ratio; - u8 buf[5]; - struct taos_lux *p; - struct tsl2583_chip *chip = iio_priv(indio_dev); - int i, ret; - u32 ch0lux = 0; - u32 ch1lux = 0; - - if (mutex_trylock(&chip->als_mutex) == 0) { - dev_info(&chip->client->dev, "taos_get_lux device is busy\n"); - return chip->als_cur_info.lux; /* busy, so return LAST VALUE */ - } - - if (chip->taos_chip_status != TSL258X_CHIP_WORKING) { - /* device is not enabled */ - dev_err(&chip->client->dev, "taos_get_lux device is not enabled\n"); - ret = -EBUSY; - goto out_unlock; - } - - ret = taos_i2c_read(chip->client, (TSL258X_CMD_REG), &buf[0], 1); - if (ret < 0) { - dev_err(&chip->client->dev, "taos_get_lux failed to read CMD_REG\n"); - goto out_unlock; - } - /* is data new & valid */ - if (!(buf[0] & TSL258X_STA_ADC_INTR)) { - dev_err(&chip->client->dev, "taos_get_lux data not valid\n"); - ret = chip->als_cur_info.lux; /* return LAST VALUE */ - goto out_unlock; - } - - for (i = 0; i < 4; i++) { - int reg = TSL258X_CMD_REG | (TSL258X_ALS_CHAN0LO + i); - - ret = taos_i2c_read(chip->client, reg, &buf[i], 1); - if (ret < 0) { - dev_err(&chip->client->dev, - "taos_get_lux failed to read register %x\n", - reg); - goto out_unlock; - } - } - - /* - * clear status, really interrupt status (interrupts are off), but - * we use the bit anyway - don't forget 0x80 - this is a command - */ - ret = i2c_smbus_write_byte(chip->client, - (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN | - TSL258X_CMD_ALS_INT_CLR)); - - if (ret < 0) { - dev_err(&chip->client->dev, - "taos_i2c_write_command failed in taos_get_lux, err = %d\n", - ret); - goto out_unlock; /* have no data, so return failure */ - } - - /* extract ALS/lux data */ - ch0 = le16_to_cpup((const __le16 *)&buf[0]); - ch1 = le16_to_cpup((const __le16 *)&buf[2]); - - chip->als_cur_info.als_ch0 = ch0; - chip->als_cur_info.als_ch1 = ch1; - - if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation)) - goto return_max; - - if (!ch0) { - /* have no data, so return LAST VALUE */ - ret = 0; - chip->als_cur_info.lux = 0; - goto out_unlock; - } - /* calculate ratio */ - ratio = (ch1 << 15) / ch0; - /* convert to unscaled lux using the pointer to the table */ - for 
(p = (struct taos_lux *)taos_device_lux; - p->ratio != 0 && p->ratio < ratio; p++) - ; - - if (p->ratio == 0) { - lux = 0; - } else { - ch0lux = ((ch0 * p->ch0) + - (gainadj[chip->taos_settings.als_gain].ch0 >> 1)) - / gainadj[chip->taos_settings.als_gain].ch0; - ch1lux = ((ch1 * p->ch1) + - (gainadj[chip->taos_settings.als_gain].ch1 >> 1)) - / gainadj[chip->taos_settings.als_gain].ch1; - lux = ch0lux - ch1lux; - } - - /* note: lux is 31 bit max at this point */ - if (ch1lux > ch0lux) { - dev_dbg(&chip->client->dev, "No Data - Return last value\n"); - ret = 0; - chip->als_cur_info.lux = 0; - goto out_unlock; - } - - /* adjust for active time scale */ - if (chip->als_time_scale == 0) - lux = 0; - else - lux = (lux + (chip->als_time_scale >> 1)) / - chip->als_time_scale; - - /* Adjust for active gain scale. - * The taos_device_lux tables above have a factor of 8192 built in, - * so we need to shift right. - * User-specified gain provides a multiplier. - * Apply user-specified gain before shifting right to retain precision. - * Use 64 bits to avoid overflow on multiplication. - * Then go back to 32 bits before division to avoid using div_u64(). - */ - lux64 = lux; - lux64 = lux64 * chip->taos_settings.als_gain_trim; - lux64 >>= 13; - lux = lux64; - lux = (lux + 500) / 1000; - if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */ -return_max: - lux = TSL258X_LUX_CALC_OVER_FLOW; - } - - /* Update the structure with the latest VALID lux. */ - chip->als_cur_info.lux = lux; - ret = lux; - -out_unlock: - mutex_unlock(&chip->als_mutex); - return ret; -} - -/* - * Obtain single reading and calculate the als_gain_trim (later used - * to derive actual lux). - * Return updated gain_trim value. - */ -static int taos_als_calibrate(struct iio_dev *indio_dev) -{ - struct tsl2583_chip *chip = iio_priv(indio_dev); - u8 reg_val; - unsigned int gain_trim_val; - int ret; - int lux_val; - - ret = i2c_smbus_write_byte(chip->client, - (TSL258X_CMD_REG | TSL258X_CNTRL)); - if (ret < 0) { - dev_err(&chip->client->dev, - "taos_als_calibrate failed to reach the CNTRL register, ret=%d\n", - ret); - return ret; - } - - reg_val = i2c_smbus_read_byte(chip->client); - if ((reg_val & (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) - != (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) { - dev_err(&chip->client->dev, - "taos_als_calibrate failed: device not powered on with ADC enabled\n"); - return -1; - } - - ret = i2c_smbus_write_byte(chip->client, - (TSL258X_CMD_REG | TSL258X_CNTRL)); - if (ret < 0) { - dev_err(&chip->client->dev, - "taos_als_calibrate failed to reach the STATUS register, ret=%d\n", - ret); - return ret; - } - reg_val = i2c_smbus_read_byte(chip->client); - - if ((reg_val & TSL258X_STA_ADC_VALID) != TSL258X_STA_ADC_VALID) { - dev_err(&chip->client->dev, - "taos_als_calibrate failed: STATUS - ADC not valid.\n"); - return -ENODATA; - } - lux_val = taos_get_lux(indio_dev); - if (lux_val < 0) { - dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n"); - return lux_val; - } - gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target) - * chip->taos_settings.als_gain_trim) / lux_val); - - if ((gain_trim_val < 250) || (gain_trim_val > 4000)) { - dev_err(&chip->client->dev, - "taos_als_calibrate failed: trim_val of %d is out of range\n", - gain_trim_val); - return -ENODATA; - } - chip->taos_settings.als_gain_trim = (int)gain_trim_val; - - return (int)gain_trim_val; -} - -/* - * Turn the device on. - * Configuration must be set before calling this function. 
- */ -static int taos_chip_on(struct iio_dev *indio_dev) -{ - int i; - int ret; - u8 *uP; - u8 utmp; - int als_count; - int als_time; - struct tsl2583_chip *chip = iio_priv(indio_dev); - - /* and make sure we're not already on */ - if (chip->taos_chip_status == TSL258X_CHIP_WORKING) { - /* if forcing a register update - turn off, then on */ - dev_info(&chip->client->dev, "device is already enabled\n"); - return -EINVAL; - } - - /* determine als integration register */ - als_count = (chip->taos_settings.als_time * 100 + 135) / 270; - if (!als_count) - als_count = 1; /* ensure at least one cycle */ - - /* convert back to time (encompasses overrides) */ - als_time = (als_count * 27 + 5) / 10; - chip->taos_config[TSL258X_ALS_TIME] = 256 - als_count; - - /* Set the gain based on taos_settings struct */ - chip->taos_config[TSL258X_GAIN] = chip->taos_settings.als_gain; - - /* set chip struct re scaling and saturation */ - chip->als_saturation = als_count * 922; /* 90% of full scale */ - chip->als_time_scale = (als_time + 25) / 50; - - /* - * TSL258x Specific power-on / adc enable sequence - * Power on the device 1st. - */ - utmp = TSL258X_CNTL_PWR_ON; - ret = i2c_smbus_write_byte_data(chip->client, - TSL258X_CMD_REG | TSL258X_CNTRL, utmp); - if (ret < 0) { - dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n"); - return ret; - } - - /* - * Use the following shadow copy for our delay before enabling ADC. - * Write all the registers. - */ - for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) { - ret = i2c_smbus_write_byte_data(chip->client, - TSL258X_CMD_REG + i, - *uP++); - if (ret < 0) { - dev_err(&chip->client->dev, - "taos_chip_on failed on reg %d.\n", i); - return ret; - } - } - - usleep_range(3000, 3500); - /* - * NOW enable the ADC - * initialize the desired mode of operation - */ - utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL; - ret = i2c_smbus_write_byte_data(chip->client, - TSL258X_CMD_REG | TSL258X_CNTRL, - utmp); - if (ret < 0) { - dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n"); - return ret; - } - chip->taos_chip_status = TSL258X_CHIP_WORKING; - - return ret; -} - -static int taos_chip_off(struct iio_dev *indio_dev) -{ - struct tsl2583_chip *chip = iio_priv(indio_dev); - - /* turn device off */ - chip->taos_chip_status = TSL258X_CHIP_SUSPENDED; - return i2c_smbus_write_byte_data(chip->client, - TSL258X_CMD_REG | TSL258X_CNTRL, - 0x00); -} - -/* Sysfs Interface Functions */ - -static ssize_t taos_power_state_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", chip->taos_chip_status); -} - -static ssize_t taos_power_state_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - int value; - - if (kstrtoint(buf, 0, &value)) - return -EINVAL; - - if (!value) - taos_chip_off(indio_dev); - else - taos_chip_on(indio_dev); - - return len; -} - -static ssize_t taos_gain_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - char gain[4] = {0}; - - switch (chip->taos_settings.als_gain) { - case 0: - strcpy(gain, "001"); - break; - case 1: - strcpy(gain, "008"); - break; - case 2: - strcpy(gain, "016"); - break; - case 3: - strcpy(gain, "111"); - break; - } - - return sprintf(buf, 
"%s\n", gain); -} - -static ssize_t taos_gain_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - int value; - - if (kstrtoint(buf, 0, &value)) - return -EINVAL; - - switch (value) { - case 1: - chip->taos_settings.als_gain = 0; - break; - case 8: - chip->taos_settings.als_gain = 1; - break; - case 16: - chip->taos_settings.als_gain = 2; - break; - case 111: - chip->taos_settings.als_gain = 3; - break; - default: - dev_err(dev, "Invalid Gain Index (must be 1,8,16,111)\n"); - return -1; - } - - return len; -} - -static ssize_t taos_gain_available_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%s\n", "1 8 16 111"); -} - -static ssize_t taos_als_time_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", chip->taos_settings.als_time); -} - -static ssize_t taos_als_time_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - int value; - - if (kstrtoint(buf, 0, &value)) - return -EINVAL; - - if ((value < 50) || (value > 650)) - return -EINVAL; - - if (value % 50) - return -EINVAL; - - chip->taos_settings.als_time = value; - - return len; -} - -static ssize_t taos_als_time_available_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%s\n", - "50 100 150 200 250 300 350 400 450 500 550 600 650"); -} - -static ssize_t taos_als_trim_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", chip->taos_settings.als_gain_trim); -} - -static ssize_t taos_als_trim_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - int value; - - if (kstrtoint(buf, 0, &value)) - return -EINVAL; - - if (value) - chip->taos_settings.als_gain_trim = value; - - return len; -} - -static ssize_t taos_als_cal_target_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", chip->taos_settings.als_cal_target); -} - -static ssize_t taos_als_cal_target_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - int value; - - if (kstrtoint(buf, 0, &value)) - return -EINVAL; - - if (value) - chip->taos_settings.als_cal_target = value; - - return len; -} - -static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - int ret; - - ret = taos_get_lux(dev_to_iio_dev(dev)); - if (ret < 0) - return ret; - - return sprintf(buf, "%d\n", ret); -} - -static ssize_t taos_do_calibrate(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - int value; - - if (kstrtoint(buf, 0, &value)) - return -EINVAL; - - if (value == 1) - 
taos_als_calibrate(indio_dev); - - return len; -} - -static ssize_t taos_luxtable_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - int i; - int offset = 0; - - for (i = 0; i < ARRAY_SIZE(taos_device_lux); i++) { - offset += sprintf(buf + offset, "%u,%u,%u,", - taos_device_lux[i].ratio, - taos_device_lux[i].ch0, - taos_device_lux[i].ch1); - if (taos_device_lux[i].ratio == 0) { - /* - * We just printed the first "0" entry. - * Now get rid of the extra "," and break. - */ - offset--; - break; - } - } - - offset += sprintf(buf + offset, "\n"); - return offset; -} - -static ssize_t taos_luxtable_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct tsl2583_chip *chip = iio_priv(indio_dev); - int value[ARRAY_SIZE(taos_device_lux) * 3 + 1]; - int n; - - get_options(buf, ARRAY_SIZE(value), value); - - /* We now have an array of ints starting at value[1], and - * enumerated by value[0]. - * We expect each group of three ints is one table entry, - * and the last table entry is all 0. - */ - n = value[0]; - if ((n % 3) || n < 6 || n > ((ARRAY_SIZE(taos_device_lux) - 1) * 3)) { - dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n); - return -EINVAL; - } - if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) { - dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n); - return -EINVAL; - } - - if (chip->taos_chip_status == TSL258X_CHIP_WORKING) - taos_chip_off(indio_dev); - - /* Zero out the table */ - memset(taos_device_lux, 0, sizeof(taos_device_lux)); - memcpy(taos_device_lux, &value[1], (value[0] * 4)); - - taos_chip_on(indio_dev); - - return len; -} - -static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, - taos_power_state_show, taos_power_state_store); - -static DEVICE_ATTR(illuminance0_calibscale, S_IRUGO | S_IWUSR, - taos_gain_show, taos_gain_store); -static DEVICE_ATTR(illuminance0_calibscale_available, S_IRUGO, - taos_gain_available_show, NULL); - -static DEVICE_ATTR(illuminance0_integration_time, S_IRUGO | S_IWUSR, - taos_als_time_show, taos_als_time_store); -static DEVICE_ATTR(illuminance0_integration_time_available, S_IRUGO, - taos_als_time_available_show, NULL); - -static DEVICE_ATTR(illuminance0_calibbias, S_IRUGO | S_IWUSR, - taos_als_trim_show, taos_als_trim_store); - -static DEVICE_ATTR(illuminance0_input_target, S_IRUGO | S_IWUSR, - taos_als_cal_target_show, taos_als_cal_target_store); - -static DEVICE_ATTR(illuminance0_input, S_IRUGO, taos_lux_show, NULL); -static DEVICE_ATTR(illuminance0_calibrate, S_IWUSR, NULL, taos_do_calibrate); -static DEVICE_ATTR(illuminance0_lux_table, S_IRUGO | S_IWUSR, - taos_luxtable_show, taos_luxtable_store); - -static struct attribute *sysfs_attrs_ctrl[] = { - &dev_attr_power_state.attr, - &dev_attr_illuminance0_calibscale.attr, /* Gain */ - &dev_attr_illuminance0_calibscale_available.attr, - &dev_attr_illuminance0_integration_time.attr, /* I time*/ - &dev_attr_illuminance0_integration_time_available.attr, - &dev_attr_illuminance0_calibbias.attr, /* trim */ - &dev_attr_illuminance0_input_target.attr, - &dev_attr_illuminance0_input.attr, - &dev_attr_illuminance0_calibrate.attr, - &dev_attr_illuminance0_lux_table.attr, - NULL -}; - -static const struct attribute_group tsl2583_attribute_group = { - .attrs = sysfs_attrs_ctrl, -}; - -/* Use the default register values to identify the Taos device */ -static int taos_tsl258x_device(unsigned char *bufp) -{ - return ((bufp[TSL258X_CHIPID] & 0xf0) == 0x90); -} - -static const struct 
iio_info tsl2583_info = { - .attrs = &tsl2583_attribute_group, - .driver_module = THIS_MODULE, -}; - -/* - * Client probe function - When a valid device is found, the driver's device - * data structure is updated, and initialization completes successfully. - */ -static int taos_probe(struct i2c_client *clientp, - const struct i2c_device_id *idp) -{ - int i, ret; - unsigned char buf[TSL258X_MAX_DEVICE_REGS]; - struct tsl2583_chip *chip; - struct iio_dev *indio_dev; - - if (!i2c_check_functionality(clientp->adapter, - I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n"); - return -EOPNOTSUPP; - } - - indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip)); - if (!indio_dev) - return -ENOMEM; - chip = iio_priv(indio_dev); - chip->client = clientp; - i2c_set_clientdata(clientp, indio_dev); - - mutex_init(&chip->als_mutex); - chip->taos_chip_status = TSL258X_CHIP_UNKNOWN; - memcpy(chip->taos_config, taos_config, sizeof(chip->taos_config)); - - for (i = 0; i < TSL258X_MAX_DEVICE_REGS; i++) { - ret = i2c_smbus_write_byte(clientp, - (TSL258X_CMD_REG | (TSL258X_CNTRL + i))); - if (ret < 0) { - dev_err(&clientp->dev, - "i2c_smbus_write_byte to cmd reg failed in taos_probe(), err = %d\n", - ret); - return ret; - } - ret = i2c_smbus_read_byte(clientp); - if (ret < 0) { - dev_err(&clientp->dev, - "i2c_smbus_read_byte from reg failed in taos_probe(), err = %d\n", - ret); - return ret; - } - buf[i] = ret; - } - - if (!taos_tsl258x_device(buf)) { - dev_info(&clientp->dev, - "i2c device found but does not match expected id in taos_probe()\n"); - return -EINVAL; - } - - ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL)); - if (ret < 0) { - dev_err(&clientp->dev, - "i2c_smbus_write_byte() to cmd reg failed in taos_probe(), err = %d\n", - ret); - return ret; - } - - indio_dev->info = &tsl2583_info; - indio_dev->dev.parent = &clientp->dev; - indio_dev->modes = INDIO_DIRECT_MODE; - indio_dev->name = chip->client->name; - ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev); - if (ret) { - dev_err(&clientp->dev, "iio registration failed\n"); - return ret; - } - - /* Load up the V2 defaults (these are hard coded defaults for now) */ - taos_defaults(chip); - - /* Make sure the chip is on */ - taos_chip_on(indio_dev); - - dev_info(&clientp->dev, "Light sensor found.\n"); - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int taos_suspend(struct device *dev) -{ - struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); - struct tsl2583_chip *chip = iio_priv(indio_dev); - int ret = 0; - - mutex_lock(&chip->als_mutex); - - if (chip->taos_chip_status == TSL258X_CHIP_WORKING) { - ret = taos_chip_off(indio_dev); - chip->taos_chip_status = TSL258X_CHIP_SUSPENDED; - } - - mutex_unlock(&chip->als_mutex); - return ret; -} - -static int taos_resume(struct device *dev) -{ - struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); - struct tsl2583_chip *chip = iio_priv(indio_dev); - int ret = 0; - - mutex_lock(&chip->als_mutex); - - if (chip->taos_chip_status == TSL258X_CHIP_SUSPENDED) - ret = taos_chip_on(indio_dev); - - mutex_unlock(&chip->als_mutex); - return ret; -} - -static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume); -#define TAOS_PM_OPS (&taos_pm_ops) -#else -#define TAOS_PM_OPS NULL -#endif - -static struct i2c_device_id taos_idtable[] = { - { "tsl2580", 0 }, - { "tsl2581", 1 }, - { "tsl2583", 2 }, - {} -}; -MODULE_DEVICE_TABLE(i2c, taos_idtable); - -/* Driver definition */ -static struct 
i2c_driver taos_driver = { - .driver = { - .name = "tsl2583", - .pm = TAOS_PM_OPS, - }, - .id_table = taos_idtable, - .probe = taos_probe, -}; -module_i2c_driver(taos_driver); - -MODULE_AUTHOR("J. August Brenner"); -MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c index ebb8a199330374b904a7852daa339a85d3662f49..3af8f77b8e4194e63cfe1c75d44eaf4ece9327ca 100644 --- a/drivers/staging/iio/meter/ade7758_core.c +++ b/drivers/staging/iio/meter/ade7758_core.c @@ -465,38 +465,26 @@ static int ade7758_initial_setup(struct iio_dev *indio_dev) return ret; } -static ssize_t ade7758_read_frequency(struct device *dev, - struct device_attribute *attr, char *buf) +static int ade7758_read_samp_freq(struct device *dev, int *val) { int ret; u8 t; - int sps; ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t); if (ret) return ret; t = (t >> 5) & 0x3; - sps = 26040 / (1 << t); + *val = 26040 / (1 << t); - return sprintf(buf, "%d SPS\n", sps); + return 0; } -static ssize_t ade7758_write_frequency(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static int ade7758_write_samp_freq(struct device *dev, int val) { - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - u16 val; int ret; u8 reg, t; - ret = kstrtou16(buf, 10, &val); - if (ret) - return ret; - - mutex_lock(&indio_dev->mlock); - switch (val) { case 26040: t = 0; @@ -525,9 +513,49 @@ static ssize_t ade7758_write_frequency(struct device *dev, ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg); out: - mutex_unlock(&indio_dev->mlock); + return ret; +} - return ret ? ret : len; +static int ade7758_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, + int *val2, + long mask) +{ + int ret; + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + mutex_lock(&indio_dev->mlock); + ret = ade7758_read_samp_freq(&indio_dev->dev, val); + mutex_unlock(&indio_dev->mlock); + return ret; + default: + return -EINVAL; + } + + return ret; +} + +static int ade7758_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + int ret; + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + if (val2) + return -EINVAL; + mutex_lock(&indio_dev->mlock); + ret = ade7758_write_samp_freq(&indio_dev->dev, val); + mutex_unlock(&indio_dev->mlock); + return ret; + default: + return -EINVAL; + } + + return ret; } static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit); @@ -553,17 +581,12 @@ static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit, static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit, ADE7758_CVAHR); -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, - ade7758_read_frequency, - ade7758_write_frequency); - static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255"); static struct attribute *ade7758_attributes[] = { &iio_dev_attr_in_temp_raw.dev_attr.attr, &iio_const_attr_in_temp_offset.dev_attr.attr, &iio_const_attr_in_temp_scale.dev_attr.attr, - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_awatthr.dev_attr.attr, &iio_dev_attr_bwatthr.dev_attr.attr, @@ -611,6 +634,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE), .scan_index = 0, .scan_type = { @@ -622,6 +646,7 @@ static const struct iio_chan_spec 
ade7758_channels[] = { .type = IIO_CURRENT, .indexed = 1, .channel = 0, + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT), .scan_index = 1, .scan_type = { @@ -634,6 +659,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 0, .extend_name = "apparent", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR), .scan_index = 2, .scan_type = { @@ -646,6 +672,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 0, .extend_name = "active", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR), .scan_index = 3, .scan_type = { @@ -658,6 +685,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 0, .extend_name = "reactive", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR), .scan_index = 4, .scan_type = { @@ -669,6 +697,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 1, + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE), .scan_index = 5, .scan_type = { @@ -680,6 +709,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_CURRENT, .indexed = 1, .channel = 1, + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT), .scan_index = 6, .scan_type = { @@ -692,6 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 1, .extend_name = "apparent", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR), .scan_index = 7, .scan_type = { @@ -704,6 +735,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 1, .extend_name = "active", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR), .scan_index = 8, .scan_type = { @@ -716,6 +748,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 1, .extend_name = "reactive", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR), .scan_index = 9, .scan_type = { @@ -727,6 +760,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 2, + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE), .scan_index = 10, .scan_type = { @@ -738,6 +772,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_CURRENT, .indexed = 1, .channel = 2, + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT), .scan_index = 11, .scan_type = { @@ -750,6 +785,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 2, .extend_name = "apparent", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR), .scan_index = 12, .scan_type = { @@ -762,6 +798,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 2, .extend_name = "active", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR), .scan_index = 13, .scan_type = { @@ -774,6 +811,7 @@ static const 
struct iio_chan_spec ade7758_channels[] = { .indexed = 1, .channel = 2, .extend_name = "reactive", + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR), .scan_index = 14, .scan_type = { @@ -787,6 +825,8 @@ static const struct iio_chan_spec ade7758_channels[] = { static const struct iio_info ade7758_info = { .attrs = &ade7758_attribute_group, + .read_raw = &ade7758_read_raw, + .write_raw = &ade7758_write_raw, .driver_module = THIS_MODULE, }; diff --git a/drivers/staging/iio/ring_hw.h b/drivers/staging/iio/ring_hw.h deleted file mode 100644 index 75bf47bfee7820ac4602239e806093642a841b79..0000000000000000000000000000000000000000 --- a/drivers/staging/iio/ring_hw.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * ring_hw.h - common functionality for iio hardware ring buffers - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * Copyright (c) 2009 Jonathan Cameron - * - */ - -#ifndef _RING_HW_H_ -#define _RING_HW_H_ - -/** - * struct iio_hw_ring_buffer- hardware ring buffer - * @buf: generic ring buffer elements - * @private: device specific data - */ -struct iio_hw_buffer { - struct iio_buffer buf; - void *private; -}; - -#define iio_to_hw_buf(r) container_of(r, struct iio_hw_buffer, buf) - -#endif /* _RING_HW_H_ */ diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c index 81c46f4d0935abac2b8d15f848099ba52ba376b7..a604c83c957e1c68eee1f1567ba4ea60b2b53a29 100644 --- a/drivers/staging/ks7010/ks7010_sdio.c +++ b/drivers/staging/ks7010/ks7010_sdio.c @@ -35,18 +35,18 @@ MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids); /* macro */ #define inc_txqhead(priv) \ - (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE) + (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE) #define inc_txqtail(priv) \ - (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE) + (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE) #define cnt_txqbody(priv) \ - (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE) + (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE) #define inc_rxqhead(priv) \ - (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE) + (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE) #define inc_rxqtail(priv) \ - (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE) + (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE) #define cnt_rxqbody(priv) \ - (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE) + (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE) static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address, unsigned char *buffer, int length) @@ -76,10 +76,9 @@ static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address, card = priv->ks_wlan_hw.sdio_card; if (length == 1) /* CMD52 */ - sdio_writeb(card->func, *buffer, (unsigned int)address, &rc); + sdio_writeb(card->func, *buffer, address, &rc); else /* CMD53 */ - rc = sdio_memcpy_toio(card->func, (unsigned int)address, buffer, - length); + rc = sdio_memcpy_toio(card->func, address, buffer, length); if (rc != 0) DPRINTK(1, "sdio error=%d size=%d\n", rc, length); @@ -255,7 +254,7 @@ int 
ks_wlan_hw_power_save(struct ks_wlan_private *priv) static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p, unsigned long size, - void (*complete_handler) (void *arg1, void *arg2), + void (*complete_handler)(void *arg1, void *arg2), void *arg1, void *arg2) { struct tx_device_buffer *sp; @@ -294,6 +293,7 @@ static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer, int retval; unsigned char rw_data; struct hostif_hdr *hdr; + hdr = (struct hostif_hdr *)buffer; DPRINTK(4, "size=%d\n", hdr->size); @@ -353,11 +353,12 @@ static void tx_device_task(void *dev) } int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size, - void (*complete_handler) (void *arg1, void *arg2), + void (*complete_handler)(void *arg1, void *arg2), void *arg1, void *arg2) { int result = 0; struct hostif_hdr *hdr; + hdr = (struct hostif_hdr *)p; if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) { @@ -412,7 +413,7 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size) /* receive data */ if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) { /* in case of buffer overflow */ - DPRINTK(1, "rx buffer overflow \n"); + DPRINTK(1, "rx buffer overflow\n"); goto error_out; } rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail]; @@ -658,10 +659,12 @@ static void ks_sdio_interrupt(struct sdio_func *func) static int trx_device_init(struct ks_wlan_private *priv) { /* initialize values (tx) */ - priv->tx_dev.qtail = priv->tx_dev.qhead = 0; + priv->tx_dev.qhead = 0; + priv->tx_dev.qtail = 0; /* initialize values (rx) */ - priv->rx_dev.qtail = priv->rx_dev.qhead = 0; + priv->rx_dev.qhead = 0; + priv->rx_dev.qtail = 0; /* initialize spinLock (tx,rx) */ spin_lock_init(&priv->tx_dev.tx_dev_lock); @@ -718,7 +721,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index) return rc; } -#define ROM_BUFF_SIZE (64*1024) +#define ROM_BUFF_SIZE (64 * 1024) static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address, unsigned char *data, unsigned int size) { @@ -955,7 +958,7 @@ static int ks7010_sdio_probe(struct sdio_func *func, priv = NULL; netdev = NULL; - /* initilize ks_sdio_card */ + /* initialize ks_sdio_card */ card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; @@ -1117,6 +1120,7 @@ static void ks7010_sdio_remove(struct sdio_func *func) int ret; struct ks_sdio_card *card; struct ks_wlan_private *priv; + DPRINTK(1, "ks7010_sdio_remove()\n"); card = sdio_get_drvdata(func); @@ -1142,6 +1146,7 @@ static void ks7010_sdio_remove(struct sdio_func *func) /* send stop request to MAC */ { struct hostif_stop_request_t *pp; + pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL); if (!pp) { DPRINTK(3, "allocate memory failed..\n"); diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h index c72064b48bd8fe60b105ca49ddee33a5d80c35fd..0f5fd848e23dee93c50dcdf7ab70c47fa5248b65 100644 --- a/drivers/staging/ks7010/ks7010_sdio.h +++ b/drivers/staging/ks7010/ks7010_sdio.h @@ -1,5 +1,5 @@ /* - * Driver for KeyStream, KS7010 based SDIO cards. + * Driver for KeyStream, KS7010 based SDIO cards. * * Copyright (C) 2006-2008 KeyStream Corp. * Copyright (C) 2009 Renesas Technology Corp. 
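[Illustrative aside, not part of the patch] The ade7758 hunks earlier in this series drop the driver-private sampling_frequency sysfs attribute in favour of the IIO core's read_raw/write_raw callbacks, marking IIO_CHAN_INFO_SAMP_FREQ as shared_by_all on every channel. The following is a minimal, self-contained sketch of that pattern only; the mydrv_ names and the cached samp_freq field are assumptions for illustration and are not the ade7758 code.

/*
 * Sketch (assumed names, not from the patch): expose the sample rate
 * through read_raw/write_raw instead of a hand-rolled sysfs attribute.
 */
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/iio/iio.h>

struct mydrv_state {
	int samp_freq;			/* cached sample rate in Hz */
};

static int mydrv_read_raw(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  int *val, int *val2, long mask)
{
	struct mydrv_state *st = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		*val = st->samp_freq;
		return IIO_VAL_INT;	/* integer result in *val */
	default:
		return -EINVAL;
	}
}

static int mydrv_write_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int val, int val2, long mask)
{
	struct mydrv_state *st = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		if (val2)
			return -EINVAL;	/* only whole-Hz rates */
		st->samp_freq = val;
		return 0;
	default:
		return -EINVAL;
	}
}

/* One rate shared by every channel: mark it shared_by_all. */
static const struct iio_chan_spec mydrv_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
	},
};

static const struct iio_info mydrv_info = {
	.read_raw = mydrv_read_raw,
	.write_raw = mydrv_write_raw,
	.driver_module = THIS_MODULE,
};

With the channel mask in place the IIO core generates the sampling_frequency attribute itself, so the driver's IIO_DEV_ATTR_SAMP_FREQ and its sprintf/kstrtou16 parsing can be removed, which is exactly what the ade7758 hunks above do.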
@@ -41,7 +41,7 @@ /* Write Index Register */ #define WRITE_INDEX 0x000010 -/* Write Status/Read Data Size Register +/* Write Status/Read Data Size Register * for network packet (less than 2048 bytes data) */ #define WSTATUS_RSIZE 0x000014 @@ -53,14 +53,14 @@ /* ARM to SD interrupt Pending */ #define INT_PENDING 0x000024 -#define INT_GCR_B (1<<7) -#define INT_GCR_A (1<<6) -#define INT_WRITE_STATUS (1<<5) -#define INT_WRITE_INDEX (1<<4) -#define INT_WRITE_SIZE (1<<3) -#define INT_READ_STATUS (1<<2) -#define INT_READ_INDEX (1<<1) -#define INT_READ_SIZE (1<<0) +#define INT_GCR_B BIT(7) +#define INT_GCR_A BIT(6) +#define INT_WRITE_STATUS BIT(5) +#define INT_WRITE_INDEX BIT(4) +#define INT_WRITE_SIZE BIT(3) +#define INT_READ_STATUS BIT(2) +#define INT_READ_INDEX BIT(1) +#define INT_READ_SIZE BIT(0) /* General Communication Register A */ #define GCR_A 0x000028 @@ -100,7 +100,7 @@ struct hw_info_t { struct ks_sdio_packet { struct ks_sdio_packet *next; u16 nb; - u8 buffer[0] __attribute__ ((aligned(4))); + u8 buffer[0] __aligned(4); }; struct ks_sdio_card { diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c index c57ca581550a8a4a1ca5905ea906d7afe67bf098..1fbd495e5e635a66dbaa5b86baa3cadacfa33762 100644 --- a/drivers/staging/ks7010/ks_hostif.c +++ b/drivers/staging/ks7010/ks_hostif.c @@ -23,11 +23,11 @@ /* macro */ #define inc_smeqhead(priv) \ - ( priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE ) + (priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE) #define inc_smeqtail(priv) \ - ( priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE ) + (priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE) #define cnt_smeqbody(priv) \ - (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE ) + (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE) #define KS_WLAN_MEM_FLAG (GFP_ATOMIC) @@ -97,11 +97,10 @@ int ks_wlan_do_power_save(struct ks_wlan_private *priv) { DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status)); - if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) { + if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST); - } else { + else priv->dev_state = DEVICE_STATE_READY; - } return 0; } @@ -187,13 +186,7 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info) memcpy(wrqu.ap_addr.sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN); DPRINTK(3, - "IWEVENT: connect bssid=%02x:%02x:%02x:%02x:%02x:%02x\n", - (unsigned char)wrqu.ap_addr.sa_data[0], - (unsigned char)wrqu.ap_addr.sa_data[1], - (unsigned char)wrqu.ap_addr.sa_data[2], - (unsigned char)wrqu.ap_addr.sa_data[3], - (unsigned char)wrqu.ap_addr.sa_data[4], - (unsigned char)wrqu.ap_addr.sa_data[5]); + "IWEVENT: connect bssid=%pM\n", wrqu.ap_addr.sa_data); wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL); } DPRINTK(4, "\n Link AP\n"); @@ -420,16 +413,11 @@ void hostif_data_indication(struct ks_wlan_private *priv) /* needed parameters: count, keyid, key type, TSC */ sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=" - "%02x:%02x:%02x:%02x:%02x:%02x)", + "%pM)", auth_type - 1, eth_hdr-> h_dest[0] & 0x01 ? 
"broad" : - "uni", eth_hdr->h_source[0], - eth_hdr->h_source[1], - eth_hdr->h_source[2], - eth_hdr->h_source[3], - eth_hdr->h_source[4], - eth_hdr->h_source[5]); + "uni", eth_hdr->h_source); memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = strlen(buf); DPRINTK(4, @@ -476,8 +464,6 @@ void hostif_data_indication(struct ks_wlan_private *priv) skb->dev->last_rx = jiffies; netif_rx(skb); } else { - printk(KERN_WARNING - "ks_wlan: Memory squeeze, dropping packet.\n"); priv->nstats.rx_dropped++; } break; @@ -511,8 +497,6 @@ void hostif_data_indication(struct ks_wlan_private *priv) skb->dev->last_rx = jiffies; netif_rx(skb); } else { - printk(KERN_WARNING - "ks_wlan: Memory squeeze, dropping packet.\n"); priv->nstats.rx_dropped++; } break; @@ -560,10 +544,7 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv) dev->dev_addr[5] = priv->eth_addr[5]; dev->dev_addr[6] = 0x00; dev->dev_addr[7] = 0x00; - printk(KERN_INFO - "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n", - priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2], - priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]); + netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr); break; case DOT11_PRODUCT_VERSION: /* firmware version */ @@ -571,8 +552,8 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv) priv->version_size = priv->rx_size; memcpy(priv->firmware_version, priv->rxp, priv->rx_size); priv->firmware_version[priv->rx_size] = '\0'; - printk(KERN_INFO "ks_wlan: firmware ver. = %s\n", - priv->firmware_version); + netdev_info(dev, "firmware ver. = %s\n", + priv->firmware_version); hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION); /* wake_up_interruptible_all(&priv->confirm_wait); */ complete(&priv->confirm_wait); @@ -592,12 +573,12 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv) } else if (priv->eeprom_sum.type == 1) { if (priv->eeprom_sum.result == 0) { priv->eeprom_checksum = EEPROM_NG; - printk("LOCAL_EEPROM_SUM NG\n"); + netdev_info(dev, "LOCAL_EEPROM_SUM NG\n"); } else if (priv->eeprom_sum.result == 1) { priv->eeprom_checksum = EEPROM_OK; } } else { - printk("LOCAL_EEPROM_SUM error!\n"); + netdev_err(dev, "LOCAL_EEPROM_SUM error!\n"); } break; default: @@ -705,15 +686,13 @@ void hostif_mib_set_confirm(struct ks_wlan_private *priv) break; case DOT11_GMK1_TSC: DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status); - if (atomic_read(&priv->psstatus.snooze_guard)) { + if (atomic_read(&priv->psstatus.snooze_guard)) atomic_set(&priv->psstatus.snooze_guard, 0); - } break; case DOT11_GMK2_TSC: DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status); - if (atomic_read(&priv->psstatus.snooze_guard)) { + if (atomic_read(&priv->psstatus.snooze_guard)) atomic_set(&priv->psstatus.snooze_guard, 0); - } break; case LOCAL_PMK: DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status); @@ -766,8 +745,9 @@ void hostif_sleep_confirm(struct ks_wlan_private *priv) static void hostif_start_confirm(struct ks_wlan_private *priv) { -#ifdef WPS +#ifdef WPS union iwreq_data wrqu; + wrqu.data.length = 0; wrqu.data.flags = 0; wrqu.ap_addr.sa_family = ARPHRD_ETHER; @@ -789,6 +769,7 @@ void hostif_connect_indication(struct ks_wlan_private *priv) unsigned int old_status = priv->connect_status; struct net_device *netdev = priv->net_dev; union iwreq_data wrqu0; + connect_code = get_WORD(priv); switch (connect_code) { @@ -894,7 +875,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv) netif_carrier_off(netdev); tmp = FORCE_DISCONNECT & priv->connect_status; priv->connect_status = tmp | 
DISCONNECT_STATUS; - printk("IWEVENT: disconnect\n"); + netdev_info(netdev, "IWEVENT: disconnect\n"); wrqu0.data.length = 0; wrqu0.data.flags = 0; @@ -904,7 +885,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv) && (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) { eth_zero_addr(wrqu0.ap_addr.sa_data); DPRINTK(3, "IWEVENT: disconnect\n"); - printk("IWEVENT: disconnect\n"); + netdev_info(netdev, "IWEVENT: disconnect\n"); DPRINTK(3, "disconnect :: scan_ind_count=%d\n", priv->scan_ind_count); wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL); @@ -928,6 +909,7 @@ static void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv) { uint16_t result_code; + DPRINTK(3, "\n"); result_code = get_WORD(priv); DPRINTK(3, "result code = %d\n", result_code); @@ -993,6 +975,7 @@ void hostif_bss_scan_confirm(struct ks_wlan_private *priv) unsigned int result_code; struct net_device *dev = priv->net_dev; union iwreq_data wrqu; + result_code = get_DWORD(priv); DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code, priv->scan_ind_count); @@ -1110,7 +1093,7 @@ void hostif_event_check(struct ks_wlan_private *priv) case HIF_AP_SET_CONF: default: //DPRINTK(1, "undefined event[%04X]\n", event); - printk("undefined event[%04X]\n", event); + netdev_err(priv->net_dev, "undefined event[%04X]\n", event); /* wake_up_all(&priv->confirm_wait); */ complete(&priv->confirm_wait); break; @@ -1184,9 +1167,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet) eth = (struct ethhdr *)packet->data; if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) { DPRINTK(1, "invalid mac address !!\n"); - DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n", - eth->h_source[0], eth->h_source[1], eth->h_source[2], - eth->h_source[3], eth->h_source[4], eth->h_source[5]); + DPRINTK(1, "ethernet->h_source=%pM\n", eth->h_source); dev_kfree_skb(packet); kfree(pp); return -3; @@ -1244,7 +1225,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet) pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH); /* no encryption */ } else { if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) { - MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) & pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */ + MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) &pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */ (uint8_t *) michel_mic. 
Result); memcpy(p, michel_mic.Result, 8); @@ -1294,10 +1275,11 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet) return result; } -#define ps_confirm_wait_inc(priv) do{if(atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET){ \ - atomic_inc(&priv->psstatus.confirm_wait); \ - /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \ - } }while(0) +#define ps_confirm_wait_inc(priv) do { \ + if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET) { \ + atomic_inc(&priv->psstatus.confirm_wait); \ + /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \ + } } while (0) static void hostif_mib_get_request(struct ks_wlan_private *priv, @@ -1891,6 +1873,7 @@ static void hostif_sme_set_wep(struct ks_wlan_private *priv, int type) { uint32_t val; + switch (type) { case SME_WEP_INDEX_REQUEST: val = cpu_to_le32((uint32_t) (priv->reg.wep_index)); @@ -1936,18 +1919,17 @@ void hostif_sme_set_wep(struct ks_wlan_private *priv, int type) break; } - return; } struct wpa_suite_t { unsigned short size; unsigned char suite[4][CIPHER_ID_LEN]; -} __attribute__ ((packed)); +} __packed; struct rsn_mode_t { uint32_t rsn_mode; uint16_t rsn_capability; -} __attribute__ ((packed)); +} __packed; static void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type) @@ -2125,7 +2107,6 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type) break; } - return; } static @@ -2216,10 +2197,7 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv) } else { hostif_infrastructure_set2_request(priv); DPRINTK(2, - "Infra bssid = %02x:%02x:%02x:%02x:%02x:%02x\n", - priv->reg.bssid[0], priv->reg.bssid[1], - priv->reg.bssid[2], priv->reg.bssid[3], - priv->reg.bssid[4], priv->reg.bssid[5]); + "Infra bssid = %pM\n", priv->reg.bssid); } break; case MODE_ADHOC: @@ -2229,17 +2207,13 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv) } else { hostif_adhoc_set2_request(priv); DPRINTK(2, - "Adhoc bssid = %02x:%02x:%02x:%02x:%02x:%02x\n", - priv->reg.bssid[0], priv->reg.bssid[1], - priv->reg.bssid[2], priv->reg.bssid[3], - priv->reg.bssid[4], priv->reg.bssid[5]); + "Adhoc bssid = %pM\n", priv->reg.bssid); } break; default: break; } - return; } static @@ -2340,7 +2314,6 @@ void hostif_sme_powermgt_set(struct ks_wlan_private *priv) } hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs); - return; } static @@ -2358,13 +2331,13 @@ void hostif_sme_sleep_set(struct ks_wlan_private *priv) break; } - return; } static void hostif_sme_set_key(struct ks_wlan_private *priv, int type) { uint32_t val; + switch (type) { case SME_SET_FLAG: val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked)); @@ -2416,7 +2389,6 @@ void hostif_sme_set_key(struct ks_wlan_private *priv, int type) &priv->wpa.key[2].rx_seq[0]); break; } - return; } static @@ -2427,16 +2399,14 @@ void hostif_sme_set_pmksa(struct ks_wlan_private *priv) struct { uint8_t bssid[ETH_ALEN]; uint8_t pmkid[IW_PMKID_LEN]; - } __attribute__ ((packed)) list[PMK_LIST_MAX]; - } __attribute__ ((packed)) pmkcache; + } __packed list[PMK_LIST_MAX]; + } __packed pmkcache; struct pmk_t *pmk; - struct list_head *ptr; int i; DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size); i = 0; - list_for_each(ptr, &priv->pmklist.head) { - pmk = list_entry(ptr, struct pmk_t, list); + list_for_each_entry(pmk, &priv->pmklist.head, list) { if (i < PMK_LIST_MAX) { memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN); memcpy(pmkcache.list[i].pmkid, pmk->pmkid, @@ -2461,9 +2431,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event) DPRINTK(3, 
"event=%d\n", event); switch (event) { case SME_START: - if (priv->dev_state == DEVICE_STATE_BOOT) { + if (priv->dev_state == DEVICE_STATE_BOOT) hostif_mib_get_request(priv, DOT11_MAC_ADDRESS); - } break; case SME_MULTICAST_REQUEST: hostif_sme_multicast_set(priv); @@ -2508,14 +2477,12 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event) } break; case SME_GET_MAC_ADDRESS: - if (priv->dev_state == DEVICE_STATE_BOOT) { + if (priv->dev_state == DEVICE_STATE_BOOT) hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION); - } break; case SME_GET_PRODUCT_VERSION: - if (priv->dev_state == DEVICE_STATE_BOOT) { + if (priv->dev_state == DEVICE_STATE_BOOT) priv->dev_state = DEVICE_STATE_PREINIT; - } break; case SME_STOP_REQUEST: hostif_stop_request(priv); @@ -2594,9 +2561,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event) /* for power save */ atomic_set(&priv->psstatus.snooze_guard, 0); atomic_set(&priv->psstatus.confirm_wait, 0); - if (priv->dev_state == DEVICE_STATE_PREINIT) { + if (priv->dev_state == DEVICE_STATE_PREINIT) priv->dev_state = DEVICE_STATE_INIT; - } /* wake_up_interruptible_all(&priv->confirm_wait); */ complete(&priv->confirm_wait); break; @@ -2652,7 +2618,6 @@ void hostif_sme_task(unsigned long dev) tasklet_schedule(&priv->sme_task); } } - return; } /* send to Station Management Entity module */ @@ -2672,7 +2637,7 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event) } else { /* in case of buffer overflow */ //DPRINTK(2,"sme queue buffer overflow\n"); - printk("sme queue buffer overflow\n"); + netdev_err(priv->net_dev, "sme queue buffer overflow\n"); } tasklet_schedule(&priv->sme_task); @@ -2736,5 +2701,4 @@ int hostif_init(struct ks_wlan_private *priv) void hostif_exit(struct ks_wlan_private *priv) { tasklet_kill(&priv->sme_task); - return; } diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h index c2cc288ae8991ce6411064e5c0b37ee4a371fade..279e9b06fc4b1733dec35c8754d98e81f91e4b8d 100644 --- a/drivers/staging/ks7010/ks_wlan.h +++ b/drivers/staging/ks7010/ks_wlan.h @@ -14,7 +14,6 @@ #define WPS -#include #include #include #include @@ -25,13 +24,13 @@ #include /* struct net_device_stats, struct sk_buff */ #include #include -#include /* struct atmic_t */ +#include /* struct atomic_t */ #include /* struct timer_list */ #include #include /* struct completion */ #include -#include +#include #include "ks7010_sdio.h" @@ -43,36 +42,36 @@ #endif struct ks_wlan_parameter { - uint8_t operation_mode; /* Operation Mode */ - uint8_t channel; /* Channel */ - uint8_t tx_rate; /* Transmit Rate */ + u8 operation_mode; /* Operation Mode */ + u8 channel; /* Channel */ + u8 tx_rate; /* Transmit Rate */ struct { - uint8_t size; - uint8_t body[16]; + u8 size; + u8 body[16]; } rate_set; - uint8_t bssid[ETH_ALEN]; /* BSSID */ + u8 bssid[ETH_ALEN]; /* BSSID */ struct { - uint8_t size; - uint8_t body[32 + 1]; + u8 size; + u8 body[32 + 1]; } ssid; /* SSID */ - uint8_t preamble; /* Preamble */ - uint8_t powermgt; /* PowerManagementMode */ - uint32_t scan_type; /* AP List Scan Type */ + u8 preamble; /* Preamble */ + u8 powermgt; /* PowerManagementMode */ + u32 scan_type; /* AP List Scan Type */ #define BEACON_LOST_COUNT_MIN 0 #define BEACON_LOST_COUNT_MAX 65535 - uint32_t beacon_lost_count; /* Beacon Lost Count */ - uint32_t rts; /* RTS Threashold */ - uint32_t fragment; /* Fragmentation Threashold */ - uint32_t privacy_invoked; - uint32_t wep_index; + u32 beacon_lost_count; /* Beacon Lost Count */ + u32 rts; /* RTS Threashold */ + 
u32 fragment; /* Fragmentation Threashold */ + u32 privacy_invoked; + u32 wep_index; struct { - uint8_t size; - uint8_t val[13 * 2 + 1]; + u8 size; + u8 val[13 * 2 + 1]; } wep_key[4]; - uint16_t authenticate_type; - uint16_t phy_type; /* 11b/11g/11bg mode type */ - uint16_t cts_mode; /* for 11g/11bg mode cts mode */ - uint16_t phy_info_timer; /* phy information timer */ + u16 authenticate_type; + u16 phy_type; /* 11b/11g/11bg mode type */ + u16 cts_mode; /* for 11g/11bg mode cts mode */ + u16 phy_info_timer; /* phy information timer */ }; enum { @@ -216,37 +215,37 @@ struct hostt_t { #define RSN_IE_BODY_MAX 64 struct rsn_ie_t { - uint8_t id; /* 0xdd = WPA or 0x30 = RSN */ - uint8_t size; /* max ? 255 ? */ - uint8_t body[RSN_IE_BODY_MAX]; + u8 id; /* 0xdd = WPA or 0x30 = RSN */ + u8 size; /* max ? 255 ? */ + u8 body[RSN_IE_BODY_MAX]; } __packed; #ifdef WPS #define WPS_IE_BODY_MAX 255 struct wps_ie_t { - uint8_t id; /* 221 'dd 00 50 F2 04' */ - uint8_t size; /* max ? 255 ? */ - uint8_t body[WPS_IE_BODY_MAX]; + u8 id; /* 221 'dd 00 50 F2 04' */ + u8 size; /* max ? 255 ? */ + u8 body[WPS_IE_BODY_MAX]; } __packed; #endif /* WPS */ struct local_ap_t { - uint8_t bssid[6]; - uint8_t rssi; - uint8_t sq; + u8 bssid[6]; + u8 rssi; + u8 sq; struct { - uint8_t size; - uint8_t body[32]; - uint8_t ssid_pad; + u8 size; + u8 body[32]; + u8 ssid_pad; } ssid; struct { - uint8_t size; - uint8_t body[16]; - uint8_t rate_pad; + u8 size; + u8 body[16]; + u8 rate_pad; } rate_set; - uint16_t capability; - uint8_t channel; - uint8_t noise; + u16 capability; + u8 channel; + u8 noise; struct rsn_ie_t wpa_ie; struct rsn_ie_t rsn_ie; #ifdef WPS @@ -262,15 +261,15 @@ struct local_aplist_t { }; struct local_gain_t { - uint8_t TxMode; - uint8_t RxMode; - uint8_t TxGain; - uint8_t RxGain; + u8 TxMode; + u8 RxMode; + u8 TxGain; + u8 RxGain; }; struct local_eeprom_sum_t { - uint8_t type; - uint8_t result; + u8 type; + u8 result; }; enum { @@ -352,25 +351,25 @@ enum { #define MIC_KEY_SIZE 8 struct wpa_key_t { - uint32_t ext_flags; /* IW_ENCODE_EXT_xxx */ - uint8_t tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ - uint8_t rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ + u32 ext_flags; /* IW_ENCODE_EXT_xxx */ + u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ + u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast * (group) keys or unicast address for * individual keys */ - uint16_t alg; - uint16_t key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */ - uint8_t key_val[IW_ENCODING_TOKEN_MAX]; - uint8_t tx_mic_key[MIC_KEY_SIZE]; - uint8_t rx_mic_key[MIC_KEY_SIZE]; + u16 alg; + u16 key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */ + u8 key_val[IW_ENCODING_TOKEN_MAX]; + u8 tx_mic_key[MIC_KEY_SIZE]; + u8 rx_mic_key[MIC_KEY_SIZE]; }; #define WPA_KEY_INDEX_MAX 4 #define WPA_RX_SEQ_LEN 6 struct mic_failure_t { - uint16_t failure; /* MIC Failure counter 0 or 1 or 2 */ - uint16_t counter; /* 1sec counter 0-60 */ - uint32_t last_failure_time; + u16 failure; /* MIC Failure counter 0 or 1 or 2 */ + u16 counter; /* 1sec counter 0-60 */ + u32 last_failure_time; int stop; /* stop flag */ }; @@ -391,12 +390,12 @@ struct wpa_status_t { #include #define PMK_LIST_MAX 8 struct pmk_list_t { - uint16_t size; + u16 size; struct list_head head; struct pmk_t { struct list_head list; - uint8_t bssid[ETH_ALEN]; - uint8_t pmkid[IW_PMKID_LEN]; + u8 bssid[ETH_ALEN]; + u8 pmkid[IW_PMKID_LEN]; } pmk[PMK_LIST_MAX]; }; @@ -404,7 +403,7 @@ struct pmk_list_t { struct wps_status_t { int wps_enabled; int 
ielen; - uint8_t ie[255]; + u8 ie[255]; }; #endif /* WPS */ @@ -439,7 +438,7 @@ struct ks_wlan_private { struct pmk_list_t pmklist; /* wireless parameter */ struct ks_wlan_parameter reg; - uint8_t current_rate; + u8 current_rate; char nick[IW_ESSID_MAX_SIZE + 1]; @@ -472,24 +471,24 @@ struct ks_wlan_private { /* spinlock_t lock; */ #define FORCE_DISCONNECT 0x80000000 #define CONNECT_STATUS_MASK 0x7FFFFFFF - uint32_t connect_status; /* connect status */ + u32 connect_status; /* connect status */ int infra_status; /* Infractructure status */ - uint8_t data_buff[0x1000]; + u8 data_buff[0x1000]; - uint8_t scan_ssid_len; - uint8_t scan_ssid[IW_ESSID_MAX_SIZE + 1]; + u8 scan_ssid_len; + u8 scan_ssid[IW_ESSID_MAX_SIZE + 1]; struct local_gain_t gain; #ifdef WPS struct net_device *l2_dev; int l2_fd; struct wps_status_t wps; #endif /* WPS */ - uint8_t sleep_mode; + u8 sleep_mode; - uint8_t region; + u8 region; struct local_eeprom_sum_t eeprom_sum; - uint8_t eeprom_checksum; + u8 eeprom_checksum; struct hostt_t hostt; diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index b2b4fa4c38342e3814c8886fd75b95dca7780257..e5d04adaeb1a74cdbd2ef1436b5bcf10f4cdfcbb 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -24,9 +24,9 @@ #include #include #include -#include +#include #include -#include +#include static int wep_on_off; #define WEP_OFF 0 @@ -50,10 +50,10 @@ static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, /* A few details needed for WEP (Wireless Equivalent Privacy) */ #define MAX_KEY_SIZE 13 /* 128 (?) bits */ #define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */ -typedef struct wep_key_t { +struct wep_key { u16 len; u8 key[16]; /* 40-bit and 104-bit keys */ -} wep_key_t; +}; /* Backward compatibility */ #ifndef IW_ENCODE_NOKEY @@ -88,9 +88,9 @@ int ks_wlan_update_phy_information(struct ks_wlan_private *priv) DPRINTK(4, "in_interrupt = %ld\n", in_interrupt()); - if (priv->dev_state < DEVICE_STATE_READY) { + if (priv->dev_state < DEVICE_STATE_READY) return -1; /* not finished initialize */ - } + if (atomic_read(&update_phyinfo)) return 1; @@ -182,19 +182,18 @@ static int ks_wlan_get_name(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ - if (priv->dev_state < DEVICE_STATE_READY) { + if (priv->dev_state < DEVICE_STATE_READY) strcpy(cwrq, "NOT READY!"); - } else if (priv->reg.phy_type == D_11B_ONLY_MODE) { + else if (priv->reg.phy_type == D_11B_ONLY_MODE) strcpy(cwrq, "IEEE 802.11b"); - } else if (priv->reg.phy_type == D_11G_ONLY_MODE) { + else if (priv->reg.phy_type == D_11G_ONLY_MODE) strcpy(cwrq, "IEEE 802.11g"); - } else { + else strcpy(cwrq, "IEEE 802.11b/g"); - } return 0; } @@ -209,9 +208,8 @@ static int ks_wlan_set_freq(struct net_device *dev, (struct ks_wlan_private *)netdev_priv(dev); int rc = -EINPROGRESS; /* Call commit handler */ - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* If setting by frequency, convert to a channel */ @@ -219,6 +217,7 @@ static int ks_wlan_set_freq(struct net_device *dev, (fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) { int f = fwrq->m / 100000; int c = 0; + while ((c < 14) && (f != frequency_list[c])) c++; /* Hack to fall through... 
*/ @@ -257,13 +256,13 @@ static int ks_wlan_get_freq(struct net_device *dev, (struct ks_wlan_private *)netdev_priv(dev); int f; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ - if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) { + if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) f = (int)priv->current_ap.channel; - } else + else f = (int)priv->reg.channel; fwrq->m = frequency_list[f - 1] * 100000; fwrq->e = 1; @@ -283,9 +282,9 @@ static int ks_wlan_set_essid(struct net_device *dev, DPRINTK(2, " %d\n", dwrq->flags); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ /* Check if we asked for `any' */ @@ -301,14 +300,14 @@ static int ks_wlan_set_essid(struct net_device *dev, len--; /* Check the size of the string */ - if (len > IW_ESSID_MAX_SIZE) { + if (len > IW_ESSID_MAX_SIZE) return -EINVAL; - } + #else /* Check the size of the string */ - if (dwrq->length > IW_ESSID_MAX_SIZE + 1) { + if (dwrq->length > IW_ESSID_MAX_SIZE + 1) return -E2BIG; - } + #endif /* Set the SSID */ @@ -340,9 +339,9 @@ static int ks_wlan_get_essid(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ /* Note : if dwrq->flags != 0, we should @@ -385,25 +384,23 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info, DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (priv->reg.operation_mode == MODE_ADHOC || priv->reg.operation_mode == MODE_INFRASTRUCTURE) { - memcpy(priv->reg.bssid, (u8 *) & ap_addr->sa_data, ETH_ALEN); + memcpy(priv->reg.bssid, &ap_addr->sa_data, ETH_ALEN); - if (is_valid_ether_addr((u8 *) priv->reg.bssid)) { + if (is_valid_ether_addr((u8 *)priv->reg.bssid)) priv->need_commit |= SME_MODE_SET; - } + } else { eth_zero_addr(priv->reg.bssid); return -EOPNOTSUPP; } - DPRINTK(2, "bssid = %02x:%02x:%02x:%02x:%02x:%02x\n", - priv->reg.bssid[0], priv->reg.bssid[1], priv->reg.bssid[2], - priv->reg.bssid[3], priv->reg.bssid[4], priv->reg.bssid[5]); + DPRINTK(2, "bssid = %pM\n", priv->reg.bssid); /* Write it to the card */ if (priv->need_commit) { @@ -421,15 +418,14 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ - if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) { + if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN); - } else { + else eth_zero_addr(awrq->sa_data); - } awrq->sa_family = ARPHRD_ETHER; @@ -445,15 +441,14 @@ static int ks_wlan_set_nick(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* Check the size of the string */ - if (dwrq->length > 16 + 1) { + if (dwrq->length > 16 + 1) return -E2BIG; - } + memset(priv->nick, 0, sizeof(priv->nick)); memcpy(priv->nick, extra, dwrq->length); @@ -469,9 +464,9 @@ static int ks_wlan_get_nick(struct net_device *dev, struct 
ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ strncpy(extra, priv->nick, 16); extra[16] = '\0'; @@ -490,9 +485,9 @@ static int ks_wlan_set_rate(struct net_device *dev, (struct ks_wlan_private *)netdev_priv(dev); int i = 0; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (priv->reg.phy_type == D_11B_ONLY_MODE) { if (vwrq->fixed == 1) { @@ -727,13 +722,13 @@ static int ks_wlan_get_rate(struct net_device *dev, DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n", in_interrupt(), atomic_read(&update_phyinfo)); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ - if (!atomic_read(&update_phyinfo)) { + if (!atomic_read(&update_phyinfo)) ks_wlan_update_phy_information(priv); - } + vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000; if (priv->reg.tx_rate == TX_RATE_FIXED) vwrq->fixed = 1; @@ -752,15 +747,15 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info, (struct ks_wlan_private *)netdev_priv(dev); int rthr = vwrq->value; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (vwrq->disabled) rthr = 2347; - if ((rthr < 0) || (rthr > 2347)) { + if ((rthr < 0) || (rthr > 2347)) return -EINVAL; - } + priv->reg.rts = rthr; priv->need_commit |= SME_RTS; @@ -775,9 +770,9 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ vwrq->value = priv->reg.rts; vwrq->disabled = (vwrq->value >= 2347); @@ -796,15 +791,15 @@ static int ks_wlan_set_frag(struct net_device *dev, (struct ks_wlan_private *)netdev_priv(dev); int fthr = vwrq->value; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (vwrq->disabled) fthr = 2346; - if ((fthr < 256) || (fthr > 2346)) { + if ((fthr < 256) || (fthr > 2346)) return -EINVAL; - } + fthr &= ~0x1; /* Get an even value - is it really needed ??? 
*/ priv->reg.fragment = fthr; priv->need_commit |= SME_FRAG; @@ -821,9 +816,9 @@ static int ks_wlan_get_frag(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ vwrq->value = priv->reg.fragment; vwrq->disabled = (vwrq->value >= 2346); @@ -835,7 +830,7 @@ static int ks_wlan_get_frag(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : set Mode of Operation */ static int ks_wlan_set_mode(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = @@ -843,9 +838,9 @@ static int ks_wlan_set_mode(struct net_device *dev, DPRINTK(2, "mode=%d\n", *uwrq); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ switch (*uwrq) { case IW_MODE_ADHOC: @@ -871,15 +866,14 @@ static int ks_wlan_set_mode(struct net_device *dev, /*------------------------------------------------------------------*/ /* Wireless Handler : get Mode of Operation */ static int ks_wlan_get_mode(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* If not managed, assume it's ad-hoc */ @@ -906,16 +900,15 @@ static int ks_wlan_set_encode(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - wep_key_t key; + struct wep_key key; int index = (dwrq->flags & IW_ENCODE_INDEX); int current_index = priv->reg.wep_index; int i; DPRINTK(2, "flags=%04X\n", dwrq->flags); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* index check */ @@ -959,9 +952,9 @@ static int ks_wlan_set_encode(struct net_device *dev, } /* Send the key to the card */ priv->reg.wep_key[index].size = key.len; - for (i = 0; i < (priv->reg.wep_key[index].size); i++) { + for (i = 0; i < (priv->reg.wep_key[index].size); i++) priv->reg.wep_key[index].val[i] = key.key[i]; - } + priv->need_commit |= (SME_WEP_VAL1 << index); priv->reg.wep_index = index; priv->need_commit |= SME_WEP_INDEX; @@ -973,9 +966,9 @@ static int ks_wlan_set_encode(struct net_device *dev, priv->reg.wep_key[2].size = 0; priv->reg.wep_key[3].size = 0; priv->reg.privacy_invoked = 0x00; - if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) { + if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) priv->need_commit |= SME_MODE_SET; - } + priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM; wep_on_off = WEP_OFF; priv->need_commit |= SME_WEP_FLAG; @@ -997,14 +990,14 @@ static int ks_wlan_set_encode(struct net_device *dev, priv->need_commit |= SME_WEP_FLAG; if (dwrq->flags & IW_ENCODE_OPEN) { - if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) { + if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) priv->need_commit |= SME_MODE_SET; - } + priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM; } else if (dwrq->flags & IW_ENCODE_RESTRICTED) { - if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) { + if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) priv->need_commit |= SME_MODE_SET; - } + priv->reg.authenticate_type = 
AUTH_TYPE_SHARED_KEY; } // return -EINPROGRESS; /* Call commit handler */ @@ -1026,9 +1019,9 @@ static int ks_wlan_get_encode(struct net_device *dev, char zeros[16]; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ dwrq->flags = IW_ENCODE_DISABLED; @@ -1056,9 +1049,8 @@ static int ks_wlan_get_encode(struct net_device *dev, /* Copy the key to the user buffer */ if ((index >= 0) && (index < 4)) dwrq->length = priv->reg.wep_key[index].size; - if (dwrq->length > 16) { + if (dwrq->length > 16) dwrq->length = 0; - } #if 1 /* IW_ENCODE_NOKEY; */ if (dwrq->length) { if ((index >= 0) && (index < 4)) @@ -1086,9 +1078,8 @@ static int ks_wlan_get_txpow(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* Not Support */ @@ -1113,9 +1104,8 @@ static int ks_wlan_get_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* Not Support */ @@ -1139,9 +1129,9 @@ static int ks_wlan_get_range(struct net_device *dev, DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ dwrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(*range)); @@ -1267,9 +1257,9 @@ static int ks_wlan_set_power(struct net_device *dev, (struct ks_wlan_private *)netdev_priv(dev); short enabled; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ enabled = vwrq->disabled ? 
0 : 1; if (enabled == 0) { /* 0 */ @@ -1301,9 +1291,8 @@ static int ks_wlan_get_power(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (priv->reg.powermgt > 0) vwrq->disabled = 0; @@ -1322,9 +1311,8 @@ static int ks_wlan_get_iwstats(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ vwrq->qual = 0; /* not supported */ vwrq->level = priv->wstats.qual.level; @@ -1372,9 +1360,8 @@ static int ks_wlan_get_aplist(struct net_device *dev, int i; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ for (i = 0; i < priv->aplist.size; i++) { memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]), @@ -1404,11 +1391,11 @@ static int ks_wlan_set_scan(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); struct iw_scan_req *req = NULL; + DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* specified SSID SCAN */ @@ -1598,11 +1585,11 @@ static int ks_wlan_get_scan(struct net_device *dev, (struct ks_wlan_private *)netdev_priv(dev); int i; char *current_ev = extra; + DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (priv->sme_i.sme_flag & SME_AP_SCAN) { DPRINTK(2, "flag AP_SCAN\n"); @@ -1675,9 +1662,8 @@ static int ks_wlan_set_genie(struct net_device *dev, DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ return 0; // return -EOPNOTSUPP; @@ -1696,26 +1682,23 @@ static int ks_wlan_set_auth_mode(struct net_device *dev, DPRINTK(2, "index=%d:value=%08X\n", index, value); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ switch (index) { case IW_AUTH_WPA_VERSION: /* 0 */ switch (value) { case IW_AUTH_WPA_VERSION_DISABLED: priv->wpa.version = value; - if (priv->wpa.rsn_enabled) { + if (priv->wpa.rsn_enabled) priv->wpa.rsn_enabled = 0; - } priv->need_commit |= SME_RSN; break; case IW_AUTH_WPA_VERSION_WPA: case IW_AUTH_WPA_VERSION_WPA2: priv->wpa.version = value; - if (!(priv->wpa.rsn_enabled)) { + if (!(priv->wpa.rsn_enabled)) priv->wpa.rsn_enabled = 1; - } priv->need_commit |= SME_RSN; break; default: @@ -1832,11 +1815,11 @@ static int ks_wlan_get_auth_mode(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); int index = (vwrq->flags & IW_AUTH_INDEX); + DPRINTK(2, "index=%d\n", index); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* WPA (not used ?? 
wpa_supplicant) */ @@ -1886,18 +1869,17 @@ static int ks_wlan_set_encode_ext(struct net_device *dev, DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags, enc->ext_flags); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (index < 1 || index > 4) return -EINVAL; else index--; - if (dwrq->flags & IW_ENCODE_DISABLED) { + if (dwrq->flags & IW_ENCODE_DISABLED) priv->wpa.key[index].key_len = 0; - } if (enc) { priv->wpa.key[index].ext_flags = enc->ext_flags; @@ -1986,9 +1968,8 @@ static int ks_wlan_get_encode_ext(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ /* WPA (not used ?? wpa_supplicant) @@ -2015,13 +1996,13 @@ static int ks_wlan_set_pmksa(struct net_device *dev, DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ - if (!extra) { + if (!extra) return -EINVAL; - } + pmksa = (struct iw_pmksa *)extra; DPRINTK(2, "cmd=%d\n", pmksa->cmd); @@ -2141,16 +2122,16 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev) /*------------------------------------------------------------------*/ /* Private handler : set stop request */ static int ks_wlan_set_stop_request(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (!(*uwrq)) return -EINVAL; @@ -2173,15 +2154,14 @@ static int ks_wlan_set_mlme(struct net_device *dev, DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ switch (mlme->cmd) { case IW_MLME_DEAUTH: - if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) { + if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) return 0; - } case IW_MLME_DISASSOC: mode = 1; return ks_wlan_set_stop_request(dev, NULL, &mode, NULL); @@ -2207,14 +2187,14 @@ static int ks_wlan_get_firmware_version(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set force disconnect status */ static int ks_wlan_set_detach(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (*uwrq == CONNECT_STATUS) { /* 0 */ priv->connect_status &= ~FORCE_DISCONNECT; @@ -2232,14 +2212,14 @@ static int ks_wlan_set_detach(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get force disconnect status */ static int ks_wlan_get_detach(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ *uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 
1 : 0); return 0; @@ -2248,14 +2228,14 @@ static int ks_wlan_get_detach(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get connect status */ static int ks_wlan_get_connect(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ *uwrq = (priv->connect_status & CONNECT_STATUS_MASK); return 0; @@ -2265,15 +2245,15 @@ static int ks_wlan_get_connect(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set preamble */ static int ks_wlan_set_preamble(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (*uwrq == LONG_PREAMBLE) { /* 0 */ priv->reg.preamble = LONG_PREAMBLE; @@ -2290,15 +2270,15 @@ static int ks_wlan_set_preamble(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get preamble */ static int ks_wlan_get_preamble(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ *uwrq = priv->reg.preamble; return 0; @@ -2307,15 +2287,15 @@ static int ks_wlan_get_preamble(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set power save mode */ static int ks_wlan_set_powermgt(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ if (*uwrq == POWMGT_ACTIVE_MODE) { /* 0 */ priv->reg.powermgt = POWMGT_ACTIVE_MODE; @@ -2340,15 +2320,15 @@ static int ks_wlan_set_powermgt(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get power save made */ static int ks_wlan_get_powermgt(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } + /* for SLEEP MODE */ *uwrq = priv->reg.powermgt; return 0; @@ -2357,15 +2337,14 @@ static int ks_wlan_get_powermgt(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set scan type */ static int ks_wlan_set_scan_type(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return 
-EPERM; - } /* for SLEEP MODE */ if (*uwrq == ACTIVE_SCAN) { /* 0 */ priv->reg.scan_type = ACTIVE_SCAN; @@ -2380,15 +2359,14 @@ static int ks_wlan_set_scan_type(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get scan type */ static int ks_wlan_get_scan_type(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ *uwrq = priv->reg.scan_type; return 0; @@ -2404,9 +2382,8 @@ static int ks_wlan_data_write(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; unsigned char *wbuff = NULL; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC); if (!wbuff) @@ -2428,9 +2405,8 @@ static int ks_wlan_data_read(struct net_device *dev, struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; unsigned short read_length; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (!atomic_read(&priv->event_count)) { if (priv->dev_state < DEVICE_STATE_BOOT) { /* Remove device */ @@ -2488,9 +2464,8 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev, int i, j, len = 0; char tmp[WEP_ASCII_BUFF_SIZE]; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ strcpy(tmp, " WEP keys ASCII \n"); len += strlen(" WEP keys ASCII \n"); @@ -2531,19 +2506,18 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set beacon lost count */ static int ks_wlan_set_beacon_lost(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ - if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) { + if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) priv->reg.beacon_lost_count = *uwrq; - } else + else return -EINVAL; if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) { @@ -2556,15 +2530,14 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get beacon lost count */ static int ks_wlan_get_beacon_lost(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ *uwrq = priv->reg.beacon_lost_count; return 0; @@ -2573,15 +2546,14 @@ static int ks_wlan_get_beacon_lost(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set phy type */ static int ks_wlan_set_phy_type(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct 
ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (*uwrq == D_11B_ONLY_MODE) { /* 0 */ priv->reg.phy_type = D_11B_ONLY_MODE; @@ -2599,15 +2571,14 @@ static int ks_wlan_set_phy_type(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get phy type */ static int ks_wlan_get_phy_type(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ *uwrq = priv->reg.phy_type; return 0; @@ -2616,15 +2587,14 @@ static int ks_wlan_get_phy_type(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set cts mode */ static int ks_wlan_set_cts_mode(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (*uwrq == CTS_MODE_FALSE) { /* 0 */ priv->reg.cts_mode = CTS_MODE_FALSE; @@ -2644,15 +2614,14 @@ static int ks_wlan_set_cts_mode(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get cts mode */ static int ks_wlan_get_cts_mode(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ *uwrq = priv->reg.cts_mode; return 0; @@ -2662,7 +2631,7 @@ static int ks_wlan_get_cts_mode(struct net_device *dev, /* Private handler : set sleep mode */ static int ks_wlan_set_sleep_mode(struct net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2692,7 +2661,7 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev, /* Private handler : get sleep mode */ static int ks_wlan_get_sleep_mode(struct net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); @@ -2708,16 +2677,15 @@ static int ks_wlan_get_sleep_mode(struct net_device *dev, /* Private handler : set phy information timer */ static int ks_wlan_set_phy_information_timer(struct net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (*uwrq >= 0 && *uwrq <= 0xFFFF) /* 0-65535 */ - priv->reg.phy_info_timer = (uint16_t) * uwrq; + priv->reg.phy_info_timer = (uint16_t)*uwrq; else return -EINVAL; @@ -2730,13 +2698,12 @@ static int ks_wlan_set_phy_information_timer(struct net_device *dev, /* Private handler : get phy information timer */ static int ks_wlan_get_phy_information_timer(struct 
net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ *uwrq = priv->reg.phy_info_timer; return 0; @@ -2747,16 +2714,15 @@ static int ks_wlan_get_phy_information_timer(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set WPS enable */ static int ks_wlan_set_wps_enable(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (*uwrq == 0 || *uwrq == 1) priv->wps.wps_enabled = *uwrq; @@ -2771,16 +2737,15 @@ static int ks_wlan_set_wps_enable(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get WPS enable */ static int ks_wlan_get_wps_enable(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ *uwrq = priv->wps.wps_enabled; netdev_info(dev, "return=%d\n", *uwrq); @@ -2801,16 +2766,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev, DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ DPRINTK(2, "dwrq->length=%d\n", dwrq->length); /* length check */ - if (p[1] + 2 != dwrq->length || dwrq->length > 256) { + if (p[1] + 2 != dwrq->length || dwrq->length > 256) return -EINVAL; - } priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */ len = p[1] + 2; /* IE header + IE */ @@ -2833,14 +2796,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev, /* Private handler : get WPS probe req */ static int ks_wlan_get_wps_probe_req(struct net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; + DPRINTK(2, "\n"); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ return 0; } @@ -2850,18 +2813,17 @@ static int ks_wlan_get_wps_probe_req(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set tx gain control value */ static int ks_wlan_set_tx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */ - priv->gain.TxGain = (uint8_t) * uwrq; + priv->gain.TxGain = (uint8_t)*uwrq; else return -EINVAL; @@ -2877,15 +2839,14 @@ static int ks_wlan_set_tx_gain(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get tx gain control value */ static int 
ks_wlan_get_tx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ *uwrq = priv->gain.TxGain; hostif_sme_enqueue(priv, SME_GET_GAIN); @@ -2895,18 +2856,17 @@ static int ks_wlan_get_tx_gain(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set rx gain control value */ static int ks_wlan_set_rx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */ - priv->gain.RxGain = (uint8_t) * uwrq; + priv->gain.RxGain = (uint8_t)*uwrq; else return -EINVAL; @@ -2922,15 +2882,14 @@ static int ks_wlan_set_rx_gain(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get rx gain control value */ static int ks_wlan_get_rx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)netdev_priv(dev); - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ *uwrq = priv->gain.RxGain; hostif_sme_enqueue(priv, SME_GET_GAIN); @@ -2941,17 +2900,16 @@ static int ks_wlan_get_rx_gain(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : set region value */ static int ks_wlan_set_region(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; - if (priv->sleep_mode == SLP_SLEEP) { + if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - } /* for SLEEP MODE */ if (*uwrq >= 0x9 && *uwrq <= 0xF) /* 0x9-0xf */ - priv->region = (uint8_t) * uwrq; + priv->region = (uint8_t)*uwrq; else return -EINVAL; @@ -2963,7 +2921,7 @@ static int ks_wlan_set_region(struct net_device *dev, /*------------------------------------------------------------------*/ /* Private handler : get eeprom checksum result */ static int ks_wlan_get_eeprom_cksum(struct net_device *dev, - struct iw_request_info *info, __u32 * uwrq, + struct iw_request_info *info, __u32 *uwrq, char *extra) { struct ks_wlan_private *priv = @@ -3090,7 +3048,7 @@ static void print_hif_event(struct net_device *dev, int event) /*------------------------------------------------------------------*/ /* Private handler : get host command history */ static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info, - __u32 * uwrq, char *extra) + __u32 *uwrq, char *extra) { int i, event; struct ks_wlan_private *priv = @@ -3293,6 +3251,7 @@ static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq, { int rc = 0; struct iwreq *wrq = (struct iwreq *)rq; + switch (cmd) { case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */ rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL); @@ -3311,9 +3270,8 @@ struct net_device_stats *ks_wlan_get_stats(struct net_device *dev) { 
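The remaining ks_wlan_net.c hunks below include two idiom changes worth calling out: an open-coded init_timer()/->function/->data sequence becomes a single setup_timer() call, and a byte-by-byte MAC printout becomes the %pM printk specifier. A minimal sketch of both idioms follows; the timer, callback and function names are hypothetical and not part of this patch.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

static struct timer_list demo_timer;   /* hypothetical, for illustration only */

static void demo_timeout(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;

        /* %pM expands a 6-byte address to the usual xx:xx:xx:xx:xx:xx form */
        netdev_info(dev, "MAC address = %pM\n", dev->dev_addr);
}

static void demo_start(struct net_device *dev)
{
        /* one call replaces init_timer() plus the .function/.data stores */
        setup_timer(&demo_timer, demo_timeout, (unsigned long)dev);
        mod_timer(&demo_timer, jiffies + HZ);
}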
struct ks_wlan_private *priv = netdev_priv(dev); - if (priv->dev_state < DEVICE_STATE_READY) { + if (priv->dev_state < DEVICE_STATE_READY) return NULL; /* not finished initialize */ - } return &priv->nstats; } @@ -3323,6 +3281,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr) { struct ks_wlan_private *priv = netdev_priv(dev); struct sockaddr *mac_addr = (struct sockaddr *)addr; + if (netif_running(dev)) return -EBUSY; memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); @@ -3330,10 +3289,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr) priv->mac_address_valid = 0; hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST); - netdev_info(dev, - "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n", - priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2], - priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]); + netdev_info(dev, "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr); return 0; } @@ -3344,9 +3300,8 @@ void ks_wlan_tx_timeout(struct net_device *dev) DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead, priv->tx_dev.qtail); - if (!netif_queue_stopped(dev)) { + if (!netif_queue_stopped(dev)) netif_stop_queue(dev); - } priv->nstats.tx_errors++; netif_wake_queue(dev); } @@ -3375,9 +3330,8 @@ int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_trans_update(dev); DPRINTK(4, "rc=%d\n", rc); - if (rc) { + if (rc) rc = 0; - } return rc; } @@ -3410,9 +3364,8 @@ void ks_wlan_set_multicast_list(struct net_device *dev) struct ks_wlan_private *priv = netdev_priv(dev); DPRINTK(4, "\n"); - if (priv->dev_state < DEVICE_STATE_READY) { + if (priv->dev_state < DEVICE_STATE_READY) return; /* not finished initialize */ - } hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST); } @@ -3426,8 +3379,8 @@ int ks_wlan_open(struct net_device *dev) if (!priv->mac_address_valid) { netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name); return -EBUSY; - } else - netif_start_queue(dev); + } + netif_start_queue(dev); return 0; } @@ -3474,9 +3427,8 @@ int ks_wlan_net_start(struct net_device *dev) /* phy information update timer */ atomic_set(&update_phyinfo, 0); - init_timer(&update_phyinfo_timer); - update_phyinfo_timer.function = ks_wlan_update_phyinfo_timeout; - update_phyinfo_timer.data = (unsigned long)priv; + setup_timer(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout, + (unsigned long)priv); /* dummy address set */ memcpy(priv->eth_addr, dummy_addr, ETH_ALEN); diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c index 78ae2b8fb7f34a74c6d2982ef246ac524ed6a952..2f535c08e1728a79aaf7004d8f88e783d9dd6e79 100644 --- a/drivers/staging/ks7010/michael_mic.c +++ b/drivers/staging/ks7010/michael_mic.c @@ -14,10 +14,11 @@ #include "michael_mic.h" // Rotation functions on 32 bit values -#define ROL32( A, n ) ( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) ) -#define ROR32( A, n ) ROL32( (A), 32-(n) ) +#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1))) +#define ROR32(A, n) ROL32((A), 32-(n)) // Convert from Byte[] to UInt32 in a portable way -#define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24) +#define getUInt32(A, B) ((uint32_t)(A[B+0] << 0) \ + + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24)) // Convert from UInt32 to Byte[] in a portable way #define putUInt32(A, B, C) \ @@ -48,21 +49,22 @@ void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t *key) } #define MichaelBlockFunction(L, R) \ -do{ \ - R ^= ROL32( L, 17 ); \ 
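The michael_mic.c hunk at this point only restyles the MichaelBlockFunction() macro, which implements one mixing round of the Michael MIC. As a stand-alone illustration (plain C, not code from the patch), the round the macro expands to is:

#include <stdint.h>

static uint32_t rol32_demo(uint32_t a, unsigned int n)
{
        return (a << n) | (a >> (32 - n));      /* n is always 1..31 here */
}

static void michael_block_demo(uint32_t *l, uint32_t *r)
{
        *r ^= rol32_demo(*l, 17);
        *l += *r;
        /* swap adjacent bytes within each 16-bit half of L, then mix again */
        *r ^= ((*l & 0xff00ff00) >> 8) | ((*l & 0x00ff00ff) << 8);
        *l += *r;
        *r ^= rol32_demo(*l, 3);
        *l += *r;
        *r ^= rol32_demo(*l, 32 - 2);           /* ROR32(L, 2) == ROL32(L, 30) */
        *l += *r;
}

The driver keeps this as a macro rather than a function, presumably so L and R stay in the caller's local variables while a message is absorbed word by word.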
+do { \ + R ^= ROL32(L, 17); \ L += R; \ R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8); \ L += R; \ - R ^= ROL32( L, 3 ); \ + R ^= ROL32(L, 3); \ L += R; \ - R ^= ROR32( L, 2 ); \ + R ^= ROR32(L, 2); \ L += R; \ -}while(0) +} while (0) static void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes) { int addlen; + if (Mic->nBytesInM) { addlen = 4 - Mic->nBytesInM; if (addlen > nBytes) @@ -96,7 +98,8 @@ void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes) static void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst) { - uint8_t *data = Mic->M; + u8 *data = Mic->M; + switch (Mic->nBytesInM) { case 0: Mic->L ^= 0x5a; @@ -122,11 +125,11 @@ void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst) MichaelClear(Mic); } -void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key, - uint8_t *Data, int Len, uint8_t priority, - uint8_t *Result) +void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key, + u8 *Data, int Len, u8 priority, + u8 *Result) { - uint8_t pad_data[4] = { priority, 0, 0, 0 }; + u8 pad_data[4] = { priority, 0, 0, 0 }; // Compute the MIC value /* * IEEE802.11i page 47 diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h index efaa21788fc77e66d6d058bb1739bcc8c7d10b5e..248f849fc4a568cb07b807f99c8cb62e537c903a 100644 --- a/drivers/staging/ks7010/michael_mic.h +++ b/drivers/staging/ks7010/michael_mic.h @@ -11,15 +11,15 @@ /* MichelMIC routine define */ struct michel_mic_t { - uint32_t K0; // Key - uint32_t K1; // Key - uint32_t L; // Current state - uint32_t R; // Current state - uint8_t M[4]; // Message accumulator (single word) - int nBytesInM; // # bytes in M - uint8_t Result[8]; + u32 K0; // Key + u32 K1; // Key + u32 L; // Current state + u32 R; // Current state + u8 M[4]; // Message accumulator (single word) + int nBytesInM; // # bytes in M + u8 Result[8]; }; -void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key, - uint8_t *Data, int Len, uint8_t priority, - uint8_t *Result); +void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key, + u8 *Data, int Len, u8 priority, + u8 *Result); diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h index be0675d8ff5e00fb689394476c330ed7803b8ebe..1ea27c9e3708a4809e4e12ee06af8686b57ea4bf 100644 --- a/drivers/staging/lustre/include/linux/libcfs/curproc.h +++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h @@ -53,7 +53,7 @@ #define current_pid() (current->pid) #define current_comm() (current->comm) -typedef __u32 cfs_cap_t; +typedef u32 cfs_cap_t; #define CFS_CAP_CHOWN 0 #define CFS_CAP_DAC_OVERRIDE 1 @@ -65,15 +65,15 @@ typedef __u32 cfs_cap_t; #define CFS_CAP_SYS_BOOT 23 #define CFS_CAP_SYS_RESOURCE 24 -#define CFS_CAP_FS_MASK ((1 << CFS_CAP_CHOWN) | \ - (1 << CFS_CAP_DAC_OVERRIDE) | \ - (1 << CFS_CAP_DAC_READ_SEARCH) | \ - (1 << CFS_CAP_FOWNER) | \ - (1 << CFS_CAP_FSETID) | \ - (1 << CFS_CAP_LINUX_IMMUTABLE) | \ - (1 << CFS_CAP_SYS_ADMIN) | \ - (1 << CFS_CAP_SYS_BOOT) | \ - (1 << CFS_CAP_SYS_RESOURCE)) +#define CFS_CAP_FS_MASK (BIT(CFS_CAP_CHOWN) | \ + BIT(CFS_CAP_DAC_OVERRIDE) | \ + BIT(CFS_CAP_DAC_READ_SEARCH) | \ + BIT(CFS_CAP_FOWNER) | \ + BIT(CFS_CAP_FSETID) | \ + BIT(CFS_CAP_LINUX_IMMUTABLE) | \ + BIT(CFS_CAP_SYS_ADMIN) | \ + BIT(CFS_CAP_SYS_BOOT) | \ + BIT(CFS_CAP_SYS_RESOURCE)) void cfs_cap_raise(cfs_cap_t cap); void cfs_cap_lower(cfs_cap_t cap); diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h 
b/drivers/staging/lustre/include/linux/libcfs/libcfs.h index 3b92d38d37e2ed6a7910158f20f6ee410a3f2a30..cc2c0e97bb7e8c339ff0a63399b59376a1ced5e8 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h @@ -61,7 +61,7 @@ sigset_t cfs_block_allsigs(void); sigset_t cfs_block_sigs(unsigned long sigs); sigset_t cfs_block_sigsinv(unsigned long sigs); -void cfs_restore_sigs(sigset_t); +void cfs_restore_sigs(sigset_t sigset); void cfs_clear_sigpending(void); /* @@ -71,7 +71,7 @@ void cfs_clear_sigpending(void); /* returns a random 32-bit integer */ unsigned int cfs_rand(void); /* seed the generator */ -void cfs_srand(unsigned int, unsigned int); +void cfs_srand(unsigned int seed1, unsigned int seed2); void cfs_get_random_bytes(void *buf, int size); #include "libcfs_debug.h" @@ -125,7 +125,6 @@ extern struct miscdevice libcfs_dev; /** * The path of debug log dump upcall script. */ -extern char lnet_upcall[1024]; extern char lnet_debug_log_upcall[1024]; extern struct cfs_wi_sched *cfs_sched_rehash; diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h index 81d8079e3b5ea8c6e09502d6ddf8e6508b524027..6d8752a368fadbaa50666651dd23bd4ae0231587 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h @@ -92,7 +92,7 @@ struct cfs_cpt_table { /* node mask */ nodemask_t ctb_nodemask; /* version */ - __u64 ctb_version; + u64 ctb_version; }; static inline cpumask_t * @@ -211,7 +211,7 @@ int cfs_cpu_ht_nsiblings(int cpu); */ void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size); /* - * destory per-cpu-partition variable + * destroy per-cpu-partition variable */ void cfs_percpt_free(void *vars); int cfs_percpt_number(void *vars); diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h index 02be7d7608a582170a895de4b910a3afe7a158c5..8f34c5ddc63e0c8fc9f840a5c72cdd41c04d6664 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h @@ -29,10 +29,12 @@ #define _LIBCFS_CRYPTO_H struct cfs_crypto_hash_type { - char *cht_name; /**< hash algorithm name, equal to - * format name for crypto api */ - unsigned int cht_key; /**< init key by default (valid for - * 4 bytes context like crc32, adler */ + char *cht_name; /*< hash algorithm name, equal to + * format name for crypto api + */ + unsigned int cht_key; /*< init key by default (valid for + * 4 bytes context like crc32, adler + */ unsigned int cht_size; /**< hash digest size */ }; @@ -135,7 +137,7 @@ static inline unsigned char cfs_crypto_hash_alg(const char *algname) enum cfs_crypto_hash_alg hash_alg; for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++) - if (strcmp(hash_types[hash_alg].cht_name, algname) == 0) + if (!strcmp(hash_types[hash_alg].cht_name, algname)) return hash_alg; return CFS_HASH_ALG_UNKNOWN; diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h index bdbbe934584c707da7f03d474b974f5732e11eb7..fedb46dff6962fdc14e5881bcc7239bbf20cb640 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h @@ -39,8 +39,8 @@ extern int cfs_fail_err; extern wait_queue_head_t cfs_race_waitq; extern int cfs_race_state; -int 
__cfs_fail_check_set(__u32 id, __u32 value, int set); -int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set); +int __cfs_fail_check_set(u32 id, u32 value, int set); +int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set); enum { CFS_FAIL_LOC_NOSET = 0, @@ -55,11 +55,11 @@ enum { #define CFS_FAILED_BIT 30 /* CFS_FAILED is 0x40000000 */ -#define CFS_FAILED (1 << CFS_FAILED_BIT) +#define CFS_FAILED BIT(CFS_FAILED_BIT) #define CFS_FAIL_ONCE_BIT 31 /* CFS_FAIL_ONCE is 0x80000000 */ -#define CFS_FAIL_ONCE (1 << CFS_FAIL_ONCE_BIT) +#define CFS_FAIL_ONCE BIT(CFS_FAIL_ONCE_BIT) /* The following flags aren't made to be combined */ #define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */ @@ -69,14 +69,14 @@ enum { #define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */ -static inline bool CFS_FAIL_PRECHECK(__u32 id) +static inline bool CFS_FAIL_PRECHECK(u32 id) { - return cfs_fail_loc != 0 && + return cfs_fail_loc && ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) || - (cfs_fail_loc & id & CFS_FAULT)); + (cfs_fail_loc & id & CFS_FAULT)); } -static inline int cfs_fail_check_set(__u32 id, __u32 value, +static inline int cfs_fail_check_set(u32 id, u32 value, int set, int quiet) { int ret = 0; @@ -103,28 +103,34 @@ static inline int cfs_fail_check_set(__u32 id, __u32 value, #define CFS_FAIL_CHECK_QUIET(id) \ cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1) -/* If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1, - * otherwise return 0 */ +/* + * If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1, + * otherwise return 0 + */ #define CFS_FAIL_CHECK_VALUE(id, value) \ cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0) #define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \ cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1) -/* If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1, - * otherwise return 0 */ +/* + * If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1, + * otherwise return 0 + */ #define CFS_FAIL_CHECK_ORSET(id, value) \ cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0) #define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \ cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1) -/* If id hit cfs_fail_loc, cfs_fail_loc = value and return 1, - * otherwise return 0 */ +/* + * If id hit cfs_fail_loc, cfs_fail_loc = value and return 1, + * otherwise return 0 + */ #define CFS_FAIL_CHECK_RESET(id, value) \ cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0) #define CFS_FAIL_CHECK_RESET_QUIET(id, value) \ cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1) -static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) +static inline int cfs_fail_timeout_set(u32 id, u32 value, int ms, int set) { if (unlikely(CFS_FAIL_PRECHECK(id))) return __cfs_fail_timeout_set(id, value, ms, set); @@ -138,8 +144,10 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) #define CFS_FAIL_TIMEOUT_MS(id, ms) \ cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET) -/* If id hit cfs_fail_loc, cfs_fail_loc |= value and - * sleep seconds or milliseconds */ +/* + * If id hit cfs_fail_loc, cfs_fail_loc |= value and + * sleep seconds or milliseconds + */ #define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \ cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET) @@ -152,13 +160,14 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) #define CFS_FAULT_CHECK(id) \ CFS_FAIL_CHECK(CFS_FAULT | (id)) -/* The idea here is to synchronise two threads to force a race. 
The +/* + * The idea here is to synchronise two threads to force a race. The * first thread that calls this with a matching fail_loc is put to * sleep. The next thread that calls with the same fail_loc wakes up - * the first and continues. */ -static inline void cfs_race(__u32 id) + * the first and continues. + */ +static inline void cfs_race(u32 id) { - if (CFS_FAIL_PRECHECK(id)) { if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) { int rc; @@ -166,7 +175,7 @@ static inline void cfs_race(__u32 id) cfs_race_state = 0; CERROR("cfs_race id %x sleeping\n", id); rc = wait_event_interruptible(cfs_race_waitq, - cfs_race_state != 0); + !!cfs_race_state); CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc); } else { CERROR("cfs_fail_race id %x waking\n", id); diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h index 6949a184663527263ad44c9add33cb7e7fd0897d..0cc2fc465c1aa6305a2ac45de00189c915d15365 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h @@ -57,8 +57,10 @@ /** disable debug */ #define CFS_HASH_DEBUG_NONE 0 -/** record hash depth and output to console when it's too deep, - * computing overhead is low but consume more memory */ +/* + * record hash depth and output to console when it's too deep, + * computing overhead is low but consume more memory + */ #define CFS_HASH_DEBUG_1 1 /** expensive, check key validation */ #define CFS_HASH_DEBUG_2 2 @@ -87,8 +89,8 @@ union cfs_hash_lock { */ struct cfs_hash_bucket { union cfs_hash_lock hsb_lock; /**< bucket lock */ - __u32 hsb_count; /**< current entries */ - __u32 hsb_version; /**< change version */ + u32 hsb_count; /**< current entries */ + u32 hsb_version; /**< change version */ unsigned int hsb_index; /**< index of bucket */ int hsb_depmax; /**< max depth on bucket */ long hsb_head[0]; /**< hash-head array */ @@ -123,38 +125,40 @@ enum cfs_hash_tag { * . 
Some functions will be disabled with this flag, i.e: * cfs_hash_for_each_empty, cfs_hash_rehash */ - CFS_HASH_NO_LOCK = 1 << 0, + CFS_HASH_NO_LOCK = BIT(0), /** no bucket lock, use one spinlock to protect the whole hash */ - CFS_HASH_NO_BKTLOCK = 1 << 1, + CFS_HASH_NO_BKTLOCK = BIT(1), /** rwlock to protect bucket */ - CFS_HASH_RW_BKTLOCK = 1 << 2, + CFS_HASH_RW_BKTLOCK = BIT(2), /** spinlock to protect bucket */ - CFS_HASH_SPIN_BKTLOCK = 1 << 3, + CFS_HASH_SPIN_BKTLOCK = BIT(3), /** always add new item to tail */ - CFS_HASH_ADD_TAIL = 1 << 4, + CFS_HASH_ADD_TAIL = BIT(4), /** hash-table doesn't have refcount on item */ - CFS_HASH_NO_ITEMREF = 1 << 5, + CFS_HASH_NO_ITEMREF = BIT(5), /** big name for param-tree */ - CFS_HASH_BIGNAME = 1 << 6, + CFS_HASH_BIGNAME = BIT(6), /** track global count */ - CFS_HASH_COUNTER = 1 << 7, + CFS_HASH_COUNTER = BIT(7), /** rehash item by new key */ - CFS_HASH_REHASH_KEY = 1 << 8, + CFS_HASH_REHASH_KEY = BIT(8), /** Enable dynamic hash resizing */ - CFS_HASH_REHASH = 1 << 9, + CFS_HASH_REHASH = BIT(9), /** can shrink hash-size */ - CFS_HASH_SHRINK = 1 << 10, + CFS_HASH_SHRINK = BIT(10), /** assert hash is empty on exit */ - CFS_HASH_ASSERT_EMPTY = 1 << 11, + CFS_HASH_ASSERT_EMPTY = BIT(11), /** record hlist depth */ - CFS_HASH_DEPTH = 1 << 12, + CFS_HASH_DEPTH = BIT(12), /** * rehash is always scheduled in a different thread, so current * change on hash table is non-blocking */ - CFS_HASH_NBLK_CHANGE = 1 << 13, - /** NB, we typed hs_flags as __u16, please change it - * if you need to extend >=16 flags */ + CFS_HASH_NBLK_CHANGE = BIT(13), + /** + * NB, we typed hs_flags as u16, please change it + * if you need to extend >=16 flags + */ }; /** most used attributes */ @@ -201,8 +205,10 @@ enum cfs_hash_tag { */ struct cfs_hash { - /** serialize with rehash, or serialize all operations if - * the hash-table has CFS_HASH_NO_BKTLOCK */ + /** + * serialize with rehash, or serialize all operations if + * the hash-table has CFS_HASH_NO_BKTLOCK + */ union cfs_hash_lock hs_lock; /** hash operations */ struct cfs_hash_ops *hs_ops; @@ -215,31 +221,31 @@ struct cfs_hash { /** total number of items on this hash-table */ atomic_t hs_count; /** hash flags, see cfs_hash_tag for detail */ - __u16 hs_flags; + u16 hs_flags; /** # of extra-bytes for bucket, for user saving extended attributes */ - __u16 hs_extra_bytes; + u16 hs_extra_bytes; /** wants to iterate */ - __u8 hs_iterating; + u8 hs_iterating; /** hash-table is dying */ - __u8 hs_exiting; + u8 hs_exiting; /** current hash bits */ - __u8 hs_cur_bits; + u8 hs_cur_bits; /** min hash bits */ - __u8 hs_min_bits; + u8 hs_min_bits; /** max hash bits */ - __u8 hs_max_bits; + u8 hs_max_bits; /** bits for rehash */ - __u8 hs_rehash_bits; + u8 hs_rehash_bits; /** bits for each bucket */ - __u8 hs_bkt_bits; + u8 hs_bkt_bits; /** resize min threshold */ - __u16 hs_min_theta; + u16 hs_min_theta; /** resize max threshold */ - __u16 hs_max_theta; + u16 hs_max_theta; /** resize count */ - __u32 hs_rehash_count; + u32 hs_rehash_count; /** # of iterators (caller of cfs_hash_for_each_*) */ - __u32 hs_iterators; + u32 hs_iterators; /** rehash workitem */ struct cfs_workitem hs_rehash_wi; /** refcount on this hash table */ @@ -291,8 +297,8 @@ struct cfs_hash_hlist_ops { struct cfs_hash_ops { /** return hashed value from @key */ - unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, - unsigned mask); + unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key, + unsigned int mask); /** return key address of @hnode */ void * 
(*hs_key)(struct hlist_node *hnode); /** copy key from @hnode to @key */ @@ -317,110 +323,112 @@ struct cfs_hash_ops { /** total number of buckets in @hs */ #define CFS_HASH_NBKT(hs) \ - (1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits)) + BIT((hs)->hs_cur_bits - (hs)->hs_bkt_bits) /** total number of buckets in @hs while rehashing */ #define CFS_HASH_RH_NBKT(hs) \ - (1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits)) + BIT((hs)->hs_rehash_bits - (hs)->hs_bkt_bits) /** number of hlist for in bucket */ -#define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits) +#define CFS_HASH_BKT_NHLIST(hs) BIT((hs)->hs_bkt_bits) /** total number of hlist in @hs */ -#define CFS_HASH_NHLIST(hs) (1U << (hs)->hs_cur_bits) +#define CFS_HASH_NHLIST(hs) BIT((hs)->hs_cur_bits) /** total number of hlist in @hs while rehashing */ -#define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits) +#define CFS_HASH_RH_NHLIST(hs) BIT((hs)->hs_rehash_bits) static inline int cfs_hash_with_no_lock(struct cfs_hash *hs) { /* caller will serialize all operations for this hash-table */ - return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0; + return hs->hs_flags & CFS_HASH_NO_LOCK; } static inline int cfs_hash_with_no_bktlock(struct cfs_hash *hs) { /* no bucket lock, one single lock to protect the hash-table */ - return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0; + return hs->hs_flags & CFS_HASH_NO_BKTLOCK; } static inline int cfs_hash_with_rw_bktlock(struct cfs_hash *hs) { /* rwlock to protect hash bucket */ - return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0; + return hs->hs_flags & CFS_HASH_RW_BKTLOCK; } static inline int cfs_hash_with_spin_bktlock(struct cfs_hash *hs) { /* spinlock to protect hash bucket */ - return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0; + return hs->hs_flags & CFS_HASH_SPIN_BKTLOCK; } static inline int cfs_hash_with_add_tail(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0; + return hs->hs_flags & CFS_HASH_ADD_TAIL; } static inline int cfs_hash_with_no_itemref(struct cfs_hash *hs) { - /* hash-table doesn't keep refcount on item, + /* + * hash-table doesn't keep refcount on item, * item can't be removed from hash unless it's - * ZERO refcount */ - return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0; + * ZERO refcount + */ + return hs->hs_flags & CFS_HASH_NO_ITEMREF; } static inline int cfs_hash_with_bigname(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_BIGNAME) != 0; + return hs->hs_flags & CFS_HASH_BIGNAME; } static inline int cfs_hash_with_counter(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_COUNTER) != 0; + return hs->hs_flags & CFS_HASH_COUNTER; } static inline int cfs_hash_with_rehash(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_REHASH) != 0; + return hs->hs_flags & CFS_HASH_REHASH; } static inline int cfs_hash_with_rehash_key(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0; + return hs->hs_flags & CFS_HASH_REHASH_KEY; } static inline int cfs_hash_with_shrink(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_SHRINK) != 0; + return hs->hs_flags & CFS_HASH_SHRINK; } static inline int cfs_hash_with_assert_empty(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0; + return hs->hs_flags & CFS_HASH_ASSERT_EMPTY; } static inline int cfs_hash_with_depth(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_DEPTH) != 0; + return hs->hs_flags & CFS_HASH_DEPTH; } static inline int cfs_hash_with_nblk_change(struct cfs_hash *hs) { - return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0; + return hs->hs_flags & 
CFS_HASH_NBLK_CHANGE; } static inline int @@ -434,14 +442,14 @@ static inline int cfs_hash_is_rehashing(struct cfs_hash *hs) { /* rehash is launched */ - return hs->hs_rehash_bits != 0; + return !!hs->hs_rehash_bits; } static inline int cfs_hash_is_iterating(struct cfs_hash *hs) { /* someone is calling cfs_hash_for_each_* */ - return hs->hs_iterating || hs->hs_iterators != 0; + return hs->hs_iterating || hs->hs_iterators; } static inline int @@ -453,7 +461,7 @@ cfs_hash_bkt_size(struct cfs_hash *hs) } static inline unsigned -cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask) +cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned int mask) { return hs->hs_ops->hs_hash(hs, key, mask); } @@ -562,7 +570,7 @@ cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd) } static inline void -cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index, +cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index, struct cfs_hash_bd *bd) { bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits]; @@ -576,14 +584,14 @@ cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd) cfs_hash_bkt_size(hs) - hs->hs_extra_bytes; } -static inline __u32 +static inline u32 cfs_hash_bd_version_get(struct cfs_hash_bd *bd) { /* need hold cfs_hash_bd_lock */ return bd->bd_bucket->hsb_version; } -static inline __u32 +static inline u32 cfs_hash_bd_count_get(struct cfs_hash_bd *bd) { /* need hold cfs_hash_bd_lock */ @@ -669,10 +677,10 @@ cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, /* Hash init/cleanup functions */ struct cfs_hash * -cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, - unsigned bkt_bits, unsigned extra_bytes, - unsigned min_theta, unsigned max_theta, - struct cfs_hash_ops *ops, unsigned flags); +cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits, + unsigned int bkt_bits, unsigned int extra_bytes, + unsigned int min_theta, unsigned int max_theta, + struct cfs_hash_ops *ops, unsigned int flags); struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs); void cfs_hash_putref(struct cfs_hash *hs); @@ -700,27 +708,28 @@ typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs, void * cfs_hash_lookup(struct cfs_hash *hs, const void *key); void -cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data); +cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, void *data); void -cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data); +cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, + void *data); int -cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t, - void *data); +cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, + void *data, int start); int -cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t, +cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, void *data); void cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, - cfs_hash_for_each_cb_t, void *data); + cfs_hash_for_each_cb_t cb, void *data); typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data); void -cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data); +cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t cb, void *data); void -cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, - cfs_hash_for_each_cb_t, void *data); +cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex, + cfs_hash_for_each_cb_t cb, void *data); int 
cfs_hash_is_empty(struct cfs_hash *hs); -__u64 cfs_hash_size_get(struct cfs_hash *hs); +u64 cfs_hash_size_get(struct cfs_hash *hs); /* * Rehash - Theta is calculated to be the average chained @@ -766,8 +775,8 @@ cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd, #endif /* CFS_HASH_DEBUG_LEVEL */ #define CFS_HASH_THETA_BITS 10 -#define CFS_HASH_MIN_THETA (1U << (CFS_HASH_THETA_BITS - 1)) -#define CFS_HASH_MAX_THETA (1U << (CFS_HASH_THETA_BITS + 1)) +#define CFS_HASH_MIN_THETA BIT(CFS_HASH_THETA_BITS - 1) +#define CFS_HASH_MAX_THETA BIT(CFS_HASH_THETA_BITS + 1) /* Return integer component of theta */ static inline int __cfs_hash_theta_int(int theta) @@ -792,8 +801,8 @@ static inline void __cfs_hash_set_theta(struct cfs_hash *hs, int min, int max) { LASSERT(min < max); - hs->hs_min_theta = (__u16)min; - hs->hs_max_theta = (__u16)max; + hs->hs_min_theta = (u16)min; + hs->hs_max_theta = (u16)max; } /* Generic debug formatting routines mainly for proc handler */ @@ -805,11 +814,11 @@ void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m); * Generic djb2 hash algorithm for character arrays. */ static inline unsigned -cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask) +cfs_hash_djb2_hash(const void *key, size_t size, unsigned int mask) { - unsigned i, hash = 5381; + unsigned int i, hash = 5381; - LASSERT(key != NULL); + LASSERT(key); for (i = 0; i < size; i++) hash = hash * 33 + ((char *)key)[i]; @@ -821,7 +830,7 @@ cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask) * Generic u32 hash algorithm. */ static inline unsigned -cfs_hash_u32_hash(const __u32 key, unsigned mask) +cfs_hash_u32_hash(const u32 key, unsigned int mask) { return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask); } @@ -830,9 +839,9 @@ cfs_hash_u32_hash(const __u32 key, unsigned mask) * Generic u64 hash algorithm. 
*/ static inline unsigned -cfs_hash_u64_hash(const __u64 key, unsigned mask) +cfs_hash_u64_hash(const u64 key, unsigned int mask) { - return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask); + return ((unsigned int)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask); } /** iterate over all buckets in @bds (array of struct cfs_hash_bd) */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h index e0e1a5d0949d8f09e069f54e2fa5a182e8634db3..aab15d8112a4eced73f779102b1992f7bbe94243 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h @@ -75,7 +75,7 @@ do { \ #define KLASSERT(e) LASSERT(e) -void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *); +void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg); #define LBUG() \ do { \ @@ -96,7 +96,7 @@ do { \ #define LIBCFS_ALLOC_POST(ptr, size) \ do { \ - if (unlikely((ptr) == NULL)) { \ + if (unlikely(!(ptr))) { \ CERROR("LNET: out of memory at %s:%d (tried to alloc '" \ #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \ } else { \ @@ -147,7 +147,7 @@ do { \ #define LIBCFS_FREE(ptr, size) \ do { \ - if (unlikely((ptr) == NULL)) { \ + if (unlikely(!(ptr))) { \ CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \ "%s:%d\n", (int)(size), __FILE__, __LINE__); \ break; \ @@ -169,8 +169,6 @@ do { \ #define ntohs(x) ___ntohs(x) #endif -void libcfs_run_upcall(char **argv); -void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *); void libcfs_debug_dumplog(void); int libcfs_debug_init(unsigned long bufsize); int libcfs_debug_cleanup(void); @@ -280,7 +278,7 @@ do { \ #define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr))) /** Compile-time assertion. - + * * Check an invariant described by a constant expression at compile time by * forcing a compiler error if it does not hold. \a cond must be a constant * expression as defined by the ISO C Standard: @@ -306,7 +304,8 @@ do { \ /* -------------------------------------------------------------------- * Light-weight trace * Support for temporary event tracing with minimal Heisenberg effect. - * -------------------------------------------------------------------- */ + * -------------------------------------------------------------------- + */ #define MKSTR(ptr) ((ptr)) ? (ptr) : "" diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h index 0ee60ff336f2dada891bb7750b5d96c36112c046..41795d9b3b9bda82f56861142c06cd45ca7d8128 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h @@ -62,9 +62,9 @@ struct cfs_range_expr { * Link to cfs_expr_list::el_exprs. 
*/ struct list_head re_link; - __u32 re_lo; - __u32 re_hi; - __u32 re_stride; + u32 re_lo; + u32 re_hi; + u32 re_stride; }; struct cfs_expr_list { @@ -74,24 +74,26 @@ struct cfs_expr_list { char *cfs_trimwhite(char *str); int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res); -int cfs_str2num_check(char *str, int nob, unsigned *num, - unsigned min, unsigned max); -int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list); +int cfs_str2num_check(char *str, int nob, unsigned int *num, + unsigned int min, unsigned int max); +int cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list); int cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list); int cfs_expr_list_values(struct cfs_expr_list *expr_list, - int max, __u32 **values); + int max, u32 **values); static inline void -cfs_expr_list_values_free(__u32 *values, int num) +cfs_expr_list_values_free(u32 *values, int num) { - /* This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed + /* + * This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed * by OBD_FREE() if it's called by module other than libcfs & LNet, - * otherwise we will see fake memory leak */ + * otherwise we will see fake memory leak + */ LIBCFS_FREE(values, num * sizeof(values[0])); } void cfs_expr_list_free(struct cfs_expr_list *expr_list); -int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, +int cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max, struct cfs_expr_list **elpp); void cfs_expr_list_free_list(struct list_head *list); diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h index a7e1340e69a1d87c050945adf3180a710b2ab96b..2accd9a85472b76a7cbd570aeb7eaf20e64858d2 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h @@ -62,9 +62,9 @@ struct cfs_wi_sched; -void cfs_wi_sched_destroy(struct cfs_wi_sched *); +void cfs_wi_sched_destroy(struct cfs_wi_sched *sched); int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt, - int nthrs, struct cfs_wi_sched **); + int nthrs, struct cfs_wi_sched **sched_pp); struct cfs_workitem; diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h index f63cb47bc309ffaeeb240ff8e98dd4f22cf30585..dd0cd0442b866539ca20c0ded85337813f031176 100644 --- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h +++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h @@ -52,17 +52,17 @@ struct cfs_cpu_partition { /* nodes mask for this partition */ nodemask_t *cpt_nodemask; /* spread rotor for NUMA allocator */ - unsigned cpt_spread_rotor; + unsigned int cpt_spread_rotor; }; /** descriptor for CPU partitions */ struct cfs_cpt_table { /* version, reserved for hotplug */ - unsigned ctb_version; + unsigned int ctb_version; /* spread rotor for NUMA allocator */ - unsigned ctb_spread_rotor; + unsigned int ctb_spread_rotor; /* # of CPU partitions */ - unsigned ctb_nparts; + unsigned int ctb_nparts; /* partitions tables */ struct cfs_cpu_partition *ctb_parts; /* shadow HW CPU to CPU partition ID */ diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h index b646acd1f7e75b5bf150c939457f7bacd9678728..709e1ce98d8d7d12d746bdfb55bf93f966d71157 100644 --- 
a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h +++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h @@ -76,23 +76,23 @@ static inline long cfs_duration_sec(long d) #define cfs_time_current_64 get_jiffies_64 -static inline __u64 cfs_time_add_64(__u64 t, __u64 d) +static inline u64 cfs_time_add_64(u64 t, u64 d) { return t + d; } -static inline __u64 cfs_time_shift_64(int seconds) +static inline u64 cfs_time_shift_64(int seconds) { return cfs_time_add_64(cfs_time_current_64(), cfs_time_seconds(seconds)); } -static inline int cfs_time_before_64(__u64 t1, __u64 t2) +static inline int cfs_time_before_64(u64 t1, u64 t2) { return (__s64)t2 - (__s64)t1 > 0; } -static inline int cfs_time_beforeq_64(__u64 t1, __u64 t2) +static inline int cfs_time_beforeq_64(u64 t1, u64 t2) { return (__s64)t2 - (__s64)t1 >= 0; } diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h index 417044552d3f2c352014cebf5547823e142c6837..8a84888635ff112e5b1e859f4678b0345f2663e6 100644 --- a/drivers/staging/lustre/include/linux/lnet/lnetst.h +++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h @@ -244,7 +244,7 @@ typedef struct { int lstio_ses_timeout; /* IN: session timeout */ int lstio_ses_force; /* IN: force create ? */ /** IN: session features */ - unsigned lstio_ses_feats; + unsigned int lstio_ses_feats; lst_sid_t __user *lstio_ses_idp; /* OUT: session id */ int lstio_ses_nmlen; /* IN: name length */ char __user *lstio_ses_namep; /* IN: session name */ @@ -255,7 +255,7 @@ typedef struct { lst_sid_t __user *lstio_ses_idp; /* OUT: session id */ int __user *lstio_ses_keyp; /* OUT: local key */ /** OUT: session features */ - unsigned __user *lstio_ses_featp; + unsigned int __user *lstio_ses_featp; lstcon_ndlist_ent_t __user *lstio_ses_ndinfo; /* OUT: */ int lstio_ses_nmlen; /* IN: name length */ char __user *lstio_ses_namep; /* OUT: session name */ @@ -328,7 +328,7 @@ typedef struct { char __user *lstio_grp_namep; /* IN: group name */ int lstio_grp_count; /* IN: # of nodes */ /** OUT: session features */ - unsigned __user *lstio_grp_featp; + unsigned int __user *lstio_grp_featp; lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */ struct list_head __user *lstio_grp_resultp; /* OUT: list head of result buffer */ @@ -490,6 +490,8 @@ typedef struct { int blk_size; /* size (bytes) */ int blk_time; /* time of running the test*/ int blk_flags; /* reserved flags */ + int blk_cli_off; /* bulk offset on client */ + int blk_srv_off; /* reserved: bulk offset on server */ } lst_test_bulk_param_t; typedef struct { diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h index f8be0e2f7bf7f72e40a5258b9c5745afc1273eca..8ca1e9d0cfe2b4f539e0ce981476dc80cf15c88c 100644 --- a/drivers/staging/lustre/include/linux/lnet/types.h +++ b/drivers/staging/lustre/include/linux/lnet/types.h @@ -34,6 +34,7 @@ #define __LNET_TYPES_H__ #include +#include /** \addtogroup lnet * @{ diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 9e8802181452831c52b9a87428ccec02c353a806..7f761b327166e5c8f8a94ee406845f06c9517f57 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -1489,7 +1489,7 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps, static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies) { - if 
(!fps->fps_net) /* intialized? */ + if (!fps->fps_net) /* initialized? */ return; spin_lock(&fps->fps_lock); @@ -1637,7 +1637,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, { __u64 *pages = tx->tx_pages; bool is_rx = (rd != tx->tx_rd); - bool tx_pages_mapped = 0; + bool tx_pages_mapped = false; struct kib_fmr_pool *fpo; int npages = 0; __u64 version; @@ -1812,7 +1812,7 @@ static void kiblnd_destroy_pool_list(struct list_head *head) static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies) { - if (!ps->ps_net) /* intialized? */ + if (!ps->ps_net) /* initialized? */ return; spin_lock(&ps->ps_lock); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index b27de88881499535b777d78712a53bdb2385a1e8..c7917abf99445de8ffe98e60f809e135ba9e3457 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -1912,12 +1912,12 @@ kiblnd_close_conn_locked(struct kib_conn *conn, int error) libcfs_nid2str(peer->ibp_nid)); } else { CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", - libcfs_nid2str(peer->ibp_nid), error, - list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", - list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", - list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", - list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", - list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); + libcfs_nid2str(peer->ibp_nid), error, + list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", + list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", + list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", + list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", + list_empty(&conn->ibc_active_txs) ? 
"" : "(waiting)"); } dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev; @@ -2643,7 +2643,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version, if (incarnation) peer->ibp_incarnation = incarnation; out: - write_unlock_irqrestore(glock, flags); + write_unlock_irqrestore(glock, flags); CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n", libcfs_nid2str(peer->ibp_nid), @@ -2651,7 +2651,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version, reason, IBLND_MSG_VERSION, version, msg_size, conn->ibc_queue_depth, queue_dep, conn->ibc_max_frags, frag_num); - /** + /** * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer * while destroying the zombie */ @@ -2976,7 +2976,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case RDMA_CM_EVENT_ADDR_ERROR: peer = (struct kib_peer *)cmid->context; CNETERR("%s: ADDR ERROR %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer->ibp_nid), event->status); kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); kiblnd_peer_decref(peer); return -EHOSTUNREACH; /* rc destroys cmid */ @@ -3021,7 +3021,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return kiblnd_active_connect(cmid); CNETERR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer->ibp_nid), event->status); kiblnd_peer_connect_failed(peer, 1, event->status); kiblnd_peer_decref(peer); return event->status; /* rc destroys cmid */ @@ -3031,7 +3031,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); CNETERR("%s: UNREACHABLE %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); + libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); kiblnd_connreq_done(conn, -ENETDOWN); kiblnd_conn_decref(conn); return 0; @@ -3269,14 +3269,14 @@ kiblnd_disconnect_conn(struct kib_conn *conn) #define KIB_RECONN_HIGH_RACE 10 /** * Allow connd to take a break and handle other things after consecutive - * reconnection attemps. + * reconnection attempts. 
*/ #define KIB_RECONN_BREAK 100 int kiblnd_connd(void *arg) { - spinlock_t *lock= &kiblnd_data.kib_connd_lock; + spinlock_t *lock = &kiblnd_data.kib_connd_lock; wait_queue_t wait; unsigned long flags; struct kib_conn *conn; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index cbc9a9c5385f4819cadb9dc93f663a993fa3c318..b74cf635faee42fc925284903736ea12e150e1dd 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c @@ -96,7 +96,8 @@ ksocknal_destroy_route(struct ksock_route *route) } static int -ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id) +ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, + lnet_process_id_t id) { int cpt = lnet_cpt_of_nid(id.nid); struct ksock_net *net = ni->ni_data; @@ -319,7 +320,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, } static void -ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn) +ksocknal_associate_route_conn_locked(struct ksock_route *route, + struct ksock_conn *conn) { struct ksock_peer *peer = route->ksnr_peer; int type = conn->ksnc_type; @@ -821,7 +823,8 @@ ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips) if (k < peer->ksnp_n_passive_ips) /* using it already */ continue; - k = ksocknal_match_peerip(iface, peerips, n_peerips); + k = ksocknal_match_peerip(iface, peerips, + n_peerips); xor = ip ^ peerips[k]; this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0; @@ -1302,8 +1305,11 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route, /* Take packets blocking for this connection. */ list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) { - if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO) - continue; + int match = conn->ksnc_proto->pro_match_tx(conn, tx, + tx->tx_nonblk); + + if (match == SOCKNAL_MATCH_NO) + continue; list_del(&tx->tx_list); ksocknal_queue_tx_locked(tx, conn); @@ -1493,8 +1499,8 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error) spin_unlock_bh(&conn->ksnc_scheduler->kss_lock); } - peer->ksnp_proto = NULL; /* renegotiate protocol version */ - peer->ksnp_error = error; /* stash last conn close reason */ + peer->ksnp_proto = NULL; /* renegotiate protocol version */ + peer->ksnp_error = error; /* stash last conn close reason */ if (list_empty(&peer->ksnp_routes)) { /* @@ -1786,7 +1792,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid))) continue; - count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0); + count += ksocknal_close_peer_conns_locked(peer, ipaddr, + 0); } } @@ -2026,7 +2033,10 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) } rc = 0; - /* NB only new connections will pay attention to the new interface! */ + /* + * NB only new connections will pay attention to the + * new interface! 
+ */ } write_unlock_bh(&ksocknal_data.ksnd_global_lock); @@ -2200,8 +2210,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) int txmem; int rxmem; int nagle; - struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); + struct ksock_conn *conn; + conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); if (!conn) return -ENOENT; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index e6ca0cf52691fec8c55fd3968fe4801fdbe7054c..842c45393b38f0bfb40541ff9f92b9b5ce7d7901 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -84,7 +84,8 @@ struct ksock_sched { /* per scheduler state */ struct list_head kss_zombie_noop_txs; /* zombie noop tx list */ wait_queue_head_t kss_waitq; /* where scheduler sleeps */ int kss_nconns; /* # connections assigned to - * this scheduler */ + * this scheduler + */ struct ksock_sched_info *kss_info; /* owner of it */ }; @@ -110,15 +111,19 @@ struct ksock_interface { /* in-use interface */ struct ksock_tunables { int *ksnd_timeout; /* "stuck" socket timeout - * (seconds) */ + * (seconds) + */ int *ksnd_nscheds; /* # scheduler threads in each - * pool while starting */ + * pool while starting + */ int *ksnd_nconnds; /* # connection daemons */ int *ksnd_nconnds_max; /* max # connection daemons */ int *ksnd_min_reconnectms; /* first connection retry after - * (ms)... */ + * (ms)... + */ int *ksnd_max_reconnectms; /* ...exponentially increasing to - * this */ + * this + */ int *ksnd_eager_ack; /* make TCP ack eagerly? */ int *ksnd_typed_conns; /* drive sockets by type? */ int *ksnd_min_bulk; /* smallest "large" message */ @@ -126,9 +131,11 @@ struct ksock_tunables { int *ksnd_rx_buffer_size; /* socket rx buffer size */ int *ksnd_nagle; /* enable NAGLE? 
*/ int *ksnd_round_robin; /* round robin for multiple - * interfaces */ + * interfaces + */ int *ksnd_keepalive; /* # secs for sending keepalive - * NOOP */ + * NOOP + */ int *ksnd_keepalive_idle; /* # idle secs before 1st probe */ int *ksnd_keepalive_count; /* # probes */ @@ -137,20 +144,26 @@ struct ksock_tunables { int *ksnd_peertxcredits; /* # concurrent sends to 1 peer */ int *ksnd_peerrtrcredits; /* # per-peer router buffer - * credits */ + * credits + */ int *ksnd_peertimeout; /* seconds to consider peer dead */ int *ksnd_enable_csum; /* enable check sum */ int *ksnd_inject_csum_error; /* set non-zero to inject - * checksum error */ + * checksum error + */ int *ksnd_nonblk_zcack; /* always send zc-ack on - * non-blocking connection */ + * non-blocking connection + */ unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload - * size */ + * size + */ int *ksnd_zc_recv; /* enable ZC receive (for - * Chelsio TOE) */ + * Chelsio TOE) + */ int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to - * enable ZC receive */ + * enable ZC receive + */ }; struct ksock_net { @@ -174,9 +187,11 @@ struct ksock_nal_data { int ksnd_nnets; /* # networks set up */ struct list_head ksnd_nets; /* list of nets */ rwlock_t ksnd_global_lock; /* stabilize peer/conn - * ops */ + * ops + */ struct list_head *ksnd_peers; /* hash table of all my - * known peers */ + * known peers + */ int ksnd_peer_hash_size; /* size of ksnd_peers */ int ksnd_nthreads; /* # live threads */ @@ -187,11 +202,14 @@ struct ksock_nal_data { atomic_t ksnd_nactive_txs; /* #active txs */ struct list_head ksnd_deathrow_conns; /* conns to close: - * reaper_lock*/ + * reaper_lock + */ struct list_head ksnd_zombie_conns; /* conns to free: - * reaper_lock */ + * reaper_lock + */ struct list_head ksnd_enomem_conns; /* conns to retry: - * reaper_lock*/ + * reaper_lock + */ wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */ unsigned long ksnd_reaper_waketime; /* when reaper will wake */ @@ -201,30 +219,34 @@ struct ksock_nal_data { int ksnd_stall_tx; /* test sluggish sender */ int ksnd_stall_rx; /* test sluggish - * receiver */ - + * receiver + */ struct list_head ksnd_connd_connreqs; /* incoming connection - * requests */ + * requests + */ struct list_head ksnd_connd_routes; /* routes waiting to be - * connected */ + * connected + */ wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */ int ksnd_connd_connecting; /* # connds connecting */ time64_t ksnd_connd_failed_stamp;/* time stamp of the * last failed - * connecting attempt */ + * connecting attempt + */ time64_t ksnd_connd_starting_stamp;/* time stamp of the * last starting connd */ - unsigned ksnd_connd_starting; /* # starting connd */ - unsigned ksnd_connd_running; /* # running connd */ + unsigned int ksnd_connd_starting; /* # starting connd */ + unsigned int ksnd_connd_running; /* # running connd */ spinlock_t ksnd_connd_lock; /* serialise */ struct list_head ksnd_idle_noop_txs; /* list head for freed - * noop tx */ + * noop tx + */ spinlock_t ksnd_tx_lock; /* serialise, g_lock - * unsafe */ - + * unsafe + */ }; #define SOCKNAL_INIT_NOTHING 0 @@ -304,18 +326,21 @@ struct ksock_conn { struct list_head ksnc_list; /* stash on peer's conn list */ struct socket *ksnc_sock; /* actual socket */ void *ksnc_saved_data_ready; /* socket's original - * data_ready() callback */ + * data_ready() callback + */ void *ksnc_saved_write_space; /* socket's original - * write_space() callback */ + * write_space() callback + */ atomic_t ksnc_conn_refcount;/* conn refcount */ 
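Most of the socklnd.h changes in this region are mechanical comment reflows: trailing block comments are rewrapped so the closing delimiter sits on its own line, the form checkpatch.pl prefers. A before-and-after sketch on a hypothetical structure, not taken from the patch:

struct demo_tunables {
        int *timeout_old;       /* old style: "stuck" socket timeout
                                 * (seconds) */

        int *timeout_new;       /* new style: "stuck" socket timeout
                                 * (seconds)
                                 */
};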
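Earlier hunks in this patch (libcfs curproc.h, libcfs_fail.h, libcfs_hash.h) replace open-coded 1 << n expressions with the kernel's BIT() macro. A small stand-alone illustration of the equivalence, using hypothetical flag names:

#include <linux/bitops.h>

/*
 * BIT(nr) is (1UL << (nr)), so the two forms below set the same bit,
 * but BIT() avoids shifting into the sign bit of a plain int for nr >= 31.
 */
#define DEMO_FLAG_OLD   (1 << 13)
#define DEMO_FLAG_NEW   BIT(13)

#define DEMO_MASK       (BIT(0) | BIT(2) | BIT(7))      /* 0x85 */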
atomic_t ksnc_sock_refcount;/* sock refcount */ struct ksock_sched *ksnc_scheduler; /* who schedules this connection - */ + */ __u32 ksnc_myipaddr; /* my IP */ __u32 ksnc_ipaddr; /* peer's IP */ int ksnc_port; /* peer's port */ signed int ksnc_type:3; /* type of connection, should be - * signed value */ + * signed value + */ unsigned int ksnc_closing:1; /* being shut down */ unsigned int ksnc_flip:1; /* flip or not, only for V2.x */ unsigned int ksnc_zc_capable:1; /* enable to ZC */ @@ -323,9 +348,11 @@ struct ksock_conn { /* reader */ struct list_head ksnc_rx_list; /* where I enq waiting input or a - * forwarding descriptor */ + * forwarding descriptor + */ unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times - * out */ + * out + */ __u8 ksnc_rx_started; /* started receiving a message */ __u8 ksnc_rx_ready; /* data ready to read */ __u8 ksnc_rx_scheduled; /* being progressed */ @@ -338,7 +365,8 @@ struct ksock_conn { lnet_kiov_t *ksnc_rx_kiov; /* the page frags */ union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */ __u32 ksnc_rx_csum; /* partial checksum for incoming - * data */ + * data + */ void *ksnc_cookie; /* rx lnet_finalize passthru arg */ ksock_msg_t ksnc_msg; /* incoming message buffer: @@ -346,14 +374,16 @@ struct ksock_conn { * whole struct * V1.x message is a bare * lnet_hdr_t, it's stored in - * ksnc_msg.ksm_u.lnetmsg */ - + * ksnc_msg.ksm_u.lnetmsg + */ /* WRITER */ struct list_head ksnc_tx_list; /* where I enq waiting for output - * space */ + * space + */ struct list_head ksnc_tx_queue; /* packets waiting to be sent */ - struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet - * message or ZC-ACK */ + struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet + * message or ZC-ACK + */ unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */ int ksnc_tx_bufnob; /* send buffer marker */ @@ -361,7 +391,8 @@ struct ksock_conn { int ksnc_tx_ready; /* write space */ int ksnc_tx_scheduled; /* being progressed */ unsigned long ksnc_tx_last_post; /* time stamp of the last posted - * TX */ + * TX + */ }; struct ksock_route { @@ -370,20 +401,24 @@ struct ksock_route { struct ksock_peer *ksnr_peer; /* owning peer */ atomic_t ksnr_refcount; /* # users */ unsigned long ksnr_timeout; /* when (in jiffies) reconnection - * can happen next */ + * can happen next + */ long ksnr_retry_interval; /* how long between retries */ __u32 ksnr_myipaddr; /* my IP */ __u32 ksnr_ipaddr; /* IP address to connect to */ int ksnr_port; /* port to connect to */ unsigned int ksnr_scheduled:1; /* scheduled for attention */ unsigned int ksnr_connecting:1; /* connection establishment in - * progress */ + * progress + */ unsigned int ksnr_connected:4; /* connections established by - * type */ + * type + */ unsigned int ksnr_deleted:1; /* been removed from peer? */ unsigned int ksnr_share_count; /* created explicitly? 
*/ int ksnr_conn_count; /* # conns established by this - * route */ + * route + */ }; #define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ @@ -391,7 +426,8 @@ struct ksock_route { struct ksock_peer { struct list_head ksnp_list; /* stash on global peer list */ unsigned long ksnp_last_alive; /* when (in jiffies) I was last - * alive */ + * alive + */ lnet_process_id_t ksnp_id; /* who's on the other end(s) */ atomic_t ksnp_refcount; /* # users */ int ksnp_sharecount; /* lconf usage counter */ @@ -408,7 +444,8 @@ struct ksock_peer { struct list_head ksnp_tx_queue; /* waiting packets */ spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ struct list_head ksnp_zc_req_list; /* zero copy requests wait for - * ACK */ + * ACK + */ unsigned long ksnp_send_keepalive; /* time to send keepalive */ lnet_ni_t *ksnp_ni; /* which network */ int ksnp_n_passive_ips; /* # of... */ @@ -429,7 +466,8 @@ extern struct ksock_tunables ksocknal_tunables; #define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */ #define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */ #define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not - * preferred */ + * preferred + */ struct ksock_proto { /* version number of protocol */ diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index c1c6f604e6ada9d7291b7d998085aaf77b487e10..972f6094be75d7b39ed4cc0d55b278563dd4306d 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -620,7 +620,8 @@ ksocknal_launch_all_connections_locked(struct ksock_peer *peer) } struct ksock_conn * -ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk) +ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, + int nonblk) { struct list_head *tmp; struct ksock_conn *conn; @@ -630,10 +631,12 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonb int fnob = 0; list_for_each(tmp, &peer->ksnp_conns) { - struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list); - int nob = atomic_read(&c->ksnc_tx_nob) + - c->ksnc_sock->sk->sk_wmem_queued; - int rc; + struct ksock_conn *c; + int nob, rc; + + c = list_entry(tmp, struct ksock_conn, ksnc_list); + nob = atomic_read(&c->ksnc_tx_nob) + + c->ksnc_sock->sk->sk_wmem_queued; LASSERT(!c->ksnc_closing); LASSERT(c->ksnc_proto && @@ -752,9 +755,9 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn) LASSERT(msg->ksm_zc_cookies[1]); LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); + /* ZC ACK piggybacked on ztx release tx later */ if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) - ztx = tx; /* ZC ACK piggybacked on ztx release tx later */ - + ztx = tx; } else { /* * It's a normal packet - can it piggback a noop zc-ack that @@ -796,7 +799,8 @@ ksocknal_find_connectable_route_locked(struct ksock_peer *peer) LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); - if (route->ksnr_scheduled) /* connections being established */ + /* connections being established */ + if (route->ksnr_scheduled) continue; /* all route types connected ? 
*/ @@ -1514,7 +1518,10 @@ int ksocknal_scheduler(void *arg) rc = ksocknal_process_transmit(conn, tx); if (rc == -ENOMEM || rc == -EAGAIN) { - /* Incomplete send: replace tx on HEAD of tx_queue */ + /* + * Incomplete send: replace tx on HEAD of + * tx_queue + */ spin_lock_bh(&sched->kss_lock); list_add(&tx->tx_list, &conn->ksnc_tx_queue); } else { @@ -1724,7 +1731,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn, timeout = active ? *ksocknal_tunables.ksnd_timeout : lnet_acceptor_timeout(); - rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout); + rc = lnet_sock_read(sock, &hello->kshm_magic, + sizeof(hello->kshm_magic), timeout); if (rc) { CERROR("Error %d reading HELLO from %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -1798,7 +1806,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn, conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) { /* Userspace NAL assigns peer process ID from socket */ recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG; - recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr); + recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), + conn->ksnc_ipaddr); } else { recv_id.nid = hello->kshm_src_nid; recv_id.pid = hello->kshm_src_pid; @@ -1882,7 +1891,8 @@ ksocknal_connect(struct ksock_route *route) if (peer->ksnp_accepting > 0) { CDEBUG(D_NET, "peer %s(%d) already connecting to me, retry later.\n", - libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting); + libcfs_nid2str(peer->ksnp_id.nid), + peer->ksnp_accepting); retry_later = 1; } @@ -2241,7 +2251,8 @@ ksocknal_connd(void *arg) /* Nothing to do for 'timeout' */ set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait); + add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, + &wait); spin_unlock_bh(connd_lock); nloops = 0; @@ -2371,7 +2382,8 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer) struct ksock_conn *conn; struct ksock_tx *tx; - if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */ + /* last_alive will be updated by create_conn */ + if (list_empty(&peer->ksnp_conns)) return 0; if (peer->ksnp_proto != &ksocknal_protocol_v3x) @@ -2473,8 +2485,8 @@ ksocknal_check_peer_timeouts(int idx) * holding only shared lock */ if (!list_empty(&peer->ksnp_tx_queue)) { - struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next, - struct ksock_tx, tx_list); + tx = list_entry(peer->ksnp_tx_queue.next, + struct ksock_tx, tx_list); if (cfs_time_aftereq(cfs_time_current(), tx->tx_deadline)) { diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c index 6c95e989ca129087fc5adce305830784db57c8f7..4bcab4bcc2de1327a5a491711326a19f881ed774 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -202,7 +202,8 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn) fragnob = sum; conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, - iov[i].iov_base, fragnob); + iov[i].iov_base, + fragnob); } conn->ksnc_msg.ksm_csum = saved_csum; } @@ -291,7 +292,8 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx) } int -ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle) +ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, + int *rxmem, int *nagle) { struct socket *sock = conn->ksnc_sock; int len; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c 
b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c index 82e174f6d9fef26d97343238753eaec94b51bb96..8f0ff6ca1f396ca66128c5a803a5fefa3a3587b7 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c @@ -194,7 +194,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn, } if (!tx->tx_msg.ksm_zc_cookies[0]) { - /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */ + /* + * NOOP tx has only one ZC-ACK cookie, + * can carry at least one more + */ if (tx->tx_msg.ksm_zc_cookies[1] > cookie) { tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1]; tx->tx_msg.ksm_zc_cookies[1] = cookie; @@ -203,7 +206,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn, } if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) { - /* not likely to carry more ACKs, skip it to simplify logic */ + /* + * not likely to carry more ACKs, skip it + * to simplify logic + */ ksocknal_next_tx_carrier(conn); } @@ -237,7 +243,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn, } } else { - /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */ + /* + * ksm_zc_cookies[0] < ksm_zc_cookies[1], + * it is range of cookies + */ if (cookie >= tx->tx_msg.ksm_zc_cookies[0] && cookie <= tx->tx_msg.ksm_zc_cookies[1]) { CWARN("%s: duplicated ZC cookie: %llu\n", @@ -425,7 +434,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2) tx_zc_list) { __u64 c = tx->tx_msg.ksm_zc_cookies[0]; - if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) { + if (c == cookie1 || c == cookie2 || + (cookie1 < c && c < cookie2)) { tx->tx_msg.ksm_zc_cookies[0] = 0; list_del(&tx->tx_zc_list); list_add(&tx->tx_zc_list, &zlist); @@ -639,7 +649,8 @@ ksocknal_recv_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello, } static int -ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout) +ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, + int timeout) { struct socket *sock = conn->ksnc_sock; int rc; @@ -737,7 +748,10 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx) tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); } - /* Don't checksum before start sending, because packet can be piggybacked with ACK */ + /* + * Don't checksum before start sending, because packet can be + * piggybacked with ACK + */ } static void diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c index 23b36b89096417d42df650aeef016987ce3c03b4..a38db23222251d4011d3228815b9694c96dde7de 100644 --- a/drivers/staging/lustre/lnet/libcfs/debug.c +++ b/drivers/staging/lustre/lnet/libcfs/debug.c @@ -57,7 +57,7 @@ static int libcfs_param_debug_mb_set(const char *val, const struct kernel_param *kp) { int rc; - unsigned num; + unsigned int num; rc = kstrtouint(val, 0, &num); if (rc < 0) @@ -228,7 +228,8 @@ int libcfs_panic_in_progress; static const char * libcfs_debug_subsys2str(int subsys) { - static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES; + static const char * const libcfs_debug_subsystems[] = + LIBCFS_DEBUG_SUBSYS_NAMES; if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems)) return NULL; @@ -240,7 +241,8 @@ libcfs_debug_subsys2str(int subsys) static const char * libcfs_debug_dbg2str(int debug) { - static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES; + static const char * const libcfs_debug_masks[] = + 
LIBCFS_DEBUG_MASKS_NAMES; if (debug >= ARRAY_SIZE(libcfs_debug_masks)) return NULL; @@ -253,17 +255,17 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys) { const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : libcfs_debug_dbg2str; - int len = 0; - const char *token; - int i; + int len = 0; + const char *token; + int i; - if (mask == 0) { /* "0" */ + if (!mask) { /* "0" */ if (size > 0) str[0] = '0'; len = 1; } else { /* space-separated tokens */ for (i = 0; i < 32; i++) { - if ((mask & (1 << i)) == 0) + if (!(mask & (1 << i))) continue; token = fn(i); @@ -276,7 +278,7 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys) len++; } - while (*token != 0) { + while (*token) { if (len < size) str[len] = *token; token++; @@ -299,10 +301,10 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys) { const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : libcfs_debug_dbg2str; - int m = 0; - int matched; - int n; - int t; + int m = 0; + int matched; + int n; + int t; /* Allow a number for backwards compatibility */ @@ -313,7 +315,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys) t = sscanf(str, "%i%n", &m, &matched); if (t >= 1 && matched == n) { /* don't print warning for lctl set_param debug=0 or -1 */ - if (m != 0 && m != -1) + if (m && m != -1) CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n"); *mask = m; return 0; @@ -387,8 +389,8 @@ EXPORT_SYMBOL(libcfs_debug_dumplog); int libcfs_debug_init(unsigned long bufsize) { - int rc = 0; unsigned int max = libcfs_debug_mb; + int rc = 0; init_waitqueue_head(&debug_ctlwq); @@ -414,9 +416,9 @@ int libcfs_debug_init(unsigned long bufsize) max = max / num_possible_cpus(); max <<= (20 - PAGE_SHIFT); } - rc = cfs_tracefile_init(max); - if (rc == 0) { + rc = cfs_tracefile_init(max); + if (!rc) { libcfs_register_panic_notifier(); libcfs_debug_mb = cfs_trace_get_debug_mb(); } diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c index e4b1a0a86eae3a4941de57b4692a6e1c34e4ccd9..12dd50ad4efbd33c3a11754b2ea456ca0891c9f5 100644 --- a/drivers/staging/lustre/lnet/libcfs/fail.c +++ b/drivers/staging/lustre/lnet/libcfs/fail.c @@ -46,7 +46,7 @@ EXPORT_SYMBOL(cfs_race_waitq); int cfs_race_state; EXPORT_SYMBOL(cfs_race_state); -int __cfs_fail_check_set(__u32 id, __u32 value, int set) +int __cfs_fail_check_set(u32 id, u32 value, int set) { static atomic_t cfs_fail_count = ATOMIC_INIT(0); @@ -113,6 +113,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set) break; case CFS_FAIL_LOC_RESET: cfs_fail_loc = value; + atomic_set(&cfs_fail_count, 0); break; default: LASSERTF(0, "called with bad set %u\n", set); @@ -123,7 +124,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set) } EXPORT_SYMBOL(__cfs_fail_check_set); -int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) +int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set) { int ret; diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c index 23283b6e09ab1903e97341ac20313605c9215f39..c93c59d8fe6c02a1077cda406854983fef832133 100644 --- a/drivers/staging/lustre/lnet/libcfs/hash.c +++ b/drivers/staging/lustre/lnet/libcfs/hash.c @@ -289,7 +289,7 @@ cfs_hash_hd_hhead_size(struct cfs_hash *hs) static struct hlist_head * cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) { - struct cfs_hash_head_dep *head; + struct cfs_hash_head_dep *head; head = 
(struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0]; return &head[bd->bd_offset].hd_head; @@ -492,7 +492,7 @@ cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd) cfs_hash_bd_from_key(hs, hs->hs_buckets, hs->hs_cur_bits, key, bd); } else { - LASSERT(hs->hs_rehash_bits != 0); + LASSERT(hs->hs_rehash_bits); cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, hs->hs_rehash_bits, key, bd); } @@ -507,14 +507,14 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur) bd->bd_bucket->hsb_depmax = dep_cur; # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 - if (likely(warn_on_depth == 0 || + if (likely(!warn_on_depth || max(warn_on_depth, hs->hs_dep_max) >= dep_cur)) return; spin_lock(&hs->hs_dep_lock); - hs->hs_dep_max = dep_cur; - hs->hs_dep_bkt = bd->bd_bucket->hsb_index; - hs->hs_dep_off = bd->bd_offset; + hs->hs_dep_max = dep_cur; + hs->hs_dep_bkt = bd->bd_bucket->hsb_index; + hs->hs_dep_off = bd->bd_offset; hs->hs_dep_bits = hs->hs_cur_bits; spin_unlock(&hs->hs_dep_lock); @@ -531,7 +531,7 @@ cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode); cfs_hash_bd_dep_record(hs, bd, rc); bd->bd_bucket->hsb_version++; - if (unlikely(bd->bd_bucket->hsb_version == 0)) + if (unlikely(!bd->bd_bucket->hsb_version)) bd->bd_bucket->hsb_version++; bd->bd_bucket->hsb_count++; @@ -551,7 +551,7 @@ cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, LASSERT(bd->bd_bucket->hsb_count > 0); bd->bd_bucket->hsb_count--; bd->bd_bucket->hsb_version++; - if (unlikely(bd->bd_bucket->hsb_version == 0)) + if (unlikely(!bd->bd_bucket->hsb_version)) bd->bd_bucket->hsb_version++; if (cfs_hash_with_counter(hs)) { @@ -571,7 +571,7 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, struct cfs_hash_bucket *nbkt = bd_new->bd_bucket; int rc; - if (cfs_hash_bd_compare(bd_old, bd_new) == 0) + if (!cfs_hash_bd_compare(bd_old, bd_new)) return; /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops @@ -584,11 +584,11 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, LASSERT(obkt->hsb_count > 0); obkt->hsb_count--; obkt->hsb_version++; - if (unlikely(obkt->hsb_version == 0)) + if (unlikely(!obkt->hsb_version)) obkt->hsb_version++; nbkt->hsb_count++; nbkt->hsb_version++; - if (unlikely(nbkt->hsb_version == 0)) + if (unlikely(!nbkt->hsb_version)) nbkt->hsb_version++; } @@ -629,7 +629,7 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd); struct hlist_node *ehnode; struct hlist_node *match; - int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0; + int intent_add = intent & CFS_HS_LOOKUP_MASK_ADD; /* with this function, we can avoid a lot of useless refcount ops, * which are expensive atomic operations most time. @@ -643,13 +643,13 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, continue; /* match and ... */ - if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) { + if (intent & CFS_HS_LOOKUP_MASK_DEL) { cfs_hash_bd_del_locked(hs, bd, ehnode); return ehnode; } /* caller wants refcount? 
*/ - if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0) + if (intent & CFS_HS_LOOKUP_MASK_REF) cfs_hash_get(hs, ehnode); return ehnode; } @@ -682,7 +682,7 @@ EXPORT_SYMBOL(cfs_hash_bd_peek_locked); static void cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, int excl) + unsigned int n, int excl) { struct cfs_hash_bucket *prev = NULL; int i; @@ -704,7 +704,7 @@ cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, static void cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, int excl) + unsigned int n, int excl) { struct cfs_hash_bucket *prev = NULL; int i; @@ -719,10 +719,10 @@ cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, static struct hlist_node * cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key) + unsigned int n, const void *key) { struct hlist_node *ehnode; - unsigned i; + unsigned int i; cfs_hash_for_each_bd(bds, n, i) { ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, @@ -735,12 +735,12 @@ cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, static struct hlist_node * cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key, + unsigned int n, const void *key, struct hlist_node *hnode, int noref) { struct hlist_node *ehnode; int intent; - unsigned i; + unsigned int i; LASSERT(hnode); intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK; @@ -766,7 +766,7 @@ cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, static struct hlist_node * cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key, + unsigned int n, const void *key, struct hlist_node *hnode) { struct hlist_node *ehnode; @@ -815,7 +815,7 @@ cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, return; } - LASSERT(hs->hs_rehash_bits != 0); + LASSERT(hs->hs_rehash_bits); cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, hs->hs_rehash_bits, key, &bds[1]); @@ -883,7 +883,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, struct cfs_hash_bucket **new_bkts; int i; - LASSERT(old_size == 0 || old_bkts); + LASSERT(!old_size || old_bkts); if (old_bkts && old_size == new_size) return old_bkts; @@ -908,9 +908,9 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, return NULL; } - new_bkts[i]->hsb_index = i; - new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ - new_bkts[i]->hsb_depmax = -1; /* unknown */ + new_bkts[i]->hsb_index = i; + new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ + new_bkts[i]->hsb_depmax = -1; /* unknown */ bd.bd_bucket = new_bkts[i]; cfs_hash_bd_for_each_hlist(hs, &bd, hhead) INIT_HLIST_HEAD(hhead); @@ -950,9 +950,9 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi) int bits; spin_lock(&hs->hs_dep_lock); - dep = hs->hs_dep_max; - bkt = hs->hs_dep_bkt; - off = hs->hs_dep_off; + dep = hs->hs_dep_max; + bkt = hs->hs_dep_bkt; + off = hs->hs_dep_off; bits = hs->hs_dep_bits; spin_unlock(&hs->hs_dep_lock); @@ -976,7 +976,7 @@ static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) return; spin_lock(&hs->hs_dep_lock); - while (hs->hs_dep_bits != 0) { + while (hs->hs_dep_bits) { spin_unlock(&hs->hs_dep_lock); cond_resched(); spin_lock(&hs->hs_dep_lock); @@ -992,10 +992,10 @@ static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {} #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */ struct 
cfs_hash * -cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, - unsigned bkt_bits, unsigned extra_bytes, - unsigned min_theta, unsigned max_theta, - struct cfs_hash_ops *ops, unsigned flags) +cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits, + unsigned int bkt_bits, unsigned int extra_bytes, + unsigned int min_theta, unsigned int max_theta, + struct cfs_hash_ops *ops, unsigned int flags) { struct cfs_hash *hs; int len; @@ -1010,18 +1010,17 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, LASSERT(ops->hs_get); LASSERT(ops->hs_put_locked); - if ((flags & CFS_HASH_REHASH) != 0) + if (flags & CFS_HASH_REHASH) flags |= CFS_HASH_COUNTER; /* must have counter */ LASSERT(cur_bits > 0); LASSERT(cur_bits >= bkt_bits); LASSERT(max_bits >= cur_bits && max_bits < 31); - LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits)); - LASSERT(ergo((flags & CFS_HASH_REHASH) != 0, - (flags & CFS_HASH_NO_LOCK) == 0)); - LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy)); + LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits)); + LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK))); + LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy)); - len = (flags & CFS_HASH_BIGNAME) == 0 ? + len = !(flags & CFS_HASH_BIGNAME) ? CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN; LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len])); if (!hs) @@ -1036,12 +1035,12 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, cfs_hash_lock_setup(hs); cfs_hash_hlist_setup(hs); - hs->hs_cur_bits = (__u8)cur_bits; - hs->hs_min_bits = (__u8)cur_bits; - hs->hs_max_bits = (__u8)max_bits; - hs->hs_bkt_bits = (__u8)bkt_bits; + hs->hs_cur_bits = (u8)cur_bits; + hs->hs_min_bits = (u8)cur_bits; + hs->hs_max_bits = (u8)max_bits; + hs->hs_bkt_bits = (u8)bkt_bits; - hs->hs_ops = ops; + hs->hs_ops = ops; hs->hs_extra_bytes = extra_bytes; hs->hs_rehash_bits = 0; cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker); @@ -1107,12 +1106,12 @@ cfs_hash_destroy(struct cfs_hash *hs) cfs_hash_exit(hs, hnode); } } - LASSERT(bd.bd_bucket->hsb_count == 0); + LASSERT(!bd.bd_bucket->hsb_count); cfs_hash_bd_unlock(hs, &bd, 1); cond_resched(); } - LASSERT(atomic_read(&hs->hs_count) == 0); + LASSERT(!atomic_read(&hs->hs_count)); cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs), 0, CFS_HASH_NBKT(hs)); @@ -1216,7 +1215,7 @@ cfs_hash_find_or_add(struct cfs_hash *hs, const void *key, struct cfs_hash_bd bds[2]; int bits = 0; - LASSERT(hlist_unhashed(hnode)); + LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode); cfs_hash_lock(hs, 0); cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); @@ -1293,7 +1292,7 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) } if (hnode) { - obj = cfs_hash_object(hs, hnode); + obj = cfs_hash_object(hs, hnode); bits = cfs_hash_rehash_bits(hs); } @@ -1388,7 +1387,7 @@ cfs_hash_for_each_exit(struct cfs_hash *hs) bits = cfs_hash_rehash_bits(hs); cfs_hash_unlock(hs, 1); /* NB: it's race on cfs_has_t::hs_iterating, see above */ - if (remained == 0) + if (!remained) hs->hs_iterating = 0; if (bits > 0) { cfs_hash_rehash(hs, atomic_read(&hs->hs_count) < @@ -1406,14 +1405,14 @@ cfs_hash_for_each_exit(struct cfs_hash *hs) * . 
if @removal_safe is true, use can remove current item by * cfs_hash_bd_del_locked */ -static __u64 +static u64 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data, int remove_safe) { struct hlist_node *hnode; struct hlist_node *pos; struct cfs_hash_bd bd; - __u64 count = 0; + u64 count = 0; int excl = !!remove_safe; int loop = 0; int i; @@ -1526,7 +1525,7 @@ cfs_hash_is_empty(struct cfs_hash *hs) } EXPORT_SYMBOL(cfs_hash_is_empty); -__u64 +u64 cfs_hash_size_get(struct cfs_hash *hs) { return cfs_hash_with_counter(hs) ? @@ -1552,26 +1551,33 @@ EXPORT_SYMBOL(cfs_hash_size_get); */ static int cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) + void *data, int start) { struct hlist_node *hnode; struct hlist_node *tmp; struct cfs_hash_bd bd; - __u32 version; + u32 version; int count = 0; int stop_on_change; - int rc; + int end = -1; + int rc = 0; int i; stop_on_change = cfs_hash_with_rehash_key(hs) || !cfs_hash_with_no_itemref(hs) || !hs->hs_ops->hs_put_locked; cfs_hash_lock(hs, 0); +again: LASSERT(!cfs_hash_is_rehashing(hs)); cfs_hash_for_each_bucket(hs, &bd, i) { struct hlist_head *hhead; + if (i < start) + continue; + else if (end > 0 && i >= end) + break; + cfs_hash_bd_lock(hs, &bd, 0); version = cfs_hash_bd_version_get(&bd); @@ -1611,14 +1617,19 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, if (rc) /* callback wants to break iteration */ break; } - cfs_hash_unlock(hs, 0); + if (start > 0 && !rc) { + end = start; + start = 0; + goto again; + } + cfs_hash_unlock(hs, 0); return count; } int cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) + void *data, int start) { if (cfs_hash_with_no_lock(hs) || cfs_hash_with_rehash_key(hs) || @@ -1630,7 +1641,7 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, return -EOPNOTSUPP; cfs_hash_for_each_enter(hs); - cfs_hash_for_each_relax(hs, func, data); + cfs_hash_for_each_relax(hs, func, data, start); cfs_hash_for_each_exit(hs); return 0; @@ -1652,7 +1663,7 @@ int cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data) { - unsigned i = 0; + unsigned int i = 0; if (cfs_hash_with_no_lock(hs)) return -EOPNOTSUPP; @@ -1662,7 +1673,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, return -EOPNOTSUPP; cfs_hash_for_each_enter(hs); - while (cfs_hash_for_each_relax(hs, func, data)) { + while (cfs_hash_for_each_relax(hs, func, data, 0)) { CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n", hs->hs_name, i++); } @@ -1672,7 +1683,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, EXPORT_SYMBOL(cfs_hash_for_each_empty); void -cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, +cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex, cfs_hash_for_each_cb_t func, void *data) { struct hlist_head *hhead; @@ -1704,7 +1715,7 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each); * the passed callback @func and pass to it as an argument each hash * item and the private @data. During the callback the bucket lock * is held so the callback must never sleep. 
- */ + */ void cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, cfs_hash_for_each_cb_t func, void *data) @@ -1936,7 +1947,7 @@ cfs_hash_rehash_worker(struct cfs_workitem *wi) /* can't refer to @hs anymore because it could be destroyed */ if (bkts) cfs_hash_buckets_free(bkts, bsize, new_size, old_size); - if (rc != 0) + if (rc) CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc); /* return 1 only if cfs_wi_exit is called */ return rc == -ESRCH; @@ -2005,7 +2016,7 @@ cfs_hash_full_bkts(struct cfs_hash *hs) if (!hs->hs_rehash_buckets) return hs->hs_buckets; - LASSERT(hs->hs_rehash_bits != 0); + LASSERT(hs->hs_rehash_bits); return hs->hs_rehash_bits > hs->hs_cur_bits ? hs->hs_rehash_buckets : hs->hs_buckets; } @@ -2017,7 +2028,7 @@ cfs_hash_full_nbkt(struct cfs_hash *hs) if (!hs->hs_rehash_buckets) return CFS_HASH_NBKT(hs); - LASSERT(hs->hs_rehash_bits != 0); + LASSERT(hs->hs_rehash_bits); return hs->hs_rehash_bits > hs->hs_cur_bits ? CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs); } diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c index 33352af6c27fd2b07ab7dc78e5f40104cb5b3d84..55caa19def514d5e6251496c92e061c79b1e48a3 100644 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c @@ -74,7 +74,7 @@ EXPORT_SYMBOL(cfs_cpt_table_free); int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) { - int rc; + int rc; rc = snprintf(buf, len, "%d\t: %d\n", 0, 0); len -= rc; diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c index 83543f928279823d5db029956e05f031edd8f063..1967b97c4afcb0f9fd6806b803f44441c98db8a9 100644 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c @@ -52,9 +52,9 @@ struct cfs_percpt_lock * cfs_percpt_lock_create(struct cfs_cpt_table *cptab, struct lock_class_key *keys) { - struct cfs_percpt_lock *pcl; - spinlock_t *lock; - int i; + struct cfs_percpt_lock *pcl; + spinlock_t *lock; + int i; /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */ LIBCFS_ALLOC(pcl, sizeof(*pcl)); @@ -73,7 +73,7 @@ cfs_percpt_lock_create(struct cfs_cpt_table *cptab, cfs_percpt_for_each(lock, i, pcl->pcl_locks) { spin_lock_init(lock); - if (keys != NULL) + if (keys) lockdep_set_class(lock, &keys[i]); } @@ -94,8 +94,8 @@ void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) __acquires(pcl->pcl_locks) { - int ncpt = cfs_cpt_number(pcl->pcl_cptab); - int i; + int ncpt = cfs_cpt_number(pcl->pcl_cptab); + int i; LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt); @@ -114,7 +114,7 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) /* exclusive lock request */ for (i = 0; i < ncpt; i++) { spin_lock(pcl->pcl_locks[i]); - if (i == 0) { + if (!i) { LASSERT(!pcl->pcl_locked); /* nobody should take private lock after this * so I wouldn't starve for too long time @@ -130,8 +130,8 @@ void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index) __releases(pcl->pcl_locks) { - int ncpt = cfs_cpt_number(pcl->pcl_cptab); - int i; + int ncpt = cfs_cpt_number(pcl->pcl_cptab); + int i; index = ncpt == 1 ? 
0 : index; @@ -141,7 +141,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index) } for (i = ncpt - 1; i >= 0; i--) { - if (i == 0) { + if (!i) { LASSERT(pcl->pcl_locked); pcl->pcl_locked = 0; } diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c index d0e81bb41cdcfdc97cf1d34111adf25f2213d27c..ef085ba23194c4346060fcd191d324b2dd866975 100644 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c @@ -43,8 +43,8 @@ struct cfs_var_array { void cfs_percpt_free(void *vars) { - struct cfs_var_array *arr; - int i; + struct cfs_var_array *arr; + int i; arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); @@ -72,9 +72,9 @@ EXPORT_SYMBOL(cfs_percpt_free); void * cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size) { - struct cfs_var_array *arr; - int count; - int i; + struct cfs_var_array *arr; + int count; + int i; count = cfs_cpt_number(cptab); @@ -120,8 +120,8 @@ EXPORT_SYMBOL(cfs_percpt_number); void cfs_array_free(void *vars) { - struct cfs_var_array *arr; - int i; + struct cfs_var_array *arr; + int i; arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); @@ -144,15 +144,15 @@ EXPORT_SYMBOL(cfs_array_free); void * cfs_array_alloc(int count, unsigned int size) { - struct cfs_var_array *arr; - int i; + struct cfs_var_array *arr; + int i; LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count])); if (!arr) return NULL; - arr->va_count = count; - arr->va_size = size; + arr->va_count = count; + arr->va_size = size; for (i = 0; i < count; i++) { LIBCFS_ALLOC(arr->va_ptrs[i], size); diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c index 56a614d7713b1d81afd12974034b3f62caa8120e..02de1ee720fde23206d310cfcc5ef4088bb25a87 100644 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c @@ -79,7 +79,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), for (i = 0; i < 32; i++) { debugstr = bit2str(i); if (debugstr && strlen(debugstr) == len && - strncasecmp(str, debugstr, len) == 0) { + !strncasecmp(str, debugstr, len)) { if (op == '-') newmask &= ~(1 << i); else @@ -89,7 +89,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), } } if (!found && len == 3 && - (strncasecmp(str, "ALL", len) == 0)) { + !strncasecmp(str, "ALL", len)) { if (op == '-') newmask = minmask; else @@ -112,7 +112,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), char *cfs_firststr(char *str, size_t size) { size_t i = 0; - char *end; + char *end; /* trim leading spaces */ while (i < size && *str && isspace(*str)) { @@ -182,7 +182,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res) next->ls_len--; } - if (next->ls_len == 0) /* whitespaces only */ + if (!next->ls_len) /* whitespaces only */ return 0; if (*next->ls_str == delim) { @@ -222,8 +222,8 @@ EXPORT_SYMBOL(cfs_gettok); * \retval 0 otherwise */ int -cfs_str2num_check(char *str, int nob, unsigned *num, - unsigned min, unsigned max) +cfs_str2num_check(char *str, int nob, unsigned int *num, + unsigned int min, unsigned int max) { bool all_numbers = true; char *endp, cache; @@ -273,11 +273,11 @@ EXPORT_SYMBOL(cfs_str2num_check); * -ENOMEM will be returned. 
*/ static int -cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max, +cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max, int bracketed, struct cfs_range_expr **expr) { - struct cfs_range_expr *re; - struct cfs_lstr tok; + struct cfs_range_expr *re; + struct cfs_lstr tok; LIBCFS_ALLOC(re, sizeof(*re)); if (!re) @@ -391,7 +391,7 @@ cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list) i += scnprintf(buffer + i, count - i, "["); list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - if (j++ != 0) + if (j++) i += scnprintf(buffer + i, count - i, ","); i += cfs_range_expr_print(buffer + i, count - i, expr, numexprs > 1); @@ -411,13 +411,13 @@ EXPORT_SYMBOL(cfs_expr_list_print); * \retval 0 otherwise */ int -cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list) +cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list) { - struct cfs_range_expr *expr; + struct cfs_range_expr *expr; list_for_each_entry(expr, &expr_list->el_exprs, re_link) { if (value >= expr->re_lo && value <= expr->re_hi && - ((value - expr->re_lo) % expr->re_stride) == 0) + !((value - expr->re_lo) % expr->re_stride)) return 1; } @@ -433,21 +433,21 @@ EXPORT_SYMBOL(cfs_expr_list_match); * \retval < 0 for failure */ int -cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp) +cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, u32 **valpp) { - struct cfs_range_expr *expr; - __u32 *val; - int count = 0; - int i; + struct cfs_range_expr *expr; + u32 *val; + int count = 0; + int i; list_for_each_entry(expr, &expr_list->el_exprs, re_link) { for (i = expr->re_lo; i <= expr->re_hi; i++) { - if (((i - expr->re_lo) % expr->re_stride) == 0) + if (!((i - expr->re_lo) % expr->re_stride)) count++; } } - if (count == 0) /* empty expression list */ + if (!count) /* empty expression list */ return 0; if (count > max) { @@ -463,7 +463,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp) count = 0; list_for_each_entry(expr, &expr_list->el_exprs, re_link) { for (i = expr->re_lo; i <= expr->re_hi; i++) { - if (((i - expr->re_lo) % expr->re_stride) == 0) + if (!((i - expr->re_lo) % expr->re_stride)) val[count++] = i; } } @@ -501,13 +501,13 @@ EXPORT_SYMBOL(cfs_expr_list_free); * \retval -errno otherwise */ int -cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, +cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max, struct cfs_expr_list **elpp) { - struct cfs_expr_list *expr_list; - struct cfs_range_expr *expr; - struct cfs_lstr src; - int rc; + struct cfs_expr_list *expr_list; + struct cfs_range_expr *expr; + struct cfs_lstr src; + int rc; LIBCFS_ALLOC(expr_list, sizeof(*expr_list)); if (!expr_list) @@ -533,18 +533,18 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, } rc = cfs_range_expr_parse(&tok, min, max, 1, &expr); - if (rc != 0) + if (rc) break; list_add_tail(&expr->re_link, &expr_list->el_exprs); } } else { rc = cfs_range_expr_parse(&src, min, max, 0, &expr); - if (rc == 0) + if (!rc) list_add_tail(&expr->re_link, &expr_list->el_exprs); } - if (rc != 0) + if (rc) cfs_expr_list_free(expr_list); else *elpp = expr_list; diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c index e8b1a61420de42f4e140ed0af1c24d4d6dd1965a..427e2198bb9e85b8a11693fd7fb1495f2799aa7e 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c +++ 
b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c @@ -55,6 +55,8 @@ MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions"); * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket * are NUMA node ID, number before bracket is CPU partition ID. * + * i.e: "N", shortcut expression to create CPT from NUMA & CPU topology + * * NB: If user specified cpu_pattern, cpu_npartitions will be ignored */ static char *cpu_pattern = ""; @@ -88,7 +90,7 @@ cfs_node_to_cpumask(int node, cpumask_t *mask) void cfs_cpt_table_free(struct cfs_cpt_table *cptab) { - int i; + int i; if (cptab->ctb_cpu2cpt) { LIBCFS_FREE(cptab->ctb_cpu2cpt, @@ -126,7 +128,7 @@ struct cfs_cpt_table * cfs_cpt_table_alloc(unsigned int ncpt) { struct cfs_cpt_table *cptab; - int i; + int i; LIBCFS_ALLOC(cptab, sizeof(*cptab)); if (!cptab) @@ -177,10 +179,10 @@ EXPORT_SYMBOL(cfs_cpt_table_alloc); int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) { - char *tmp = buf; - int rc = 0; - int i; - int j; + char *tmp = buf; + int rc = 0; + int i; + int j; for (i = 0; i < cptab->ctb_nparts; i++) { if (len > 0) { @@ -271,7 +273,7 @@ EXPORT_SYMBOL(cfs_cpt_nodemask); int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) { - int node; + int node; LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); @@ -311,8 +313,8 @@ EXPORT_SYMBOL(cfs_cpt_set_cpu); void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) { - int node; - int i; + int node; + int i; LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); @@ -371,9 +373,9 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpu); int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) { - int i; + int i; - if (cpumask_weight(mask) == 0 || + if (!cpumask_weight(mask) || cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", cpt); @@ -392,7 +394,7 @@ EXPORT_SYMBOL(cfs_cpt_set_cpumask); void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) { - int i; + int i; for_each_cpu(i, mask) cfs_cpt_unset_cpu(cptab, cpt, i); @@ -402,8 +404,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpumask); int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) { - cpumask_t *mask; - int rc; + cpumask_t *mask; + int rc; if (node < 0 || node >= MAX_NUMNODES) { CDEBUG(D_INFO, @@ -449,7 +451,7 @@ EXPORT_SYMBOL(cfs_cpt_unset_node); int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) { - int i; + int i; for_each_node_mask(i, *mask) { if (!cfs_cpt_set_node(cptab, cpt, i)) @@ -463,7 +465,7 @@ EXPORT_SYMBOL(cfs_cpt_set_nodemask); void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) { - int i; + int i; for_each_node_mask(i, *mask) cfs_cpt_unset_node(cptab, cpt, i); @@ -473,8 +475,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_nodemask); void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) { - int last; - int i; + int last; + int i; if (cpt == CFS_CPT_ANY) { last = cptab->ctb_nparts - 1; @@ -493,10 +495,10 @@ EXPORT_SYMBOL(cfs_cpt_clear); int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) { - nodemask_t *mask; - int weight; - int rotor; - int node; + nodemask_t *mask; + int weight; + int rotor; + int node; /* convert CPU partition ID to HW node id */ @@ -514,7 +516,7 @@ cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) rotor %= weight; for_each_node_mask(node, *mask) { - if (rotor-- == 0) + if (!rotor--) return node; } @@ -526,8 +528,8 @@ EXPORT_SYMBOL(cfs_cpt_spread_node); int 
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) { - int cpu = smp_processor_id(); - int cpt = cptab->ctb_cpu2cpt[cpu]; + int cpu = smp_processor_id(); + int cpt = cptab->ctb_cpu2cpt[cpu]; if (cpt < 0) { if (!remap) @@ -555,10 +557,10 @@ EXPORT_SYMBOL(cfs_cpt_of_cpu); int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) { - cpumask_t *cpumask; - nodemask_t *nodemask; - int rc; - int i; + cpumask_t *cpumask; + nodemask_t *nodemask; + int rc; + int i; LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); @@ -582,7 +584,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) rc = set_cpus_allowed_ptr(current, cpumask); set_mems_allowed(*nodemask); - if (rc == 0) + if (!rc) schedule(); /* switch to allowed CPU */ return rc; @@ -601,10 +603,10 @@ static int cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, cpumask_t *node, int number) { - cpumask_t *socket = NULL; - cpumask_t *core = NULL; - int rc = 0; - int cpu; + cpumask_t *socket = NULL; + cpumask_t *core = NULL; + int rc = 0; + int cpu; LASSERT(number > 0); @@ -638,7 +640,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, LASSERT(!cpumask_empty(socket)); while (!cpumask_empty(socket)) { - int i; + int i; /* get cpumask for hts in the same core */ cpumask_copy(core, topology_sibling_cpumask(cpu)); @@ -656,14 +658,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, goto out; } - if (--number == 0) + if (!--number) goto out; } cpu = cpumask_first(socket); } } - out: +out: if (socket) LIBCFS_FREE(socket, cpumask_size()); if (core) @@ -676,9 +678,9 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, static unsigned int cfs_cpt_num_estimate(void) { - unsigned nnode = num_online_nodes(); - unsigned ncpu = num_online_cpus(); - unsigned ncpt; + unsigned int nnode = num_online_nodes(); + unsigned int ncpu = num_online_cpus(); + unsigned int ncpt; if (ncpu <= CPT_WEIGHT_MIN) { ncpt = 1; @@ -703,14 +705,14 @@ cfs_cpt_num_estimate(void) ncpt = nnode; - out: +out: #if (BITS_PER_LONG == 32) /* config many CPU partitions on 32-bit system could consume * too much memory */ ncpt = min(2U, ncpt); #endif - while (ncpu % ncpt != 0) + while (ncpu % ncpt) ncpt--; /* worst case is 1 */ return ncpt; @@ -720,11 +722,11 @@ static struct cfs_cpt_table * cfs_cpt_table_create(int ncpt) { struct cfs_cpt_table *cptab = NULL; - cpumask_t *mask = NULL; - int cpt = 0; - int num; - int rc; - int i; + cpumask_t *mask = NULL; + int cpt = 0; + int num; + int rc; + int i; rc = cfs_cpt_num_estimate(); if (ncpt <= 0) @@ -735,7 +737,7 @@ cfs_cpt_table_create(int ncpt) ncpt, rc); } - if (num_online_cpus() % ncpt != 0) { + if (num_online_cpus() % ncpt) { CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n", (int)num_online_cpus(), ncpt); goto failed; @@ -748,7 +750,7 @@ cfs_cpt_table_create(int ncpt) } num = num_online_cpus() / ncpt; - if (num == 0) { + if (!num) { CERROR("CPU changed while setting CPU partition\n"); goto failed; } @@ -764,7 +766,7 @@ cfs_cpt_table_create(int ncpt) while (!cpumask_empty(mask)) { struct cfs_cpu_partition *part; - int n; + int n; /* * Each emulated NUMA node has all allowed CPUs in @@ -817,27 +819,36 @@ cfs_cpt_table_create(int ncpt) static struct cfs_cpt_table * cfs_cpt_table_create_pattern(char *pattern) { - struct cfs_cpt_table *cptab; - char *str = pattern; - int node = 0; - int high; - int ncpt; - int c; - - for (ncpt = 0;; ncpt++) { /* quick scan bracket */ - str = strchr(str, '['); 
- if (!str) - break; - str++; - } + struct cfs_cpt_table *cptab; + char *str; + int node = 0; + int high; + int ncpt = 0; + int cpt; + int rc; + int c; + int i; str = cfs_trimwhite(pattern); if (*str == 'n' || *str == 'N') { pattern = str + 1; - node = 1; + if (*pattern != '\0') { + node = 1; + } else { /* shortcut to create CPT from NUMA & CPU topology */ + node = -1; + ncpt = num_online_nodes(); + } + } + + if (!ncpt) { /* scanning bracket which is mark of partition */ + for (str = pattern;; str++, ncpt++) { + str = strchr(str, '['); + if (!str) + break; + } } - if (ncpt == 0 || + if (!ncpt || (node && ncpt > num_online_nodes()) || (!node && ncpt > num_online_cpus())) { CERROR("Invalid pattern %s, or too many partitions %d\n", @@ -845,25 +856,39 @@ cfs_cpt_table_create_pattern(char *pattern) return NULL; } - high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; - cptab = cfs_cpt_table_alloc(ncpt); if (!cptab) { CERROR("Failed to allocate cpu partition table\n"); return NULL; } + if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */ + cpt = 0; + + for_each_online_node(i) { + if (cpt >= ncpt) { + CERROR("CPU changed while setting CPU partition table, %d/%d\n", + cpt, ncpt); + goto failed; + } + + rc = cfs_cpt_set_node(cptab, cpt++, i); + if (!rc) + goto failed; + } + return cptab; + } + + high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; + for (str = cfs_trimwhite(pattern), c = 0;; c++) { - struct cfs_range_expr *range; - struct cfs_expr_list *el; - char *bracket = strchr(str, '['); - int cpt; - int rc; - int i; - int n; + struct cfs_range_expr *range; + struct cfs_expr_list *el; + char *bracket = strchr(str, '['); + int n; if (!bracket) { - if (*str != 0) { + if (*str) { CERROR("Invalid pattern %s\n", str); goto failed; } @@ -886,7 +911,7 @@ cfs_cpt_table_create_pattern(char *pattern) goto failed; } - if (cfs_cpt_weight(cptab, cpt) != 0) { + if (cfs_cpt_weight(cptab, cpt)) { CERROR("Partition %d has already been set.\n", cpt); goto failed; } @@ -905,14 +930,14 @@ cfs_cpt_table_create_pattern(char *pattern) } if (cfs_expr_list_parse(str, (bracket - str) + 1, - 0, high, &el) != 0) { + 0, high, &el)) { CERROR("Can't parse number range: %s\n", str); goto failed; } list_for_each_entry(range, &el->el_exprs, re_link) { for (i = range->re_lo; i <= range->re_hi; i++) { - if ((i - range->re_lo) % range->re_stride != 0) + if ((i - range->re_lo) % range->re_stride) continue; rc = node ? cfs_cpt_set_node(cptab, cpt, i) : @@ -942,48 +967,38 @@ cfs_cpt_table_create_pattern(char *pattern) } #ifdef CONFIG_HOTPLUG_CPU -static int -cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - bool warn; - - switch (action) { - case CPU_DEAD: - case CPU_DEAD_FROZEN: - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - spin_lock(&cpt_data.cpt_lock); - cpt_data.cpt_version++; - spin_unlock(&cpt_data.cpt_lock); - /* Fall through */ - default: - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) { - CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n", - cpu, action); - break; - } +static enum cpuhp_state lustre_cpu_online; - mutex_lock(&cpt_data.cpt_mutex); - /* if all HTs in a core are offline, it may break affinity */ - cpumask_copy(cpt_data.cpt_cpumask, - topology_sibling_cpumask(cpu)); - warn = cpumask_any_and(cpt_data.cpt_cpumask, - cpu_online_mask) >= nr_cpu_ids; - mutex_unlock(&cpt_data.cpt_mutex); - CDEBUG(warn ? 
D_WARNING : D_INFO, - "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n", - cpu, action); - } +static void cfs_cpu_incr_cpt_version(void) +{ + spin_lock(&cpt_data.cpt_lock); + cpt_data.cpt_version++; + spin_unlock(&cpt_data.cpt_lock); +} - return NOTIFY_OK; +static int cfs_cpu_online(unsigned int cpu) +{ + cfs_cpu_incr_cpt_version(); + return 0; } -static struct notifier_block cfs_cpu_notifier = { - .notifier_call = cfs_cpu_notify, - .priority = 0 -}; +static int cfs_cpu_dead(unsigned int cpu) +{ + bool warn; + + cfs_cpu_incr_cpt_version(); + mutex_lock(&cpt_data.cpt_mutex); + /* if all HTs in a core are offline, it may break affinity */ + cpumask_copy(cpt_data.cpt_cpumask, topology_sibling_cpumask(cpu)); + warn = cpumask_any_and(cpt_data.cpt_cpumask, + cpu_online_mask) >= nr_cpu_ids; + mutex_unlock(&cpt_data.cpt_mutex); + CDEBUG(warn ? D_WARNING : D_INFO, + "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n", + cpu); + return 0; +} #endif void @@ -993,7 +1008,9 @@ cfs_cpu_fini(void) cfs_cpt_table_free(cfs_cpt_table); #ifdef CONFIG_HOTPLUG_CPU - unregister_hotcpu_notifier(&cfs_cpu_notifier); + if (lustre_cpu_online > 0) + cpuhp_remove_state_nocalls(lustre_cpu_online); + cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD); #endif if (cpt_data.cpt_cpumask) LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size()); @@ -1002,6 +1019,8 @@ cfs_cpu_fini(void) int cfs_cpu_init(void) { + int ret = 0; + LASSERT(!cfs_cpt_table); memset(&cpt_data, 0, sizeof(cpt_data)); @@ -1016,10 +1035,21 @@ cfs_cpu_init(void) mutex_init(&cpt_data.cpt_mutex); #ifdef CONFIG_HOTPLUG_CPU - register_hotcpu_notifier(&cfs_cpu_notifier); + ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD, + "staging/lustre/cfe:dead", NULL, + cfs_cpu_dead); + if (ret < 0) + goto failed; + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "staging/lustre/cfe:online", + cfs_cpu_online, NULL); + if (ret < 0) + goto failed; + lustre_cpu_online = ret; #endif + ret = -EINVAL; - if (*cpu_pattern != 0) { + if (*cpu_pattern) { cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern); if (!cfs_cpt_table) { CERROR("Failed to create cptab from pattern %s\n", @@ -1050,7 +1080,7 @@ cfs_cpu_init(void) failed: cfs_cpu_fini(); - return -1; + return ret; } #endif diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c index 7f56d2c9dd001bda96d2beda6d2c10e0b7649786..68e34b4a76c981d7023a839d94e273ede93291d7 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c @@ -64,7 +64,7 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg, unsigned int key_len) { struct crypto_ahash *tfm; - int err = 0; + int err = 0; *type = cfs_crypto_hash_type(hash_alg); @@ -93,12 +93,12 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg, if (key) err = crypto_ahash_setkey(tfm, key, key_len); - else if ((*type)->cht_key != 0) + else if ((*type)->cht_key) err = crypto_ahash_setkey(tfm, (unsigned char *)&((*type)->cht_key), (*type)->cht_size); - if (err != 0) { + if (err) { ahash_request_free(*req); crypto_free_ahash(tfm); return err; @@ -147,16 +147,16 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg, unsigned char *key, unsigned int key_len, unsigned char *hash, unsigned int *hash_len) { - struct scatterlist sl; + struct scatterlist sl; struct ahash_request *req; - int err; - 
const struct cfs_crypto_hash_type *type; + int err; + const struct cfs_crypto_hash_type *type; - if (!buf || buf_len == 0 || !hash_len) + if (!buf || !buf_len || !hash_len) return -EINVAL; err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len); - if (err != 0) + if (err) return err; if (!hash || *hash_len < type->cht_size) { @@ -177,7 +177,7 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg, EXPORT_SYMBOL(cfs_crypto_hash_digest); /** - * Allocate and initialize desriptor for hash algorithm. + * Allocate and initialize descriptor for hash algorithm. * * This should be used to initialize a hash descriptor for multiple calls * to a single hash function when computing the hash across multiple @@ -198,8 +198,8 @@ cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg, unsigned char *key, unsigned int key_len) { struct ahash_request *req; - int err; - const struct cfs_crypto_hash_type *type; + int err; + const struct cfs_crypto_hash_type *type; err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len); @@ -273,7 +273,7 @@ EXPORT_SYMBOL(cfs_crypto_hash_update); int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, unsigned char *hash, unsigned int *hash_len) { - int err; + int err; struct ahash_request *req = (void *)hdesc; int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); @@ -312,8 +312,8 @@ static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg) { int buf_len = max(PAGE_SIZE, 1048576UL); void *buf; - unsigned long start, end; - int bcount, err = 0; + unsigned long start, end; + int bcount, err = 0; struct page *page; unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX]; unsigned int hash_len = sizeof(hash); @@ -358,7 +358,7 @@ static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg) CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n", cfs_crypto_hash_name(hash_alg), err); } else { - unsigned long tmp; + unsigned long tmp; tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) * 1000) / (1024 * 1024); @@ -440,6 +440,6 @@ int cfs_crypto_register(void) */ void cfs_crypto_unregister(void) { - if (adler32 == 0) + if (!adler32) cfs_crypto_adler32_unregister(); } diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h index 18e8cd4d8758c2d02e1e3dbde398b5a15d88d805..d0b3aa80cfa6d909223080998497c249d11ba118 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h @@ -1,4 +1,4 @@ - /* +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c index 435b784c52f8d2a8b37156df220b2ae98383dc9b..39a72e3f0c18fca4bacdc2aa15c7d1d586b51cb6 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c @@ -57,7 +57,6 @@ #include -char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall"; char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall"; /** @@ -68,11 +67,12 @@ char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall"; void libcfs_run_debug_log_upcall(char *file) { char *argv[3]; - int rc; - char *envp[] = { + int rc; + static const char * const envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", - NULL}; + NULL + }; argv[0] = lnet_debug_log_upcall; @@ -81,7 +81,7 @@ void libcfs_run_debug_log_upcall(char *file) argv[2] = NULL; - rc = call_usermodehelper(argv[0], argv, envp, 1); + rc = call_usermodehelper(argv[0], argv, (char **)envp, 1); if (rc < 0 && rc != -ENOENT) { CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n", rc, argv[0], argv[1]); @@ -91,57 +91,6 @@ void libcfs_run_debug_log_upcall(char *file) } } -void libcfs_run_upcall(char **argv) -{ - int rc; - int argc; - char *envp[] = { - "HOME=/", - "PATH=/sbin:/bin:/usr/sbin:/usr/bin", - NULL}; - - argv[0] = lnet_upcall; - argc = 1; - while (argv[argc]) - argc++; - - LASSERT(argc >= 2); - - rc = call_usermodehelper(argv[0], argv, envp, 1); - if (rc < 0 && rc != -ENOENT) { - CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n", - rc, argv[0], argv[1], - argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], - argc < 4 ? "" : ",", argc < 4 ? "" : argv[3], - argc < 5 ? "" : ",", argc < 5 ? "" : argv[4], - argc < 6 ? "" : ",..."); - } else { - CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n", - argv[0], argv[1], - argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], - argc < 4 ? "" : ",", argc < 4 ? "" : argv[3], - argc < 5 ? "" : ",", argc < 5 ? "" : argv[4], - argc < 6 ? 
"" : ",..."); - } -} - -void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata) -{ - char *argv[6]; - char buf[32]; - - snprintf(buf, sizeof(buf), "%d", msgdata->msg_line); - - argv[1] = "LBUG"; - argv[2] = (char *)msgdata->msg_file; - argv[3] = (char *)msgdata->msg_fn; - argv[4] = buf; - argv[5] = NULL; - - libcfs_run_upcall(argv); -} -EXPORT_SYMBOL(libcfs_run_lbug_upcall); - /* coverity[+kill] */ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) { @@ -156,7 +105,6 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) dump_stack(); if (!libcfs_panic_on_lbug) libcfs_debug_dumplog(); - libcfs_run_lbug_upcall(msgdata); if (libcfs_panic_on_lbug) panic("LBUG"); set_task_state(current, TASK_UNINTERRUPTIBLE); diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c index 38308f8b6aae907fe0eace9075f171d8b66eff31..3f5d58babc2f0f65c757e2520a6af258e3c23577 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c @@ -83,7 +83,7 @@ static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data) CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n"); return true; } - if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) { + if ((u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) { CERROR("LIBCFS ioctl: packlen != ioc_len\n"); return true; } diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c index 291d286eab481de35bd66f38f01a6a70c2ad50cf..cf902154f0aae0925f9929a5381d41a08ddbd7f3 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c @@ -45,8 +45,8 @@ sigset_t cfs_block_allsigs(void) { - unsigned long flags; - sigset_t old; + unsigned long flags; + sigset_t old; spin_lock_irqsave(¤t->sighand->siglock, flags); old = current->blocked; @@ -60,8 +60,8 @@ EXPORT_SYMBOL(cfs_block_allsigs); sigset_t cfs_block_sigs(unsigned long sigs) { - unsigned long flags; - sigset_t old; + unsigned long flags; + sigset_t old; spin_lock_irqsave(¤t->sighand->siglock, flags); old = current->blocked; @@ -91,7 +91,7 @@ EXPORT_SYMBOL(cfs_block_sigsinv); void cfs_restore_sigs(sigset_t old) { - unsigned long flags; + unsigned long flags; spin_lock_irqsave(¤t->sighand->siglock, flags); current->blocked = old; diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c index 8b551d2708babfeed9640f0bdeee5d9a2a2c94f1..75eb84e7f0f815103864feb7e017002676808f25 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c @@ -49,8 +49,8 @@ static DECLARE_RWSEM(cfs_tracefile_sem); int cfs_tracefile_init_arch(void) { - int i; - int j; + int i; + int j; struct cfs_trace_cpu_data *tcd; /* initialize trace_data */ @@ -85,14 +85,14 @@ int cfs_tracefile_init_arch(void) out: cfs_tracefile_fini_arch(); - printk(KERN_ERR "lnet: Not enough memory\n"); + pr_err("lnet: Not enough memory\n"); return -ENOMEM; } void cfs_tracefile_fini_arch(void) { - int i; - int j; + int i; + int j; for (i = 0; i < num_possible_cpus(); i++) for (j = 0; j < 3; j++) { @@ -224,26 +224,26 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask, { char *prefix = "Lustre", *ptype = NULL; - if ((mask & D_EMERG) != 0) { + if (mask & D_EMERG) { prefix = 
dbghdr_to_err_string(hdr); ptype = KERN_EMERG; - } else if ((mask & D_ERROR) != 0) { + } else if (mask & D_ERROR) { prefix = dbghdr_to_err_string(hdr); ptype = KERN_ERR; - } else if ((mask & D_WARNING) != 0) { + } else if (mask & D_WARNING) { prefix = dbghdr_to_info_string(hdr); ptype = KERN_WARNING; - } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) { + } else if (mask & (D_CONSOLE | libcfs_printk)) { prefix = dbghdr_to_info_string(hdr); ptype = KERN_INFO; } - if ((mask & D_CONSOLE) != 0) { - printk("%s%s: %.*s", ptype, prefix, len, buf); + if (mask & D_CONSOLE) { + pr_info("%s%s: %.*s", ptype, prefix, len, buf); } else { - printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, - hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num, - fn, len, buf); + pr_info("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, + hdr->ph_pid, hdr->ph_extern_pid, file, + hdr->ph_line_num, fn, len, buf); } } diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c index 86b4d25cad46b0a9a92a48f35603b78b2daff4f4..161e042265213939d077bc47e37aab1a51e48d3f 100644 --- a/drivers/staging/lustre/lnet/libcfs/module.c +++ b/drivers/staging/lustre/lnet/libcfs/module.c @@ -183,12 +183,12 @@ EXPORT_SYMBOL(lprocfs_call_handler); static int __proc_dobitmasks(void *data, int write, loff_t pos, void __user *buffer, int nob) { - const int tmpstrlen = 512; - char *tmpstr; - int rc; + const int tmpstrlen = 512; + char *tmpstr; + int rc; unsigned int *mask = data; - int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0; - int is_printk = (mask == &libcfs_printk) ? 1 : 0; + int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0; + int is_printk = (mask == &libcfs_printk) ? 1 : 0; rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen); if (rc < 0) @@ -293,8 +293,8 @@ static int __proc_cpt_table(void *data, int write, loff_t pos, void __user *buffer, int nob) { char *buf = NULL; - int len = 4096; - int rc = 0; + int len = 4096; + int rc = 0; if (write) return -EPERM; @@ -365,14 +365,6 @@ static struct ctl_table lnet_table[] = { .mode = 0444, .proc_handler = &proc_cpt_table, }, - - { - .procname = "upcall", - .data = lnet_upcall, - .maxlen = sizeof(lnet_upcall), - .mode = 0644, - .proc_handler = &proc_dostring, - }, { .procname = "debug_log_upcall", .data = lnet_debug_log_upcall, @@ -547,7 +539,7 @@ static int libcfs_init(void) } rc = cfs_cpu_init(); - if (rc != 0) + if (rc) goto cleanup_debug; rc = misc_register(&libcfs_dev); @@ -566,7 +558,7 @@ static int libcfs_init(void) rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4); rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY, rc, &cfs_sched_rehash); - if (rc != 0) { + if (rc) { CERROR("Startup workitem scheduler: error: %d\n", rc); goto cleanup_deregister; } diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c index a9bdb284fd15777149da26e11fa7a24d384aaa1d..21d5a3912c5fd0c28127be122b206f80aa7515d3 100644 --- a/drivers/staging/lustre/lnet/libcfs/prng.c +++ b/drivers/staging/lustre/lnet/libcfs/prng.c @@ -33,7 +33,7 @@ * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16, * number and carry packed within the same 32 bit integer. 
* algorithm recommended by Marsaglia -*/ + */ #include "../../include/linux/libcfs/libcfs.h" diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c index 1c7efdfaffcf21ebc81304f6488414d4690cdedf..d7b29f8997c0121d012a55eaeb81afb5bed29d15 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -59,13 +59,13 @@ struct page_collection { * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, * only ->tcd_pages are spilled. */ - int pc_want_daemon_pages; + int pc_want_daemon_pages; }; struct tracefiled_ctl { struct completion tctl_start; struct completion tctl_stop; - wait_queue_head_t tctl_waitq; + wait_queue_head_t tctl_waitq; pid_t tctl_pid; atomic_t tctl_shutdown; }; @@ -77,24 +77,24 @@ struct cfs_trace_page { /* * page itself */ - struct page *page; + struct page *page; /* * linkage into one of the lists in trace_data_union or * page_collection */ - struct list_head linkage; + struct list_head linkage; /* * number of bytes used within this page */ - unsigned int used; + unsigned int used; /* * cpu that owns this page */ - unsigned short cpu; + unsigned short cpu; /* * type(context) of this page */ - unsigned short type; + unsigned short type; }; static void put_pages_on_tcd_daemon_list(struct page_collection *pc, @@ -108,7 +108,7 @@ cfs_tage_from_list(struct list_head *list) static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) { - struct page *page; + struct page *page; struct cfs_trace_page *tage; /* My caller is trying to free memory */ @@ -236,7 +236,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd) INIT_LIST_HEAD(&pc.pc_pages); list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { - if (pgcount-- == 0) + if (!pgcount--) break; list_move_tail(&tage->linkage, &pc.pc_pages); @@ -278,7 +278,7 @@ int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, const char *format, ...) { va_list args; - int rc; + int rc; va_start(args, format); rc = libcfs_debug_vmsg2(msgdata, format, args, NULL); @@ -293,21 +293,21 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, const char *format2, ...) 
{ struct cfs_trace_cpu_data *tcd = NULL; - struct ptldebug_header header = {0}; - struct cfs_trace_page *tage; + struct ptldebug_header header = { 0 }; + struct cfs_trace_page *tage; /* string_buf is used only if tcd != NULL, and is always set then */ - char *string_buf = NULL; - char *debug_buf; - int known_size; - int needed = 85; /* average message length */ - int max_nob; - va_list ap; - int depth; - int i; - int remain; - int mask = msgdata->msg_mask; - const char *file = kbasename(msgdata->msg_file); - struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; + char *string_buf = NULL; + char *debug_buf; + int known_size; + int needed = 85; /* average message length */ + int max_nob; + va_list ap; + int depth; + int i; + int remain; + int mask = msgdata->msg_mask; + const char *file = kbasename(msgdata->msg_file); + struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; tcd = cfs_trace_get_tcd(); @@ -320,7 +320,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, if (!tcd) /* arch may not log in IRQ context */ goto console; - if (tcd->tcd_cur_pages == 0) + if (!tcd->tcd_cur_pages) header.ph_flags |= PH_FLAG_FIRST_RECORD; if (tcd->tcd_shutting_down) { @@ -423,7 +423,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, __LASSERT(tage->used <= PAGE_SIZE); console: - if ((mask & libcfs_printk) == 0) { + if (!(mask & libcfs_printk)) { /* no console output requested */ if (tcd) cfs_trace_put_tcd(tcd); @@ -432,7 +432,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, if (cdls) { if (libcfs_console_ratelimit && - cdls->cdls_next != 0 && /* not first time ever */ + cdls->cdls_next && /* not first time ever */ !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { /* skipping a console message */ cdls->cdls_count++; @@ -489,7 +489,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, put_cpu(); } - if (cdls && cdls->cdls_count != 0) { + if (cdls && cdls->cdls_count) { string_buf = cfs_trace_get_console_buffer(); needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, @@ -535,9 +535,9 @@ panic_collect_pages(struct page_collection *pc) * CPUs have been stopped during a panic. If this isn't true for some * arch, this will have to be implemented separately in each arch. */ - int i; - int j; struct cfs_trace_cpu_data *tcd; + int i; + int j; INIT_LIST_HEAD(&pc->pc_pages); @@ -698,11 +698,11 @@ void cfs_trace_debug_print(void) int cfs_tracefile_dump_all_pages(char *filename) { - struct page_collection pc; - struct file *filp; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - char *buf; + struct page_collection pc; + struct file *filp; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + char *buf; mm_segment_t __oldfs; int rc; @@ -778,7 +778,7 @@ void cfs_trace_flush_pages(void) int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, const char __user *usr_buffer, int usr_buffer_nob) { - int nob; + int nob; if (usr_buffer_nob > knl_buffer_nob) return -EOVERFLOW; @@ -810,7 +810,7 @@ int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, * NB if 'append' != NULL, it's a single character to append to the * copied out string - usually "\n" or "" (i.e. 
a terminating zero byte) */ - int nob = strlen(knl_buffer); + int nob = strlen(knl_buffer); if (nob > usr_buffer_nob) nob = usr_buffer_nob; @@ -843,16 +843,16 @@ int cfs_trace_allocate_string_buffer(char **str, int nob) int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) { - char *str; - int rc; + char *str; + int rc; rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) + if (rc) return rc; rc = cfs_trace_copyin_string(str, usr_str_nob + 1, usr_str, usr_str_nob); - if (rc != 0) + if (rc) goto out; if (str[0] != '/') { @@ -867,17 +867,17 @@ int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) int cfs_trace_daemon_command(char *str) { - int rc = 0; + int rc = 0; cfs_tracefile_write_lock(); - if (strcmp(str, "stop") == 0) { + if (!strcmp(str, "stop")) { cfs_tracefile_write_unlock(); cfs_trace_stop_thread(); cfs_tracefile_write_lock(); memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); - } else if (strncmp(str, "size=", 5) == 0) { + } else if (!strncmp(str, "size=", 5)) { unsigned long tmp; rc = kstrtoul(str + 5, 10, &tmp); @@ -909,15 +909,15 @@ int cfs_trace_daemon_command(char *str) int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) { char *str; - int rc; + int rc; rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) + if (rc) return rc; rc = cfs_trace_copyin_string(str, usr_str_nob + 1, usr_str, usr_str_nob); - if (rc == 0) + if (!rc) rc = cfs_trace_daemon_command(str); kfree(str); @@ -1003,7 +1003,7 @@ static int tracefiled(void *arg) filp = NULL; cfs_tracefile_read_lock(); - if (cfs_tracefile[0] != 0) { + if (cfs_tracefile[0]) { filp = filp_open(cfs_tracefile, O_CREAT | O_RDWR | O_LARGEFILE, 0600); @@ -1072,7 +1072,7 @@ static int tracefiled(void *arg) __LASSERT(list_empty(&pc.pc_pages)); end_loop: if (atomic_read(&tctl->tctl_shutdown)) { - if (last_loop == 0) { + if (!last_loop) { last_loop = 1; continue; } else { @@ -1135,13 +1135,13 @@ void cfs_trace_stop_thread(void) int cfs_tracefile_init(int max_pages) { struct cfs_trace_cpu_data *tcd; - int i; - int j; - int rc; - int factor; + int i; + int j; + int rc; + int factor; rc = cfs_tracefile_init_arch(); - if (rc != 0) + if (rc) return rc; cfs_tcd_for_each(tcd, i, j) { diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h index d878676bc375e369794fc0896f3aaae9c8fe1acb..f644cbc5a277983887be726a64a84a3d3c567824 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.h +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h @@ -45,7 +45,7 @@ enum cfs_trace_buf_type { /* trace file lock routines */ #define TRACEFILE_NAME_SIZE 1024 -extern char cfs_tracefile[TRACEFILE_NAME_SIZE]; +extern char cfs_tracefile[TRACEFILE_NAME_SIZE]; extern long long cfs_tracefile_size; void libcfs_run_debug_log_upcall(char *file); @@ -80,7 +80,7 @@ int cfs_trace_get_debug_mb(void); void libcfs_debug_dumplog_internal(void *arg); void libcfs_register_panic_notifier(void); void libcfs_unregister_panic_notifier(void); -extern int libcfs_panic_in_progress; +extern int libcfs_panic_in_progress; int cfs_trace_max_debug_mb(void); #define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) @@ -113,14 +113,14 @@ union cfs_trace_data_union { * tcd_for_each_type_lock */ spinlock_t tcd_lock; - unsigned long tcd_lock_flags; + unsigned long tcd_lock_flags; /* * pages with trace records not yet processed by tracefiled. 
*/ - struct list_head tcd_pages; + struct list_head tcd_pages; /* number of pages on ->tcd_pages */ - unsigned long tcd_cur_pages; + unsigned long tcd_cur_pages; /* * pages with trace records already processed by @@ -132,9 +132,9 @@ union cfs_trace_data_union { * (put_pages_on_daemon_list()). LRU pages from this list are * discarded when list grows too large. */ - struct list_head tcd_daemon_pages; + struct list_head tcd_daemon_pages; /* number of pages on ->tcd_daemon_pages */ - unsigned long tcd_cur_daemon_pages; + unsigned long tcd_cur_daemon_pages; /* * Maximal number of pages allowed on ->tcd_pages and @@ -142,7 +142,7 @@ union cfs_trace_data_union { * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current * implementation. */ - unsigned long tcd_max_pages; + unsigned long tcd_max_pages; /* * preallocated pages to write trace records into. Pages from @@ -166,15 +166,15 @@ union cfs_trace_data_union { * TCD_STOCK_PAGES pagesful are consumed by trace records all * emitted in non-blocking contexts. Which is quite unlikely. */ - struct list_head tcd_stock_pages; + struct list_head tcd_stock_pages; /* number of pages on ->tcd_stock_pages */ - unsigned long tcd_cur_stock_pages; + unsigned long tcd_cur_stock_pages; - unsigned short tcd_shutting_down; - unsigned short tcd_cpu; - unsigned short tcd_type; + unsigned short tcd_shutting_down; + unsigned short tcd_cpu; + unsigned short tcd_type; /* The factors to share debug memory. */ - unsigned short tcd_pages_factor; + unsigned short tcd_pages_factor; } tcd; char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))]; }; diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c index e98c818a14fb7f6f14a303164fbb539f8c55f802..d0512da6bcde6d8a8c1cc51b015a28825c5e46d6 100644 --- a/drivers/staging/lustre/lnet/libcfs/workitem.c +++ b/drivers/staging/lustre/lnet/libcfs/workitem.c @@ -45,7 +45,7 @@ struct cfs_wi_sched { /* chain on global list */ struct list_head ws_list; /** serialised workitems */ - spinlock_t ws_lock; + spinlock_t ws_lock; /** where schedulers sleep */ wait_queue_head_t ws_waitq; /** concurrent workitems */ @@ -59,26 +59,26 @@ struct cfs_wi_sched { */ struct list_head ws_rerunq; /** CPT-table for this scheduler */ - struct cfs_cpt_table *ws_cptab; + struct cfs_cpt_table *ws_cptab; /** CPT id for affinity */ - int ws_cpt; + int ws_cpt; /** number of scheduled workitems */ - int ws_nscheduled; + int ws_nscheduled; /** started scheduler thread, protected by cfs_wi_data::wi_glock */ - unsigned int ws_nthreads:30; + unsigned int ws_nthreads:30; /** shutting down, protected by cfs_wi_data::wi_glock */ - unsigned int ws_stopping:1; + unsigned int ws_stopping:1; /** serialize starting thread, protected by cfs_wi_data::wi_glock */ - unsigned int ws_starting:1; + unsigned int ws_starting:1; /** scheduler name */ - char ws_name[CFS_WS_NAME_LEN]; + char ws_name[CFS_WS_NAME_LEN]; }; static struct cfs_workitem_data { /** serialize */ spinlock_t wi_glock; /** list of all schedulers */ - struct list_head wi_scheds; + struct list_head wi_scheds; /** WI module is initialized */ int wi_init; /** shutting down the whole WI module */ @@ -136,7 +136,7 @@ EXPORT_SYMBOL(cfs_wi_exit); int cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi) { - int rc; + int rc; LASSERT(!in_interrupt()); /* because we use plain spinlock */ LASSERT(!sched->ws_stopping); @@ -202,13 +202,13 @@ EXPORT_SYMBOL(cfs_wi_schedule); static int cfs_wi_scheduler(void *arg) { - struct cfs_wi_sched *sched = (struct 
cfs_wi_sched *)arg; + struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg; cfs_block_allsigs(); /* CPT affinity scheduler? */ if (sched->ws_cptab) - if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0) + if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt)) CWARN("Failed to bind %s on CPT %d\n", sched->ws_name, sched->ws_cpt); @@ -223,8 +223,8 @@ static int cfs_wi_scheduler(void *arg) spin_lock(&sched->ws_lock); while (!sched->ws_stopping) { - int nloops = 0; - int rc; + int nloops = 0; + int rc; struct cfs_workitem *wi; while (!list_empty(&sched->ws_runq) && @@ -238,16 +238,16 @@ static int cfs_wi_scheduler(void *arg) LASSERT(sched->ws_nscheduled > 0); sched->ws_nscheduled--; - wi->wi_running = 1; + wi->wi_running = 1; wi->wi_scheduled = 0; spin_unlock(&sched->ws_lock); nloops++; - rc = (*wi->wi_action) (wi); + rc = (*wi->wi_action)(wi); spin_lock(&sched->ws_lock); - if (rc != 0) /* WI should be dead, even be freed! */ + if (rc) /* WI should be dead, even be freed! */ continue; wi->wi_running = 0; @@ -273,7 +273,7 @@ static int cfs_wi_scheduler(void *arg) spin_unlock(&sched->ws_lock); rc = wait_event_interruptible_exclusive(sched->ws_waitq, - !cfs_wi_sched_cansleep(sched)); + !cfs_wi_sched_cansleep(sched)); spin_lock(&sched->ws_lock); } @@ -289,7 +289,7 @@ static int cfs_wi_scheduler(void *arg) void cfs_wi_sched_destroy(struct cfs_wi_sched *sched) { - int i; + int i; LASSERT(cfs_wi_data.wi_init); LASSERT(!cfs_wi_data.wi_stopping); @@ -325,7 +325,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched) list_del(&sched->ws_list); spin_unlock(&cfs_wi_data.wi_glock); - LASSERT(sched->ws_nscheduled == 0); + LASSERT(!sched->ws_nscheduled); LIBCFS_FREE(sched, sizeof(*sched)); } @@ -335,8 +335,8 @@ int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt, int nthrs, struct cfs_wi_sched **sched_pp) { - struct cfs_wi_sched *sched; - int rc; + struct cfs_wi_sched *sched; + int rc; LASSERT(cfs_wi_data.wi_init); LASSERT(!cfs_wi_data.wi_stopping); @@ -364,7 +364,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, rc = 0; while (nthrs > 0) { - char name[16]; + char name[16]; struct task_struct *task; spin_lock(&cfs_wi_data.wi_glock); @@ -431,7 +431,7 @@ cfs_wi_startup(void) void cfs_wi_shutdown(void) { - struct cfs_wi_sched *sched; + struct cfs_wi_sched *sched; struct cfs_wi_sched *temp; spin_lock(&cfs_wi_data.wi_glock); @@ -447,7 +447,7 @@ cfs_wi_shutdown(void) list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { spin_lock(&cfs_wi_data.wi_glock); - while (sched->ws_nthreads != 0) { + while (sched->ws_nthreads) { spin_unlock(&cfs_wi_data.wi_glock); set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(1) / 20); diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c index 4daf828198c30aecdf76d2a8a3fa8eb59a767c16..b2ba10d59f84624753d0675b8f45dd1cc02a04ba 100644 --- a/drivers/staging/lustre/lnet/lnet/api-ni.c +++ b/drivers/staging/lustre/lnet/lnet/api-ni.c @@ -1551,16 +1551,16 @@ LNetNIInit(lnet_pid_t requested_pid) rc = lnet_check_routes(); if (rc) - goto err_destory_routes; + goto err_destroy_routes; rc = lnet_rtrpools_alloc(im_a_router); if (rc) - goto err_destory_routes; + goto err_destroy_routes; } rc = lnet_acceptor_start(); if (rc) - goto err_destory_routes; + goto err_destroy_routes; the_lnet.ln_refcount = 1; /* Now I may use my own API functions... 
*/ @@ -1587,7 +1587,7 @@ LNetNIInit(lnet_pid_t requested_pid) err_acceptor_stop: the_lnet.ln_refcount = 0; lnet_acceptor_stop(); -err_destory_routes: +err_destroy_routes: if (!the_lnet.ln_nis_from_mod_params) lnet_destroy_routes(); err_shutdown_lndnis: diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c index b430046dc294f04572964b00e8146c4e9c6b4b3e..eb796a86e6ab937b557078ddabdf828dde441915 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-me.c +++ b/drivers/staging/lustre/lnet/lnet/lib-me.c @@ -271,21 +271,3 @@ lnet_me_unlink(lnet_me_t *me) lnet_res_lh_invalidate(&me->me_lh); lnet_me_free(me); } - -#if 0 -static void -lib_me_dump(lnet_me_t *me) -{ - CWARN("Match Entry %p (%#llx)\n", me, - me->me_lh.lh_cookie); - - CWARN("\tMatch/Ignore\t= %016lx / %016lx\n", - me->me_match_bits, me->me_ignore_bits); - - CWARN("\tMD\t= %p\n", me->md); - CWARN("\tprev\t= %p\n", - list_entry(me->me_list.prev, lnet_me_t, me_list)); - CWARN("\tnext\t= %p\n", - list_entry(me->me_list.next, lnet_me_t, me_list)); -} -#endif diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c index 48e6f8f2392f8ff8d48e2e71566701b9cf2e593b..f3dd6e42f4d464f5796dbfb0edd4b837be0a8353 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-move.c +++ b/drivers/staging/lustre/lnet/lnet/lib-move.c @@ -192,6 +192,7 @@ lnet_copy_iov2iter(struct iov_iter *to, left = siov->iov_len - soffset; do { size_t n, copy = left; + LASSERT(nsiov > 0); if (copy > nob) diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c index a6d7a6159b8f6b15af858fe117301b1e4552fbd3..a9fe3e69daae161337aed6ba13896cad30f84e64 100644 --- a/drivers/staging/lustre/lnet/lnet/nidstrings.c +++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c @@ -193,7 +193,7 @@ add_nidrange(const struct cfs_lstr *src, struct netstrfns *nf; struct nidrange *nr; int endlen; - unsigned netnum; + unsigned int netnum; if (src->ls_len >= LNET_NIDSTR_SIZE) return NULL; @@ -247,10 +247,8 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist) { struct cfs_lstr addrrange; struct cfs_lstr net; - struct cfs_lstr tmp; struct nidrange *nr; - tmp = *src; if (!cfs_gettok(src, '@', &addrrange)) goto failed; @@ -1156,7 +1154,7 @@ EXPORT_SYMBOL(libcfs_nid2str_r); static struct netstrfns * libcfs_str2net_internal(const char *str, __u32 *net) { - struct netstrfns *uninitialized_var(nf); + struct netstrfns *nf = NULL; int nob; unsigned int netnum; int i; diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c index 063ad55ec950091a95f4676ea0258502b36d293f..8afa0abf15cd426e735d2fb1259ec9f806dd0820 100644 --- a/drivers/staging/lustre/lnet/lnet/router.c +++ b/drivers/staging/lustre/lnet/lnet/router.c @@ -903,6 +903,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway) { lnet_rc_data_t *rcd = NULL; lnet_ping_info_t *pi; + lnet_md_t md; int rc; int i; @@ -925,15 +926,15 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway) } rcd->rcd_pinginfo = pi; + md.start = pi; + md.user_ptr = rcd; + md.length = LNET_PINGINFO_SIZE; + md.threshold = LNET_MD_THRESH_INF; + md.options = LNET_MD_TRUNCATE; + md.eq_handle = the_lnet.ln_rc_eqh; + LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh)); - rc = LNetMDBind((lnet_md_t){.start = pi, - .user_ptr = rcd, - .length = LNET_PINGINFO_SIZE, - .threshold = LNET_MD_THRESH_INF, - .options = LNET_MD_TRUNCATE, - .eq_handle = the_lnet.ln_rc_eqh}, - LNET_UNLINK, - &rcd->rcd_mdh); + rc = LNetMDBind(md, 
LNET_UNLINK, &rcd->rcd_mdh); if (rc < 0) { CERROR("Can't bind MD: %d\n", rc); goto out; diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c index b20c5d394e3b827bfee563015fc34ca1a075c08c..67b460f41d6e9491ecc4cad52a44b8175ef26ee6 100644 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c @@ -44,6 +44,10 @@ static int brw_inject_errors; module_param(brw_inject_errors, int, 0644); MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default"); +#define BRW_POISON 0xbeefbeefbeefbeefULL +#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL +#define BRW_MSIZE sizeof(u64) + static void brw_client_fini(struct sfw_test_instance *tsi) { @@ -67,6 +71,7 @@ brw_client_init(struct sfw_test_instance *tsi) { struct sfw_session *sn = tsi->tsi_batch->bat_session; int flags; + int off; int npg; int len; int opc; @@ -87,6 +92,7 @@ brw_client_init(struct sfw_test_instance *tsi) * but we have to keep it for compatibility */ len = npg * PAGE_SIZE; + off = 0; } else { struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1; @@ -99,9 +105,13 @@ brw_client_init(struct sfw_test_instance *tsi) opc = breq->blk_opc; flags = breq->blk_flags; len = breq->blk_len; - npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; + off = breq->blk_offset & ~PAGE_MASK; + npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; } + if (off % BRW_MSIZE) + return -EINVAL; + if (npg > LNET_MAX_IOV || npg <= 0) return -EINVAL; @@ -114,7 +124,7 @@ brw_client_init(struct sfw_test_instance *tsi) list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid), - npg, len, opc == LST_BRW_READ); + off, npg, len, opc == LST_BRW_READ); if (!bulk) { brw_client_fini(tsi); return -ENOMEM; @@ -126,12 +136,7 @@ brw_client_init(struct sfw_test_instance *tsi) return 0; } -#define BRW_POISON 0xbeefbeefbeefbeefULL -#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL -#define BRW_MSIZE sizeof(__u64) - -static int -brw_inject_one_error(void) +int brw_inject_one_error(void) { struct timespec64 ts; @@ -147,12 +152,13 @@ brw_inject_one_error(void) } static void -brw_fill_page(struct page *pg, int pattern, __u64 magic) +brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic) { - char *addr = page_address(pg); + char *addr = page_address(pg) + off; int i; LASSERT(addr); + LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE)); if (pattern == LST_BRW_CHECK_NONE) return; @@ -162,14 +168,16 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic) if (pattern == LST_BRW_CHECK_SIMPLE) { memcpy(addr, &magic, BRW_MSIZE); - addr += PAGE_SIZE - BRW_MSIZE; - memcpy(addr, &magic, BRW_MSIZE); + if (len > BRW_MSIZE) { + addr += PAGE_SIZE - BRW_MSIZE; + memcpy(addr, &magic, BRW_MSIZE); + } return; } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) - memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); + for (i = 0; i < len; i += BRW_MSIZE) + memcpy(addr + i, &magic, BRW_MSIZE); return; } @@ -177,13 +185,14 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic) } static int -brw_check_page(struct page *pg, int pattern, __u64 magic) +brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic) { - char *addr = page_address(pg); + char *addr = page_address(pg) + off; __u64 data = 0; /* make compiler happy */ int i; LASSERT(addr); + LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE)); if (pattern == LST_BRW_CHECK_NONE) return 0; @@ -193,21 +202,21 @@ brw_check_page(struct page 
*pg, int pattern, __u64 magic) if (data != magic) goto bad_data; - addr += PAGE_SIZE - BRW_MSIZE; - data = *((__u64 *)addr); - if (data != magic) - goto bad_data; - + if (len > BRW_MSIZE) { + addr += PAGE_SIZE - BRW_MSIZE; + data = *((__u64 *)addr); + if (data != magic) + goto bad_data; + } return 0; } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) { - data = *(((__u64 *)addr) + i); + for (i = 0; i < len; i += BRW_MSIZE) { + data = *(u64 *)(addr + i); if (data != magic) goto bad_data; } - return 0; } @@ -226,8 +235,12 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic) struct page *pg; for (i = 0; i < bk->bk_niov; i++) { + int off, len; + pg = bk->bk_iovs[i].bv_page; - brw_fill_page(pg, pattern, magic); + off = bk->bk_iovs[i].bv_offset; + len = bk->bk_iovs[i].bv_len; + brw_fill_page(pg, off, len, pattern, magic); } } @@ -238,8 +251,12 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic) struct page *pg; for (i = 0; i < bk->bk_niov; i++) { + int off, len; + pg = bk->bk_iovs[i].bv_page; - if (brw_check_page(pg, pattern, magic)) { + off = bk->bk_iovs[i].bv_offset; + len = bk->bk_iovs[i].bv_len; + if (brw_check_page(pg, off, len, pattern, magic)) { CERROR("Bulk page %p (%d/%d) is corrupted!\n", pg, i, bk->bk_niov); return 1; @@ -276,6 +293,7 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu, len = npg * PAGE_SIZE; } else { struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1; + int off; /* * I should never get this step if it's unknown feature @@ -286,7 +304,8 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu, opc = breq->blk_opc; flags = breq->blk_flags; len = breq->blk_len; - npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; + off = breq->blk_offset; + npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; } rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c index b786f8b4a73dc5b5344e3bfc9aa5296443199def..94383023c1bebf49d968bac6d009fb32c46af9fe 100644 --- a/drivers/staging/lustre/lnet/selftest/conctl.c +++ b/drivers/staging/lustre/lnet/selftest/conctl.c @@ -315,7 +315,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args) static int lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) { - unsigned feats; + unsigned int feats; int rc; char *name; @@ -742,6 +742,10 @@ static int lst_test_add_ioctl(lstio_test_args_t *args) PAGE_SIZE - sizeof(struct lstcon_test))) return -EINVAL; + /* Enforce zero parameter length if there's no parameter */ + if (!args->lstio_tes_param && args->lstio_tes_param_len) + return -EINVAL; + LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); if (!batch_name) return rc; diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c index 55afb53b0743a0c8c5ba2e27a0547db8dbabd5b5..994422c62487665e43f9ae8fe58511e4c8b0a4f5 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.c +++ b/drivers/staging/lustre/lnet/selftest/conrpc.c @@ -86,8 +86,9 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc) } static int -lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats, - int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc) +lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats, + int bulk_npg, int bulk_len, int embedded, + struct lstcon_rpc *crpc) { crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service, feats, bulk_npg, bulk_len, @@ -111,7 +112,7 @@ lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats, 
} static int -lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats, +lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats, int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp) { struct lstcon_rpc *crpc = NULL; @@ -292,8 +293,8 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error) spin_lock(&rpc->crpc_lock); - if (!crpc->crp_posted || /* not posted */ - crpc->crp_stamp) { /* rpc done or aborted already */ + if (!crpc->crp_posted || /* not posted */ + crpc->crp_stamp) { /* rpc done or aborted already */ if (!crpc->crp_stamp) { crpc->crp_stamp = cfs_time_current(); crpc->crp_status = -EINTR; @@ -589,7 +590,7 @@ lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans) int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, - unsigned feats, struct lstcon_rpc **crpc) + unsigned int feats, struct lstcon_rpc **crpc) { struct srpc_mksn_reqst *msrq; struct srpc_rmsn_reqst *rsrq; @@ -627,7 +628,8 @@ lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, } int -lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc) +lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned int feats, + struct lstcon_rpc **crpc) { struct srpc_debug_reqst *drq; int rc; @@ -645,7 +647,7 @@ lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **c } int -lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats, +lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats, struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc) { struct lstcon_batch *batch; @@ -678,7 +680,8 @@ lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats, } int -lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc) +lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats, + struct lstcon_rpc **crpc) { struct srpc_stat_reqst *srq; int rc; @@ -776,7 +779,8 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req) } static int -lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req) +lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, + struct srpc_test_reqst *req) { struct test_bulk_req *brq = &req->tsr_u.bulk_v0; @@ -789,20 +793,21 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req } static int -lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req) +lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, bool is_client, + struct srpc_test_reqst *req) { struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1; brq->blk_opc = param->blk_opc; brq->blk_flags = param->blk_flags; brq->blk_len = param->blk_size; - brq->blk_offset = 0; /* reserved */ + brq->blk_offset = is_client ? 
param->blk_cli_off : param->blk_srv_off; return 0; } int -lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats, +lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats, struct lstcon_test *test, struct lstcon_rpc **crpc) { struct lstcon_group *sgrp = test->tes_src_grp; @@ -897,7 +902,8 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats, &test->tes_param[0], trq); } else { rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *) - &test->tes_param[0], trq); + &test->tes_param[0], + trq->tsr_is_client, trq); } break; @@ -1084,7 +1090,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist, struct lstcon_ndlink *ndl; struct lstcon_node *nd; struct lstcon_rpc *rpc; - unsigned feats; + unsigned int feats; int rc; /* Creating session RPG for list of nodes */ diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h index 7ec6fc96959e4277b6f28e88472bc77e4a010ce1..e629e87c461cb7ac3185b9644dfd16095d8b9d9d 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.h +++ b/drivers/staging/lustre/lnet/selftest/conrpc.h @@ -78,8 +78,8 @@ struct lstcon_rpc_trans { struct list_head tas_olink; /* link chain on owner list */ struct list_head tas_link; /* link chain on global list */ int tas_opc; /* operation code of transaction */ - unsigned tas_feats_updated; /* features mask is uptodate */ - unsigned tas_features; /* test features mask */ + unsigned int tas_feats_updated; /* features mask is uptodate */ + unsigned int tas_features; /* test features mask */ wait_queue_head_t tas_waitq; /* wait queue head */ atomic_t tas_remaining; /* # of un-scheduled rpcs */ struct list_head tas_rpcs_list; /* queued requests */ @@ -106,14 +106,16 @@ typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *, lstcon_rpc_ent_t __user *); int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, - unsigned version, struct lstcon_rpc **crpc); + unsigned int version, struct lstcon_rpc **crpc); int lstcon_dbgrpc_prep(struct lstcon_node *nd, - unsigned version, struct lstcon_rpc **crpc); -int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version, - struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc); -int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version, - struct lstcon_test *test, struct lstcon_rpc **crpc); -int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version, + unsigned int version, struct lstcon_rpc **crpc); +int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, + unsigned int version, struct lstcon_tsb_hdr *tsb, + struct lstcon_rpc **crpc); +int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, + unsigned int version, struct lstcon_test *test, + struct lstcon_rpc **crpc); +int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int version, struct lstcon_rpc **crpc); void lstcon_rpc_put(struct lstcon_rpc *crpc); int lstcon_rpc_trans_prep(struct list_head *translist, @@ -129,7 +131,8 @@ int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans, lstcon_rpc_readent_func_t readent); void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error); void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans); -void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req); +void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, + struct lstcon_rpc *req); int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout); int lstcon_rpc_pinger_start(void); void lstcon_rpc_pinger_stop(void); diff 
--git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c index a0fcbf3bcc950545ef12b91ef4ba6695b68988f4..1456d2395cc953ffac4dc2e85dfd636d320d7636 100644 --- a/drivers/staging/lustre/lnet/selftest/console.c +++ b/drivers/staging/lustre/lnet/selftest/console.c @@ -86,7 +86,7 @@ lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create) if (!create) return -ENOENT; - LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink)); + LIBCFS_ALLOC(*ndpp, sizeof(**ndpp) + sizeof(*ndl)); if (!*ndpp) return -ENOMEM; @@ -131,12 +131,12 @@ lstcon_node_put(struct lstcon_node *nd) list_del(&ndl->ndl_link); list_del(&ndl->ndl_hlink); - LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink)); + LIBCFS_FREE(nd, sizeof(*nd) + sizeof(*ndl)); } static int -lstcon_ndlink_find(struct list_head *hash, - lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create) +lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id, + struct lstcon_ndlink **ndlpp, int create) { unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; struct lstcon_ndlink *ndl; @@ -230,7 +230,8 @@ lstcon_group_addref(struct lstcon_group *grp) grp->grp_ref++; } -static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *); +static void lstcon_group_ndlink_release(struct lstcon_group *, + struct lstcon_ndlink *); static void lstcon_group_drain(struct lstcon_group *grp, int keep) @@ -397,7 +398,8 @@ lstcon_sesrpc_readent(int transop, struct srpc_msg *msg, static int lstcon_group_nodes_add(struct lstcon_group *grp, int count, lnet_process_id_t __user *ids_up, - unsigned *featp, struct list_head __user *result_up) + unsigned int *featp, + struct list_head __user *result_up) { struct lstcon_rpc_trans *trans; struct lstcon_ndlink *ndl; @@ -542,7 +544,8 @@ lstcon_group_add(char *name) int lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up, - unsigned *featp, struct list_head __user *result_up) + unsigned int *featp, + struct list_head __user *result_up) { struct lstcon_group *grp; int rc; @@ -820,7 +823,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p, lstcon_group_decref(grp); - return 0; + return rc; } static int @@ -1181,7 +1184,8 @@ lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg) } static int -lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up) +lstcon_test_nodes_add(struct lstcon_test *test, + struct list_head __user *result_up) { struct lstcon_rpc_trans *trans; struct lstcon_group *grp; @@ -1364,7 +1368,8 @@ lstcon_test_add(char *batch_name, int type, int loop, } static int -lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp) +lstcon_test_find(struct lstcon_batch *batch, int idx, + struct lstcon_test **testpp) { struct lstcon_test *test; @@ -1702,7 +1707,7 @@ lstcon_new_session_id(lst_sid_t *sid) } int -lstcon_session_new(char *name, int key, unsigned feats, +lstcon_session_new(char *name, int key, unsigned int feats, int timeout, int force, lst_sid_t __user *sid_up) { int rc = 0; @@ -1868,7 +1873,7 @@ lstcon_session_end(void) } int -lstcon_session_feats_check(unsigned feats) +lstcon_session_feats_check(unsigned int feats) { int rc = 0; diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h index 78388a611c223673fa6675d0dad4a6d3c9aacdff..5dc1de48a10ef25a07d79b79c8874399253e75e9 100644 --- 
a/drivers/staging/lustre/lnet/selftest/console.h +++ b/drivers/staging/lustre/lnet/selftest/console.h @@ -92,14 +92,16 @@ struct lstcon_batch { int bat_ntest; /* # of test */ int bat_state; /* state of the batch */ int bat_arg; /* parameter for run|stop, timeout - * for run, force for stop */ + * for run, force for stop + */ char bat_name[LST_NAME_SIZE];/* name of batch */ struct list_head bat_test_list; /* list head of tests (struct lstcon_test) */ struct list_head bat_trans_list; /* list head of transaction */ struct list_head bat_cli_list; /* list head of client nodes - * (struct lstcon_node) */ + * (struct lstcon_node) + */ struct list_head *bat_cli_hash; /* hash table of client nodes */ struct list_head bat_srv_list; /* list head of server nodes */ struct list_head *bat_srv_hash; /* hash table of server nodes */ @@ -144,13 +146,14 @@ struct lstcon_session { int ses_timeout; /* timeout in seconds */ time64_t ses_laststamp; /* last operation stamp (seconds) */ - unsigned ses_features; /* tests features of the session + unsigned int ses_features; /* tests features of the session */ - unsigned ses_feats_updated:1; /* features are synced with - * remote test nodes */ - unsigned ses_force:1; /* force creating */ - unsigned ses_shutdown:1; /* session is shutting down */ - unsigned ses_expired:1; /* console is timedout */ + unsigned int ses_feats_updated:1; /* features are synced with + * remote test nodes + */ + unsigned int ses_force:1; /* force creating */ + unsigned int ses_shutdown:1; /* session is shutting down */ + unsigned int ses_expired:1; /* console is timedout */ __u64 ses_id_cookie; /* batch id cookie */ char ses_name[LST_NAME_SIZE];/* session name */ struct lstcon_rpc_trans *ses_ping; /* session pinger */ @@ -188,14 +191,14 @@ int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr); int lstcon_console_init(void); int lstcon_console_fini(void); int lstcon_session_match(lst_sid_t sid); -int lstcon_session_new(char *name, int key, unsigned version, +int lstcon_session_new(char *name, int key, unsigned int version, int timeout, int flags, lst_sid_t __user *sid_up); int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key, unsigned __user *verp, lstcon_ndlist_ent_t __user *entp, char __user *name_up, int len); int lstcon_session_end(void); int lstcon_session_debug(int timeout, struct list_head __user *result_up); -int lstcon_session_feats_check(unsigned feats); +int lstcon_session_feats_check(unsigned int feats); int lstcon_batch_debug(int timeout, char *name, int client, struct list_head __user *result_up); int lstcon_group_debug(int timeout, char *name, @@ -207,7 +210,7 @@ int lstcon_group_del(char *name); int lstcon_group_clean(char *name, int args); int lstcon_group_refresh(char *name, struct list_head __user *result_up); int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up, - unsigned *featp, struct list_head __user *result_up); + unsigned int *featp, struct list_head __user *result_up); int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up, struct list_head __user *result_up); int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up, diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c index abbd6287b4bd5fc9b896d2ff69b121418028bbc8..48dcc330dc9b21e9e732c076e99392852ba87847 100644 --- a/drivers/staging/lustre/lnet/selftest/framework.c +++ b/drivers/staging/lustre/lnet/selftest/framework.c @@ -131,7 +131,8 @@ sfw_find_test_case(int id) } 
static int -sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops) +sfw_register_test(struct srpc_service *service, + struct sfw_test_client_ops *cliops) { struct sfw_test_case *tsc; @@ -254,7 +255,7 @@ sfw_session_expired(void *data) static inline void sfw_init_session(struct sfw_session *sn, lst_sid_t sid, - unsigned features, const char *name) + unsigned int features, const char *name) { struct stt_timer *timer = &sn->sn_timer; @@ -469,7 +470,8 @@ sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply) } static int -sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply) +sfw_remove_session(struct srpc_rmsn_reqst *request, + struct srpc_rmsn_reply *reply) { struct sfw_session *sn = sfw_data.fw_session; @@ -501,7 +503,8 @@ sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *repl } static int -sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply) +sfw_debug_session(struct srpc_debug_reqst *request, + struct srpc_debug_reply *reply) { struct sfw_session *sn = sfw_data.fw_session; @@ -897,7 +900,7 @@ sfw_test_rpc_done(struct srpc_client_rpc *rpc) int sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer, - unsigned features, int nblk, int blklen, + unsigned int features, int nblk, int blklen, struct srpc_client_rpc **rpcpp) { struct srpc_client_rpc *rpc = NULL; @@ -1064,7 +1067,8 @@ sfw_stop_batch(struct sfw_batch *tsb, int force) } static int -sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply) +sfw_query_batch(struct sfw_batch *tsb, int testidx, + struct srpc_batch_reply *reply) { struct sfw_test_instance *tsi; @@ -1101,7 +1105,7 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, LASSERT(!rpc->srpc_bulk); LASSERT(npages > 0 && npages <= LNET_MAX_IOV); - rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink); + rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink); if (!rpc->srpc_bulk) return -ENOMEM; @@ -1179,7 +1183,8 @@ sfw_add_test(struct srpc_server_rpc *rpc) } static int -sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply) +sfw_control_batch(struct srpc_batch_reqst *request, + struct srpc_batch_reply *reply) { struct sfw_session *sn = sfw_data.fw_session; int rc = 0; @@ -1225,7 +1230,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) struct srpc_service *sv = rpc->srpc_scd->scd_svc; struct srpc_msg *reply = &rpc->srpc_replymsg; struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg; - unsigned features = LST_FEATS_MASK; + unsigned int features = LST_FEATS_MASK; int rc = 0; LASSERT(!sfw_data.fw_active_srpc); @@ -1375,7 +1380,7 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status) struct srpc_client_rpc * sfw_create_rpc(lnet_process_id_t peer, int service, - unsigned features, int nbulkiov, int bulklen, + unsigned int features, int nbulkiov, int bulklen, void (*done)(struct srpc_client_rpc *), void *priv) { struct srpc_client_rpc *rpc = NULL; diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c index 9331ca4e3606261268c10448a2876bb2c35e6039..b9601b00a273ca49da68fb7cfb8809a38f1dc20f 100644 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c @@ -159,8 +159,8 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) ktime_get_real_ts64(&ts); CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq, 
- (unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 + - (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec))); + (unsigned int)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 + + (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec))); } static int diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c index f5619d8744ef7558137242588976d5fb41a8fde8..ce9de8c9be57fa93c47820d4dc68869a10a9ccc3 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c @@ -84,14 +84,13 @@ void srpc_set_counters(const srpc_counters_t *cnt) } static int -srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob) +srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off, + int nob) { - nob = min_t(int, nob, PAGE_SIZE); + LASSERT(off < PAGE_SIZE); + LASSERT(nob > 0 && nob <= PAGE_SIZE); - LASSERT(nob > 0); - LASSERT(i >= 0 && i < bk->bk_niov); - - bk->bk_iovs[i].bv_offset = 0; + bk->bk_iovs[i].bv_offset = off; bk->bk_iovs[i].bv_page = pg; bk->bk_iovs[i].bv_len = nob; return nob; @@ -117,7 +116,8 @@ srpc_free_bulk(struct srpc_bulk *bk) } struct srpc_bulk * -srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) +srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg, + unsigned int bulk_len, int sink) { struct srpc_bulk *bk; int i; @@ -148,8 +148,11 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) return NULL; } - nob = srpc_add_bulk_page(bk, pg, i, bulk_len); + nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) - + bulk_off; + srpc_add_bulk_page(bk, pg, i, bulk_off, nob); bulk_len -= nob; + bulk_off = 0; } return bk; @@ -693,7 +696,8 @@ srpc_finish_service(struct srpc_service *sv) /* called with sv->sv_lock held */ static void -srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) +srpc_service_recycle_buffer(struct srpc_service_cd *scd, + struct srpc_buffer *buf) __must_hold(&scd->scd_lock) { if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) { diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h index 4ab2ee26400439c71554e4bd3d8fd1726fb16f14..f353a634cc8eb52ddeeee6d9bd6cc49ee65a893c 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.h +++ b/drivers/staging/lustre/lnet/selftest/rpc.h @@ -113,7 +113,8 @@ struct srpc_join_reply { __u32 join_status; /* returned status */ lst_sid_t join_sid; /* session id */ __u32 join_timeout; /* # seconds' inactivity to - * expire */ + * expire + */ char join_session[LST_NAME_SIZE]; /* session name */ } WIRE_ATTR; @@ -175,7 +176,7 @@ struct test_bulk_req_v1 { __u16 blk_opc; /* bulk operation code */ __u16 blk_flags; /* data check flags */ __u32 blk_len; /* data length */ - __u32 blk_offset; /* reserved: offset */ + __u32 blk_offset; /* offset */ } WIRE_ATTR; struct test_ping_req { @@ -190,7 +191,8 @@ struct srpc_test_reqst { lst_bid_t tsr_bid; /* batch id */ __u32 tsr_service; /* test type: bulk|ping|... 
*/ __u32 tsr_loop; /* test client loop count or - * # server buffers needed */ + * # server buffers needed + */ __u32 tsr_concur; /* concurrency of test */ __u8 tsr_is_client; /* is test client or not */ __u8 tsr_stop_onerr; /* stop on error */ diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h index d033ac03d953c46ec19a4f6befcc3192551bcb73..c8833a016b6d225392f646e6d32d567bb6d1ad1d 100644 --- a/drivers/staging/lustre/lnet/selftest/selftest.h +++ b/drivers/staging/lustre/lnet/selftest/selftest.h @@ -131,7 +131,8 @@ srpc_service2reply(int service) enum srpc_event_type { SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) - * received */ + * received + */ SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */ SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */ SRPC_REPLY_RCVD = 4, /* incoming reply received */ @@ -295,7 +296,8 @@ struct srpc_service_cd { #define SFW_TEST_WI_MIN 256 #define SFW_TEST_WI_MAX 2048 /* extra buffers for tolerating buggy peers, or unbalanced number - * of peers between partitions */ + * of peers between partitions + */ #define SFW_TEST_WI_EXTRA 64 /* number of server workitems (mini-thread) for framework service */ @@ -347,9 +349,11 @@ struct sfw_batch { struct sfw_test_client_ops { int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test - * client */ + * client + */ void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test - * client */ + * client + */ int (*tso_prep_rpc)(struct sfw_test_unit *tsu, lnet_process_id_t dest, struct srpc_client_rpc **rpc); /* prep a tests rpc */ @@ -374,7 +378,8 @@ struct sfw_test_instance { spinlock_t tsi_lock; /* serialize */ unsigned int tsi_stopping:1; /* test is stopping */ atomic_t tsi_nactive; /* # of active test - * unit */ + * unit + */ struct list_head tsi_units; /* test units */ struct list_head tsi_free_rpcs; /* free rpcs */ struct list_head tsi_active_rpcs; /* active rpcs */ @@ -386,8 +391,10 @@ struct sfw_test_instance { } tsi_u; }; -/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of - * pages are not used */ +/* + * XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of + * pages are not used + */ #define SFW_MAX_CONCUR LST_MAX_CONCUR #define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t)) #define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) @@ -410,10 +417,10 @@ struct sfw_test_case { struct srpc_client_rpc * sfw_create_rpc(lnet_process_id_t peer, int service, - unsigned features, int nbulkiov, int bulklen, + unsigned int features, int nbulkiov, int bulklen, void (*done)(struct srpc_client_rpc *), void *priv); int sfw_create_test_rpc(struct sfw_test_unit *tsu, - lnet_process_id_t peer, unsigned features, + lnet_process_id_t peer, unsigned int features, int nblk, int blklen, struct srpc_client_rpc **rpc); void sfw_abort_rpc(struct srpc_client_rpc *rpc); void sfw_post_rpc(struct srpc_client_rpc *rpc); @@ -434,8 +441,9 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service, void srpc_post_rpc(struct srpc_client_rpc *rpc); void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why); void srpc_free_bulk(struct srpc_bulk *bk); -struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg, - unsigned bulk_len, int sink); +struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off, + unsigned int bulk_npg, unsigned int bulk_len, + int sink); int srpc_send_rpc(struct swi_workitem *wi); int srpc_send_reply(struct srpc_server_rpc *rpc); int 
srpc_add_service(struct srpc_service *sv); diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c index dcd22580b1f0cd98782110f1b6da0636cb61db15..2fe692df19d0a9f3d309d300c15bee93aec14ac1 100644 --- a/drivers/staging/lustre/lnet/selftest/timer.c +++ b/drivers/staging/lustre/lnet/selftest/timer.c @@ -46,16 +46,17 @@ * to cover a time period of 1024 seconds into the future before wrapping. */ #define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */ -#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL) +#define STTIMER_SLOTTIME BIT(STTIMER_MINPOLL) #define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1)) -#define STTIMER_NSLOTS (1 << 7) +#define STTIMER_NSLOTS BIT(7) #define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \ (STTIMER_NSLOTS - 1))]) static struct st_timer_data { spinlock_t stt_lock; unsigned long stt_prev_slot; /* start time of the slot processed - * previously */ + * previously + */ struct list_head stt_hash[STTIMER_NSLOTS]; int stt_shuttingdown; wait_queue_head_t stt_waitq; diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c index edd72b926f811217fda4558024d43004eea034cc..999f250ceed019fa2e1d0fb8b4e64bb8611053a0 100644 --- a/drivers/staging/lustre/lustre/fid/fid_request.c +++ b/drivers/staging/lustre/lustre/fid/fid_request.c @@ -74,7 +74,7 @@ static int seq_client_rpc(struct lu_client_seq *seq, /* Zero out input range, this is not recovery yet. */ in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE); - range_init(in); + lu_seq_range_init(in); ptlrpc_request_set_replen(req); @@ -112,25 +112,21 @@ static int seq_client_rpc(struct lu_client_seq *seq, ptlrpc_at_set_req_timeout(req); - if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA) - mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); rc = ptlrpc_queue_wait(req); - if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA) - mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); if (rc) goto out_req; out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE); *output = *out; - if (!range_is_sane(output)) { + if (!lu_seq_range_is_sane(output)) { CERROR("%s: Invalid range received from server: " DRANGE "\n", seq->lcs_name, PRANGE(output)); rc = -EINVAL; goto out_req; } - if (range_is_exhausted(output)) { + if (lu_seq_range_is_exhausted(output)) { CERROR("%s: Range received from server is exhausted: " DRANGE "]\n", seq->lcs_name, PRANGE(output)); rc = -EINVAL; @@ -170,9 +166,9 @@ static int seq_client_alloc_seq(const struct lu_env *env, { int rc; - LASSERT(range_is_sane(&seq->lcs_space)); + LASSERT(lu_seq_range_is_sane(&seq->lcs_space)); - if (range_is_exhausted(&seq->lcs_space)) { + if (lu_seq_range_is_exhausted(&seq->lcs_space)) { rc = seq_client_alloc_meta(env, seq); if (rc) { CERROR("%s: Can't allocate new meta-sequence, rc %d\n", @@ -185,7 +181,7 @@ static int seq_client_alloc_seq(const struct lu_env *env, rc = 0; } - LASSERT(!range_is_exhausted(&seq->lcs_space)); + LASSERT(!lu_seq_range_is_exhausted(&seq->lcs_space)); *seqnr = seq->lcs_space.lsr_start; seq->lcs_space.lsr_start += 1; @@ -320,7 +316,7 @@ void seq_client_flush(struct lu_client_seq *seq) seq->lcs_space.lsr_index = -1; - range_init(&seq->lcs_space); + lu_seq_range_init(&seq->lcs_space); mutex_unlock(&seq->lcs_mutex); } EXPORT_SYMBOL(seq_client_flush); diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c index 
3ed32d77f38bc50c0c738a643c008e1d950d72ae..97d4849c7199e081232c582ce2340ffe02b83d46 100644 --- a/drivers/staging/lustre/lustre/fid/lproc_fid.c +++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c @@ -83,7 +83,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count, (unsigned long long *)&tmp.lsr_end); if (rc != 2) return -EINVAL; - if (!range_is_sane(&tmp) || range_is_zero(&tmp) || + if (!lu_seq_range_is_sane(&tmp) || lu_seq_range_is_zero(&tmp) || tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end) return -EINVAL; *range = tmp; diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c index 0100a935f4ff292798f6c62f42d3213d59dca1cc..11f697496180854b451c30d11853562e493fb7e0 100644 --- a/drivers/staging/lustre/lustre/fld/fld_cache.c +++ b/drivers/staging/lustre/lustre/fld/fld_cache.c @@ -143,7 +143,7 @@ static void fld_fix_new_list(struct fld_cache *cache) c_range = &f_curr->fce_range; n_range = &f_next->fce_range; - LASSERT(range_is_sane(c_range)); + LASSERT(lu_seq_range_is_sane(c_range)); if (&f_next->fce_list == head) break; @@ -358,7 +358,7 @@ struct fld_cache_entry { struct fld_cache_entry *f_new; - LASSERT(range_is_sane(range)); + LASSERT(lu_seq_range_is_sane(range)); f_new = kzalloc(sizeof(*f_new), GFP_NOFS); if (!f_new) @@ -503,7 +503,7 @@ int fld_cache_lookup(struct fld_cache *cache, } prev = flde; - if (range_within(&flde->fce_range, seq)) { + if (lu_seq_range_within(&flde->fce_range, seq)) { *range = flde->fce_range; cache->fci_stat.fst_cache++; diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h index 08eaec735d6fb3b290ff4957862d2fd73335eda9..4a7f0b71c48db8281be03685542ce7ebbb5b5f20 100644 --- a/drivers/staging/lustre/lustre/fld/fld_internal.h +++ b/drivers/staging/lustre/lustre/fld/fld_internal.h @@ -62,11 +62,6 @@ #include "../include/lustre_req_layout.h" #include "../include/lustre_fld.h" -enum { - LUSTRE_FLD_INIT = 1 << 0, - LUSTRE_FLD_RUN = 1 << 1 -}; - struct fld_stats { __u64 fst_count; __u64 fst_cache; diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c index 0de72b717ce5c912ae3a243d7938a79fd60c1c63..4cade7a1680004d8525ec48dca90dd3e66d57903 100644 --- a/drivers/staging/lustre/lustre/fld/fld_request.c +++ b/drivers/staging/lustre/lustre/fld/fld_request.c @@ -159,11 +159,6 @@ int fld_client_add_target(struct lu_client_fld *fld, LASSERT(name); LASSERT(tar->ft_srv || tar->ft_exp); - if (fld->lcf_flags != LUSTRE_FLD_INIT) { - CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n", - fld->lcf_name, name, tar->ft_idx); - return 0; - } CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n", fld->lcf_name, name, tar->ft_idx); @@ -282,7 +277,6 @@ int fld_client_init(struct lu_client_fld *fld, fld->lcf_count = 0; spin_lock_init(&fld->lcf_lock); fld->lcf_hash = &fld_hash[hash]; - fld->lcf_flags = LUSTRE_FLD_INIT; INIT_LIST_HEAD(&fld->lcf_targets); cache_size = FLD_CLIENT_CACHE_SIZE / @@ -421,8 +415,6 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds, struct lu_fld_target *target; int rc; - fld->lcf_flags |= LUSTRE_FLD_RUN; - rc = fld_cache_lookup(fld->lcf_cache, seq, &res); if (rc == 0) { *mds = res.lsr_index; diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h index 89292c93dcd5fe0130bc03e80f369f3d03e3efc7..dc685610c4c4324f983c5b1023ce49e02676b2ef 100644 --- 
a/drivers/staging/lustre/lustre/include/cl_object.h +++ b/drivers/staging/lustre/lustre/include/cl_object.h @@ -59,10 +59,6 @@ * read/write system call it is associated with the single user * thread, that issued the system call). * - * - cl_req represents a collection of pages for a transfer. cl_req is - * constructed by req-forming engine that tries to saturate - * transport with large and continuous transfers. - * * Terminology * * - to avoid confusion high-level I/O operation like read or write system @@ -103,11 +99,8 @@ struct inode; struct cl_device; -struct cl_device_operations; struct cl_object; -struct cl_object_page_operations; -struct cl_object_lock_operations; struct cl_page; struct cl_page_slice; @@ -120,27 +113,7 @@ struct cl_page_operations; struct cl_io; struct cl_io_slice; -struct cl_req; -struct cl_req_slice; - -/** - * Operations for each data device in the client stack. - * - * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops - */ -struct cl_device_operations { - /** - * Initialize cl_req. This method is called top-to-bottom on all - * devices in the stack to get them a chance to allocate layer-private - * data, and to attach them to the cl_req by calling - * cl_req_slice_add(). - * - * \see osc_req_init(), lov_req_init(), lovsub_req_init() - * \see vvp_req_init() - */ - int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req); -}; +struct cl_req_attr; /** * Device in the client stack. @@ -150,8 +123,6 @@ struct cl_device_operations { struct cl_device { /** Super-class. */ struct lu_device cd_lu_dev; - /** Per-layer operation vector. */ - const struct cl_device_operations *cd_ops; }; /** \addtogroup cl_object cl_object @@ -267,7 +238,7 @@ struct cl_object_conf { /** * Object layout. This is consumed by lov. */ - struct lustre_md *coc_md; + struct lu_buf coc_layout; /** * Description of particular stripe location in the * cluster. This is consumed by osc. @@ -301,6 +272,26 @@ enum { OBJECT_CONF_WAIT = 2 }; +enum { + CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */ + CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */ +}; + +struct cl_layout { + /** the buffer to return the layout in lov_mds_md format. */ + struct lu_buf cl_buf; + /** size of layout in lov_mds_md format. */ + size_t cl_size; + /** Layout generation. */ + u32 cl_layout_gen; + /** + * True if this is a released file. + * Temporarily added for released file truncate in ll_setattr_raw(). + * It will be removed later. -Jinshan + */ + bool cl_is_released; +}; + /** * Operations implemented for each cl object layer. * @@ -400,6 +391,27 @@ struct cl_object_operations { */ int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj, struct lov_user_md __user *lum); + /** + * Get FIEMAP mapping from the object. + */ + int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, + struct fiemap *fiemap, size_t *buflen); + /** + * Get layout and generation of the object. + */ + int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj, + struct cl_layout *layout); + /** + * Get maximum size of the object. + */ + loff_t (*coo_maxbytes)(struct cl_object *obj); + /** + * Set request attributes. 
+ */ + void (*coo_req_attr_set)(const struct lu_env *env, + struct cl_object *obj, + struct cl_req_attr *attr); }; /** @@ -591,7 +603,7 @@ enum cl_page_state { * * - [cl_page_state::CPS_PAGEOUT] page is dirty, the * req-formation engine decides that it wants to include this page - * into an cl_req being constructed, and yanks it from the cache; + * into an RPC being constructed, and yanks it from the cache; * * - [cl_page_state::CPS_FREEING] VM callback is executed to * evict the page form the memory; @@ -660,7 +672,7 @@ enum cl_page_state { * Page is being read in, as a part of a transfer. This is quite * similar to the cl_page_state::CPS_PAGEOUT state, except that * read-in is always "immediate"---there is no such thing a sudden - * construction of read cl_req from cached, presumably not up to date, + * construction of read request from cached, presumably not up to date, * pages. * * Underlying VM page is locked for the duration of transfer. @@ -714,8 +726,6 @@ struct cl_page { struct list_head cp_batch; /** List of slices. Immutable after creation. */ struct list_head cp_layers; - /** Linkage of pages within cl_req. */ - struct list_head cp_flight; /** * Page state. This field is const to avoid accidental update, it is * modified only internally within cl_page.c. Protected by a VM lock. @@ -732,12 +742,6 @@ struct cl_page { * by sub-io. Protected by a VM lock. */ struct cl_io *cp_owner; - /** - * Owning IO request in cl_page_state::CPS_PAGEOUT and - * cl_page_state::CPS_PAGEIN states. This field is maintained only in - * the top-level pages. Protected by a VM lock. - */ - struct cl_req *cp_req; /** List of references to this page, for debugging. */ struct lu_ref cp_reference; /** Link to an object, for debugging. */ @@ -779,7 +783,6 @@ enum cl_lock_mode { /** * Requested transfer type. - * \ingroup cl_req */ enum cl_req_type { CRT_READ, @@ -884,26 +887,6 @@ struct cl_page_operations { /** Destructor. Frees resources and slice itself. */ void (*cpo_fini)(const struct lu_env *env, struct cl_page_slice *slice); - - /** - * Checks whether the page is protected by a cl_lock. This is a - * per-layer method, because certain layers have ways to check for the - * lock much more efficiently than through the generic locks scan, or - * implement locking mechanisms separate from cl_lock, e.g., - * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks - * being canceled, or scheduled for cancellation as soon as the last - * user goes away, too. - * - * \retval -EBUSY: page is protected by a lock of a given mode; - * \retval -ENODATA: page is not protected by a lock; - * \retval 0: this layer cannot decide. - * - * \see cl_page_is_under_lock() - */ - int (*cpo_is_under_lock)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io, pgoff_t *max); - /** * Optional debugging helper. Prints given page slice. * @@ -915,8 +898,7 @@ struct cl_page_operations { /** * \name transfer * - * Transfer methods. See comment on cl_req for a description of - * transfer formation and life-cycle. + * Transfer methods. * * @{ */ @@ -962,7 +944,7 @@ struct cl_page_operations { int ioret); /** * Called when cached page is about to be added to the - * cl_req as a part of req formation. + * ptlrpc request as a part of req formation. 
* * \return 0 : proceed with this page; * \return -EAGAIN : skip this page; @@ -1365,7 +1347,6 @@ struct cl_2queue { * (3) sort all locks to avoid dead-locks, and acquire them * * (4) process the chunk: call per-page methods - * (cl_io_operations::cio_read_page() for read, * cl_io_operations::cio_prepare_write(), * cl_io_operations::cio_commit_write() for write) * @@ -1388,6 +1369,8 @@ enum cl_io_type { CIT_WRITE, /** truncate, utime system calls */ CIT_SETATTR, + /** get data version */ + CIT_DATA_VERSION, /** * page fault handling */ @@ -1467,6 +1450,31 @@ struct cl_io_slice { typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *, struct cl_page *); + +struct cl_read_ahead { + /* + * Maximum page index the readahead window will end. + * This is determined DLM lock coverage, RPC and stripe boundary. + * cra_end is included. + */ + pgoff_t cra_end; + /* + * Release routine. If readahead holds resources underneath, this + * function should be called to release it. + */ + void (*cra_release)(const struct lu_env *env, void *cbdata); + /* Callback data for cra_release routine */ + void *cra_cbdata; +}; + +static inline void cl_read_ahead_release(const struct lu_env *env, + struct cl_read_ahead *ra) +{ + if (ra->cra_release) + ra->cra_release(env, ra->cra_cbdata); + memset(ra, 0, sizeof(*ra)); +} + /** * Per-layer io operations. * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops @@ -1573,16 +1581,13 @@ struct cl_io_operations { struct cl_page_list *queue, int from, int to, cl_commit_cbt cb); /** - * Read missing page. - * - * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start() - * method, when it hits not-up-to-date page in the range. Optional. + * Decide maximum read ahead extent * * \pre io->ci_type == CIT_READ */ - int (*cio_read_page)(const struct lu_env *env, - const struct cl_io_slice *slice, - const struct cl_page_slice *page); + int (*cio_read_ahead)(const struct lu_env *env, + const struct cl_io_slice *slice, + pgoff_t start, struct cl_read_ahead *ra); /** * Optional debugging helper. Print given io slice. */ @@ -1765,10 +1770,15 @@ struct cl_io { struct cl_io_rw_common ci_rw; struct cl_setattr_io { struct ost_lvb sa_attr; + unsigned int sa_attr_flags; unsigned int sa_valid; int sa_stripe_index; - struct lu_fid *sa_parent_fid; + const struct lu_fid *sa_parent_fid; } ci_setattr; + struct cl_data_version_io { + u64 dv_data_version; + int dv_flags; + } ci_data_version; struct cl_fault_io { /** page index within file. */ pgoff_t ft_index; @@ -1836,179 +1846,20 @@ struct cl_io { /** @} cl_io */ -/** \addtogroup cl_req cl_req - * @{ - */ -/** \struct cl_req - * Transfer. - * - * There are two possible modes of transfer initiation on the client: - * - * - immediate transfer: this is started when a high level io wants a page - * or a collection of pages to be transferred right away. Examples: - * read-ahead, synchronous read in the case of non-page aligned write, - * page write-out as a part of extent lock cancellation, page write-out - * as a part of memory cleansing. Immediate transfer can be both - * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE; - * - * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens - * when io wants to transfer a page to the server some time later, when - * it can be done efficiently. Example: pages dirtied by the write(2) - * path. - * - * In any case, transfer takes place in the form of a cl_req, which is a - * representation for a network RPC. 
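The cio_read_ahead operation and cl_read_ahead structure added above replace the old cio_read_page hook: a layer now reports how far read-ahead may safely extend (cra_end, bounded by DLM lock coverage, RPC and stripe boundaries) and hands back a release callback for whatever it pinned while answering. A minimal sketch of a hypothetical caller, using the cl_io_read_ahead() wrapper declared later in this patch (illustrative only, not part of the change):

/* Illustrative caller of the new read-ahead interface (not in the patch). */
static int demo_readahead_window(const struct lu_env *env, struct cl_io *io,
				 pgoff_t start, pgoff_t want_end)
{
	struct cl_read_ahead ra = { 0 };
	int rc;

	rc = cl_io_read_ahead(env, io, start, &ra);
	if (rc < 0)
		return rc;		/* no read-ahead allowed at @start */

	if (ra.cra_end < want_end)
		want_end = ra.cra_end;	/* clip to lock/RPC/stripe boundary */

	/* ... queue pages in [start, want_end] for reading ... */

	cl_read_ahead_release(env, &ra);	/* drop whatever the layer pinned */
	return 0;
}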
- * - * Pages queued for an opportunistic transfer are cached until it is decided - * that efficient RPC can be composed of them. This decision is made by "a - * req-formation engine", currently implemented as a part of osc - * layer. Req-formation depends on many factors: the size of the resulting - * RPC, whether or not multi-object RPCs are supported by the server, - * max-rpc-in-flight limitations, size of the dirty cache, etc. - * - * For the immediate transfer io submits a cl_page_list, that req-formation - * engine slices into cl_req's, possibly adding cached pages to some of - * the resulting req's. - * - * Whenever a page from cl_page_list is added to a newly constructed req, its - * cl_page_operations::cpo_prep() layer methods are called. At that moment, - * page state is atomically changed from cl_page_state::CPS_OWNED to - * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner - * is zeroed, and cl_page::cp_req is set to the - * req. cl_page_operations::cpo_prep() method at the particular layer might - * return -EALREADY to indicate that it does not need to submit this page - * at all. This is possible, for example, if page, submitted for read, - * became up-to-date in the meantime; and for write, the page don't have - * dirty bit marked. \see cl_io_submit_rw() - * - * Whenever a cached page is added to a newly constructed req, its - * cl_page_operations::cpo_make_ready() layer methods are called. At that - * moment, page state is atomically changed from cl_page_state::CPS_CACHED to - * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to - * req. cl_page_operations::cpo_make_ready() method at the particular layer - * might return -EAGAIN to indicate that this page is not eligible for the - * transfer right now. - * - * FUTURE - * - * Plan is to divide transfers into "priority bands" (indicated when - * submitting cl_page_list, and queuing a page for the opportunistic transfer) - * and allow glueing of cached pages to immediate transfers only within single - * band. This would make high priority transfers (like lock cancellation or - * memory pressure induced write-out) really high priority. - * - */ - /** * Per-transfer attributes. */ struct cl_req_attr { + enum cl_req_type cra_type; + u64 cra_flags; + struct cl_page *cra_page; + /** Generic attributes for the server consumption. */ struct obdo *cra_oa; /** Jobid */ char cra_jobid[LUSTRE_JOBID_SIZE]; }; -/** - * Transfer request operations definable at every layer. - * - * Concurrency: transfer formation engine synchronizes calls to all transfer - * methods. - */ -struct cl_req_operations { - /** - * Invoked top-to-bottom by cl_req_prep() when transfer formation is - * complete (all pages are added). - * - * \see osc_req_prep() - */ - int (*cro_prep)(const struct lu_env *env, - const struct cl_req_slice *slice); - /** - * Called top-to-bottom to fill in \a oa fields. This is called twice - * with different flags, see bug 10150 and osc_build_req(). - * - * \param obj an object from cl_req which attributes are to be set in - * \a oa. - * - * \param oa struct obdo where attributes are placed - * - * \param flags \a oa fields to be filled. - */ - void (*cro_attr_set)(const struct lu_env *env, - const struct cl_req_slice *slice, - const struct cl_object *obj, - struct cl_req_attr *attr, u64 flags); - /** - * Called top-to-bottom from cl_req_completion() to notify layers that - * transfer completed. Has to free all state allocated by - * cl_device_operations::cdo_req_init(). 
- */ - void (*cro_completion)(const struct lu_env *env, - const struct cl_req_slice *slice, int ioret); -}; - -/** - * A per-object state that (potentially multi-object) transfer request keeps. - */ -struct cl_req_obj { - /** object itself */ - struct cl_object *ro_obj; - /** reference to cl_req_obj::ro_obj. For debugging. */ - struct lu_ref_link ro_obj_ref; - /* something else? Number of pages for a given object? */ -}; - -/** - * Transfer request. - * - * Transfer requests are not reference counted, because IO sub-system owns - * them exclusively and knows when to free them. - * - * Life cycle. - * - * cl_req is created by cl_req_alloc() that calls - * cl_device_operations::cdo_req_init() device methods to allocate per-req - * state in every layer. - * - * Then pages are added (cl_req_page_add()), req keeps track of all objects it - * contains pages for. - * - * Once all pages were collected, cl_page_operations::cpo_prep() method is - * called top-to-bottom. At that point layers can modify req, let it pass, or - * deny it completely. This is to support things like SNS that have transfer - * ordering requirements invisible to the individual req-formation engine. - * - * On transfer completion (or transfer timeout, or failure to initiate the - * transfer of an allocated req), cl_req_operations::cro_completion() method - * is called, after execution of cl_page_operations::cpo_completion() of all - * req's pages. - */ -struct cl_req { - enum cl_req_type crq_type; - /** A list of pages being transferred */ - struct list_head crq_pages; - /** Number of pages in cl_req::crq_pages */ - unsigned crq_nrpages; - /** An array of objects which pages are in ->crq_pages */ - struct cl_req_obj *crq_o; - /** Number of elements in cl_req::crq_objs[] */ - unsigned crq_nrobjs; - struct list_head crq_layers; -}; - -/** - * Per-layer state for request. - */ -struct cl_req_slice { - struct cl_req *crs_req; - struct cl_device *crs_dev; - struct list_head crs_linkage; - const struct cl_req_operations *crs_ops; -}; - -/* @} cl_req */ - enum cache_stats_item { /** how many cache lookups were performed */ CS_lookup = 0, @@ -2153,9 +2004,6 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, const struct cl_lock_operations *ops); void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, struct cl_object *obj, const struct cl_io_operations *ops); -void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice, - struct cl_device *dev, - const struct cl_req_operations *ops); /** @} helpers */ /** \defgroup cl_object cl_object @@ -2183,6 +2031,12 @@ int cl_object_prune(const struct lu_env *env, struct cl_object *obj); void cl_object_kill(const struct lu_env *env, struct cl_object *obj); int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, struct lov_user_md __user *lum); +int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap, + size_t *buflen); +int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj, + struct cl_layout *cl); +loff_t cl_object_maxbytes(struct cl_object *obj); /** * Returns true, iff \a o0 and \a o1 are slices of the same object. 
@@ -2302,8 +2156,6 @@ void cl_page_discard(const struct lu_env *env, struct cl_io *io, void cl_page_delete(const struct lu_env *env, struct cl_page *pg); int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, pgoff_t *max_index); loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); pgoff_t cl_index(const struct cl_object *obj, loff_t offset); size_t cl_page_size(const struct cl_object *obj); @@ -2414,8 +2266,6 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, struct cl_io_lock_link *link); int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, struct cl_lock_descr *descr); -int cl_io_read_page(const struct lu_env *env, struct cl_io *io, - struct cl_page *page); int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, enum cl_req_type iot, struct cl_2queue *queue); int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, @@ -2424,6 +2274,8 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, int cl_io_commit_async(const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue, int from, int to, cl_commit_cbt cb); +int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io, + pgoff_t start, struct cl_read_ahead *ra); int cl_io_is_going(const struct lu_env *env); /** @@ -2520,19 +2372,8 @@ void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page); /** @} cl_page_list */ -/** \defgroup cl_req cl_req - * @{ - */ -struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, - enum cl_req_type crt, int nr_objects); - -void cl_req_page_add(const struct lu_env *env, struct cl_req *req, - struct cl_page *page); -void cl_req_page_done(const struct lu_env *env, struct cl_page *page); -int cl_req_prep(const struct lu_env *env, struct cl_req *req); -void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, - struct cl_req_attr *attr, u64 flags); -void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); +void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj, + struct cl_req_attr *attr); /** \defgroup cl_sync_io cl_sync_io * @{ @@ -2568,8 +2409,6 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor); /** @} cl_sync_io */ -/** @} cl_req */ - /** \defgroup cl_env cl_env * * lu_env handling for a client. @@ -2593,35 +2432,13 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor); * - allocation and destruction of environment is amortized by caching no * longer used environments instead of destroying them; * - * - there is a notion of "current" environment, attached to the kernel - * data structure representing current thread Top-level lustre code - * allocates an environment and makes it current, then calls into - * non-lustre code, that in turn calls lustre back. Low-level lustre - * code thus called can fetch environment created by the top-level code - * and reuse it, avoiding additional environment allocation. 
- * Right now, three interfaces can attach the cl_env to running thread: - * - cl_env_get - * - cl_env_implant - * - cl_env_reexit(cl_env_reenter had to be called priorly) - * * \see lu_env, lu_context, lu_context_key * @{ */ -struct cl_env_nest { - int cen_refcheck; - void *cen_cookie; -}; - struct lu_env *cl_env_get(int *refcheck); struct lu_env *cl_env_alloc(int *refcheck, __u32 tags); -struct lu_env *cl_env_nested_get(struct cl_env_nest *nest); void cl_env_put(struct lu_env *env, int *refcheck); -void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env); -void *cl_env_reenter(void); -void cl_env_reexit(void *cookie); -void cl_env_implant(struct lu_env *env, int *refcheck); -void cl_env_unplant(struct lu_env *env, int *refcheck); unsigned int cl_env_cache_purge(unsigned int nr); struct lu_env *cl_env_percpu_get(void); void cl_env_percpu_put(struct lu_env *env); diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h new file mode 100644 index 0000000000000000000000000000000000000000..fd7ffb154ad18b820fe63933f9e96b1a3a97afc8 --- /dev/null +++ b/drivers/staging/lustre/lustre/include/llog_swab.h @@ -0,0 +1,65 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2014, Intel Corporation. + * + * Copyright 2015 Cray Inc, all rights reserved. + * Author: Ben Evans. + * + * We assume all nodes are either little-endian or big-endian, and we + * always send messages in the sender's native format. The receiver + * detects the message format by checking the 'magic' field of the message + * (see lustre_msg_swabbed() below). + * + * Each type has corresponding 'lustre_swab_xxxtypexxx()' routines + * are implemented in ptlrpc/pack_generic.c. These 'swabbers' convert the + * type from "other" endian, in-place in the message buffer. + * + * A swabber takes a single pointer argument. The caller must already have + * verified that the length of the message buffer >= sizeof (type). + * + * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine + * may be defined that swabs just the variable part, after the caller has + * verified that the message buffer is large enough. 
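The comment above describes the swabbing convention used by the declarations that follow: each wire type gets a lustre_swab_xxx() routine that converts the structure from the peer's endianness in place, after the caller has checked the buffer is at least sizeof(type). A self-contained sketch of that pattern for a made-up two-field record (demo names, standalone user-space C, not the real wire format):

#include <stdint.h>
#include <byteswap.h>

struct demo_rec {			/* stand-in for a wire struct */
	uint64_t dr_seq;
	uint32_t dr_flags;
	uint32_t dr_padding;
};

/* Swab one record in place; caller already verified the buffer length. */
static void demo_swab_rec(struct demo_rec *rec)
{
	rec->dr_seq   = bswap_64(rec->dr_seq);
	rec->dr_flags = bswap_32(rec->dr_flags);
	/* dr_padding carries no data, so it needs no conversion */
}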
+ */ + +#ifndef _LLOG_SWAB_H_ +#define _LLOG_SWAB_H_ + +#include "lustre/lustre_idl.h" +struct lustre_cfg; + +void lustre_swab_lu_fid(struct lu_fid *fid); +void lustre_swab_ost_id(struct ost_id *oid); +void lustre_swab_llogd_body(struct llogd_body *d); +void lustre_swab_llog_hdr(struct llog_log_hdr *h); +void lustre_swab_llogd_conn_body(struct llogd_conn_body *d); +void lustre_swab_llog_rec(struct llog_rec_hdr *rec); +void lustre_swab_lu_seq_range(struct lu_seq_range *range); +void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg); +void lustre_swab_cfg_marker(struct cfg_marker *marker, + int swab, int size); + +#endif diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h index cc0713ef8ae538ac4e4ddadfa9180589866fba56..62753dae0bfae65cee1f26e93da9bb3e56cfb200 100644 --- a/drivers/staging/lustre/lustre/include/lprocfs_status.h +++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h @@ -43,6 +43,8 @@ #include #include +#include "../../include/linux/libcfs/libcfs.h" +#include "lustre_cfg.h" #include "lustre/lustre_idl.h" struct lprocfs_vars { @@ -540,7 +542,8 @@ lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags); void lprocfs_clear_stats(struct lprocfs_stats *stats); void lprocfs_free_stats(struct lprocfs_stats **stats); void lprocfs_counter_init(struct lprocfs_stats *stats, int index, - unsigned conf, const char *name, const char *units); + unsigned int conf, const char *name, + const char *units); struct obd_export; int lprocfs_exp_cleanup(struct obd_export *exp); struct dentry *ldebugfs_add_simple(struct dentry *root, @@ -701,9 +704,9 @@ static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store) extern const struct sysfs_ops lustre_sysfs_ops; struct root_squash_info; -int lprocfs_wr_root_squash(const char *buffer, unsigned long count, +int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count, struct root_squash_info *squash, char *name); -int lprocfs_wr_nosquash_nids(const char *buffer, unsigned long count, +int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count, struct root_squash_info *squash, char *name); /* all quota proc functions */ diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h index c2340d643e848f70b97024435a0e6243d1376ac2..b8ad5559a3b9e496a28733c590ff3f01e0a63a96 100644 --- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h +++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h @@ -41,79 +41,24 @@ #ifndef _LUSTRE_FIEMAP_H #define _LUSTRE_FIEMAP_H -struct ll_fiemap_extent { - __u64 fe_logical; /* logical offset in bytes for the start of - * the extent from the beginning of the file - */ - __u64 fe_physical; /* physical offset in bytes for the start - * of the extent from the beginning of the disk - */ - __u64 fe_length; /* length in bytes for this extent */ - __u64 fe_reserved64[2]; - __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */ - __u32 fe_device; /* device number for this extent */ - __u32 fe_reserved[2]; -}; - -struct ll_user_fiemap { - __u64 fm_start; /* logical offset (inclusive) at - * which to start mapping (in) - */ - __u64 fm_length; /* logical length of mapping which - * userspace wants (in) - */ - __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ - __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ - __u32 fm_extent_count; /* size of fm_extents array (in) */ - __u32 fm_reserved; - 
struct ll_fiemap_extent fm_extents[0]; /* array of mapped extents (out) */ -}; - -#define FIEMAP_MAX_OFFSET (~0ULL) +#ifndef __KERNEL__ +#include +#include +#endif -#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before - * map - */ -#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute - * tree - */ -#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ -#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ -#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. - * Sets EXTENT_UNKNOWN. - */ -#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read - * while fs is unmounted - */ -#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. - * Sets EXTENT_NO_DIRECT. - */ -#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be - * block aligned. - */ -#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata. - * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. - * Sets EXTENT_NOT_ALIGNED. - */ -#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but - * no data (i.e. zero). - */ -#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively - * support extents. Result - * merged for efficiency. - */ +/* XXX: We use fiemap_extent::fe_reserved[0] */ +#define fe_device fe_reserved[0] static inline size_t fiemap_count_to_size(size_t extent_count) { - return (sizeof(struct ll_user_fiemap) + extent_count * - sizeof(struct ll_fiemap_extent)); + return sizeof(struct fiemap) + extent_count * + sizeof(struct fiemap_extent); } static inline unsigned fiemap_size_to_count(size_t array_size) { - return ((array_size - sizeof(struct ll_user_fiemap)) / - sizeof(struct ll_fiemap_extent)); + return (array_size - sizeof(struct fiemap)) / + sizeof(struct fiemap_extent); } #define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index 72eaee95c6b8ef2378166c96017abab41621c961..65ce503ad5950a9d98c8418a30792b92e9308be5 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -48,8 +48,7 @@ * that the Lustre wire protocol is not influenced by external dependencies. * * The only other acceptable items in this file are VERY SIMPLE accessor - * functions to avoid callers grubbing inside the structures, and the - * prototypes of the swabber functions for each struct. Nothing that + * functions to avoid callers grubbing inside the structures. Nothing that * depends on external functions or definitions should be in here. * * Structs must be properly aligned to put 64-bit values on an 8-byte @@ -64,23 +63,6 @@ * in the code to ensure that new/old clients that see this larger struct * do not fail, otherwise you need to implement protocol compatibility). * - * We assume all nodes are either little-endian or big-endian, and we - * always send messages in the sender's native format. The receiver - * detects the message format by checking the 'magic' field of the message - * (see lustre_msg_swabbed() below). - * - * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines, - * implemented either here, inline (trivial implementations) or in - * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other" - * endian, in-place in the message buffer. - * - * A swabber takes a single pointer argument. 
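With the private ll_user_fiemap/ll_fiemap_extent copies dropped in favour of the kernel's struct fiemap, the two size helpers above reduce to plain flexible-array arithmetic. A standalone round-trip of the same computation (user-space sketch, assuming <linux/fiemap.h> is available):

#include <stdio.h>
#include <linux/fiemap.h>

static size_t demo_count_to_size(size_t extent_count)
{
	return sizeof(struct fiemap) + extent_count * sizeof(struct fiemap_extent);
}

static size_t demo_size_to_count(size_t array_size)
{
	return (array_size - sizeof(struct fiemap)) / sizeof(struct fiemap_extent);
}

int main(void)
{
	size_t sz = demo_count_to_size(16);

	printf("16 extents need %zu bytes, mapping back to %zu extents\n",
	       sz, demo_size_to_count(sz));
	return 0;
}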
The caller must already have - * verified that the length of the message buffer >= sizeof (type). - * - * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine - * may be defined that swabs just the variable part, after the caller has - * verified that the message buffer is large enough. - * * @{ */ @@ -192,113 +174,6 @@ struct lu_seq_range_array { #define LU_SEQ_RANGE_MASK 0x3 -static inline unsigned fld_range_type(const struct lu_seq_range *range) -{ - return range->lsr_flags & LU_SEQ_RANGE_MASK; -} - -static inline bool fld_range_is_ost(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_OST; -} - -static inline bool fld_range_is_mdt(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_MDT; -} - -/** - * This all range is only being used when fld client sends fld query request, - * but it does not know whether the seq is MDT or OST, so it will send req - * with ALL type, which means either seq type gotten from lookup can be - * expected. - */ -static inline unsigned fld_range_is_any(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_ANY; -} - -static inline void fld_range_set_type(struct lu_seq_range *range, - unsigned flags) -{ - range->lsr_flags |= flags; -} - -static inline void fld_range_set_mdt(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_MDT); -} - -static inline void fld_range_set_ost(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_OST); -} - -static inline void fld_range_set_any(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_ANY); -} - -/** - * returns width of given range \a r - */ - -static inline __u64 range_space(const struct lu_seq_range *range) -{ - return range->lsr_end - range->lsr_start; -} - -/** - * initialize range to zero - */ - -static inline void range_init(struct lu_seq_range *range) -{ - memset(range, 0, sizeof(*range)); -} - -/** - * check if given seq id \a s is within given range \a r - */ - -static inline bool range_within(const struct lu_seq_range *range, - __u64 s) -{ - return s >= range->lsr_start && s < range->lsr_end; -} - -static inline bool range_is_sane(const struct lu_seq_range *range) -{ - return (range->lsr_end >= range->lsr_start); -} - -static inline bool range_is_zero(const struct lu_seq_range *range) -{ - return (range->lsr_start == 0 && range->lsr_end == 0); -} - -static inline bool range_is_exhausted(const struct lu_seq_range *range) - -{ - return range_space(range) == 0; -} - -/* return 0 if two range have the same location */ -static inline int range_compare_loc(const struct lu_seq_range *r1, - const struct lu_seq_range *r2) -{ - return r1->lsr_index != r2->lsr_index || - r1->lsr_flags != r2->lsr_flags; -} - -#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s" - -#define PRANGE(range) \ - (range)->lsr_start, \ - (range)->lsr_end, \ - (range)->lsr_index, \ - fld_range_is_mdt(range) ? "mdt" : "ost" - /** \defgroup lu_fid lu_fid * @{ */ @@ -310,7 +185,7 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, */ enum lma_compat { LMAC_HSM = 0x00000001, - LMAC_SOM = 0x00000002, +/* LMAC_SOM = 0x00000002, obsolete since 2.8.0 */ LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is * under /O//d. 
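The range_*() helpers deleted above are not dropped from the tree; the callers converted earlier in this patch use the lu_seq_range_*() spellings, which presumably live in another header with unchanged semantics (lsr_start inclusive, lsr_end exclusive). A standalone restatement of the core predicates over a simplified two-field range (demo struct, not the wire type):

#include <stdbool.h>
#include <stdint.h>

struct demo_range {		/* simplified stand-in for lu_seq_range */
	uint64_t lsr_start;	/* first sequence in the range (inclusive) */
	uint64_t lsr_end;	/* one past the last sequence (exclusive) */
};

static uint64_t demo_range_space(const struct demo_range *r)
{
	return r->lsr_end - r->lsr_start;
}

static bool demo_range_is_sane(const struct demo_range *r)
{
	return r->lsr_end >= r->lsr_start;
}

static bool demo_range_within(const struct demo_range *r, uint64_t seq)
{
	return seq >= r->lsr_start && seq < r->lsr_end;
}

static bool demo_range_is_exhausted(const struct demo_range *r)
{
	return demo_range_space(r) == 0;
}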
@@ -644,13 +519,14 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) { if (fid_seq_is_mdt0(oi->oi.oi_seq)) { if (oid >= IDIF_MAX_OID) { - CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); + CERROR("Too large OID %#llx to set MDT0 " DOSTID "\n", + oid, POSTID(oi)); return; } oi->oi.oi_id = oid; } else if (fid_is_idif(&oi->oi_fid)) { if (oid >= IDIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", + CERROR("Too large OID %#llx to set IDIF " DOSTID "\n", oid, POSTID(oi)); return; } @@ -676,7 +552,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid) if (fid_is_idif(fid)) { if (oid >= IDIF_MAX_OID) { - CERROR("Too large OID %#llx to set IDIF "DFID"\n", + CERROR("Too large OID %#llx to set IDIF " DFID "\n", (unsigned long long)oid, PFID(fid)); return -EBADF; } @@ -685,7 +561,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid) fid->f_ver = oid >> 48; } else { if (oid >= OBIF_MAX_OID) { - CERROR("Too large OID %#llx to set REG "DFID"\n", + CERROR("Too large OID %#llx to set REG " DFID "\n", (unsigned long long)oid, PFID(fid)); return -EBADF; } @@ -785,8 +661,6 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid) return fid_seq(fid); } -void lustre_swab_ost_id(struct ost_id *oid); - /** * Get inode generation from a igif. * \param fid a igif to get inode generation from. @@ -847,9 +721,6 @@ static inline bool fid_is_sane(const struct lu_fid *fid) fid_seq_is_rsvd(fid_seq(fid))); } -void lustre_swab_lu_fid(struct lu_fid *fid); -void lustre_swab_lu_seq_range(struct lu_seq_range *range); - static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) { return memcmp(f0, f1, sizeof(*f0)) == 0; @@ -1099,8 +970,10 @@ struct ptlrpc_body_v3 { __u32 pb_version; __u32 pb_opc; __u32 pb_status; - __u64 pb_last_xid; - __u64 pb_last_seen; + __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */ + __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */ + __u16 pb_padding0; + __u32 pb_padding1; __u64 pb_last_committed; __u64 pb_transno; __u32 pb_flags; @@ -1112,8 +985,11 @@ struct ptlrpc_body_v3 { __u64 pb_slv; /* VBR: pre-versions */ __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; + __u64 pb_mbits; /**< match bits for bulk request */ /* padding for future needs */ - __u64 pb_padding[4]; + __u64 pb_padding64_0; + __u64 pb_padding64_1; + __u64 pb_padding64_2; char pb_jobid[LUSTRE_JOBID_SIZE]; }; @@ -1125,8 +1001,10 @@ struct ptlrpc_body_v2 { __u32 pb_version; __u32 pb_opc; __u32 pb_status; - __u64 pb_last_xid; - __u64 pb_last_seen; + __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */ + __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */ + __u16 pb_padding0; + __u32 pb_padding1; __u64 pb_last_committed; __u64 pb_transno; __u32 pb_flags; @@ -1140,12 +1018,13 @@ struct ptlrpc_body_v2 { __u64 pb_slv; /* VBR: pre-versions */ __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; + __u64 pb_mbits; /**< unused in V2 */ /* padding for future needs */ - __u64 pb_padding[4]; + __u64 pb_padding64_0; + __u64 pb_padding64_1; + __u64 pb_padding64_2; }; -void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); - /* message body offset for lustre_msg_v2 */ /* ptlrpc body offset in all request/reply messages */ #define MSG_PTLRPC_BODY_OFF 0 @@ -1282,7 +1161,16 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); */ #define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */ #define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */ +#define OBD_CONNECT_MULTIMODRPCS 
0x200000000000000ULL /* support multiple modify + * RPCs in parallel + */ #define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */ +#define OBD_CONNECT_SUBTREE 0x800000000000000ULL /* fileset mount */ +#define OBD_CONNECT_LOCK_AHEAD 0x1000000000000000ULL /* lock ahead */ +/** bulk matchbits is sent within ptlrpc_body */ +#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL +#define OBD_CONNECT_OBDOPACK 0x4000000000000000ULL /* compact OUT obdo */ +#define OBD_CONNECT_FLAGS2 0x8000000000000000ULL /* second flags word */ /* XXX README XXX: * Please DO NOT add flag values here before first ensuring that this same @@ -1313,25 +1201,6 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * If we eventually have separate connect data for different types, which we * almost certainly will, then perhaps we stick a union in here. */ -struct obd_connect_data_v1 { - __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ - __u32 ocd_version; /* lustre release version number */ - __u32 ocd_grant; /* initial cache grant amount (bytes) */ - __u32 ocd_index; /* LOV index to connect to */ - __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */ - __u64 ocd_ibits_known; /* inode bits this client understands */ - __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ - __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ - __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */ - __u32 ocd_unused; /* also fix lustre_swab_connect */ - __u64 ocd_transno; /* first transno from client to be replayed */ - __u32 ocd_group; /* MDS group on OST */ - __u32 ocd_cksum_types; /* supported checksum algorithms */ - __u32 ocd_max_easize; /* How big LOV EA can be on MDS */ - __u32 ocd_instance; /* also fix lustre_swab_connect */ - __u64 ocd_maxbytes; /* Maximum stripe size in bytes */ -}; - struct obd_connect_data { __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ __u32 ocd_version; /* lustre release version number */ @@ -1354,8 +1223,10 @@ struct obd_connect_data { * any field after ocd_maxbytes on the receiver without a valid flag * may result in out-of-bound memory access and kernel oops. */ - __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */ + __u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */ + __u16 padding0; /* added 2.1.0. also fix lustre_swab_connect */ + __u32 padding1; /* added 2.1.0. also fix lustre_swab_connect */ + __u64 ocd_connect_flags2; __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */ @@ -1380,8 +1251,6 @@ struct obd_connect_data { * reserve the flag for future use. */ -void lustre_swab_connect(struct obd_connect_data *ocd); - /* * Supported checksum algorithms. Up to 32 checksum types are supported. 
* (32-bit mask stored in obd_connect_data::ocd_cksum_types) @@ -1416,7 +1285,7 @@ enum ost_cmd { OST_STATFS = 13, OST_SYNC = 16, OST_SET_INFO = 17, - OST_QUOTACHECK = 18, + OST_QUOTACHECK = 18, /* not used since 2.4 */ OST_QUOTACTL = 19, OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ OST_LAST_OPC @@ -1580,8 +1449,6 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); } -/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */ - #define MAX_MD_SIZE \ (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) #define MIN_MD_SIZE \ @@ -1674,7 +1541,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */ #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */ /*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */ -#define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */ +/* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */ #define OBD_MD_FLGROUP (0x01000000ULL) /* group */ #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ @@ -1713,7 +1580,9 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) /* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */ #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */ -#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */ +#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent + * executed + */ #define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */ @@ -1742,11 +1611,6 @@ struct hsm_state_set { __u64 hss_clearmask; }; -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_state_set(struct hsm_state_set *hss); - -void lustre_swab_obd_statfs(struct obd_statfs *os); - /* ost_body.data values for OST_BRW */ #define OBD_BRW_READ 0x01 @@ -1786,14 +1650,16 @@ struct obd_ioobj { __u32 ioo_bufcnt; /* number of niobufs for this object */ }; +/* + * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in + * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS. + * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits. 
+ */ #define IOOBJ_MAX_BRW_BITS 16 -#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1) #define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1) #define ioobj_max_brw_set(ioo, num) \ do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0) -void lustre_swab_obd_ioobj(struct obd_ioobj *ioo); - /* multiple of 8 bytes => can array */ struct niobuf_remote { __u64 rnb_offset; @@ -1801,8 +1667,6 @@ struct niobuf_remote { __u32 rnb_flags; }; -void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); - /* lock value block communicated between the filter and llite */ /* OST_LVB_ERR_INIT is needed because the return code in rc is @@ -1824,8 +1688,6 @@ struct ost_lvb_v1 { __u64 lvb_blocks; }; -void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb); - struct ost_lvb { __u64 lvb_size; __s64 lvb_mtime; @@ -1838,8 +1700,6 @@ struct ost_lvb { __u32 lvb_padding; }; -void lustre_swab_ost_lvb(struct ost_lvb *lvb); - /* * lquota data structures */ @@ -1866,8 +1726,6 @@ struct obd_quotactl { struct obd_dqblk qc_dqblk; }; -void lustre_swab_obd_quotactl(struct obd_quotactl *q); - #define Q_COPY(out, in, member) (out)->member = (in)->member #define QCTL_COPY(out, in) \ @@ -1905,8 +1763,6 @@ struct lquota_lvb { __u64 lvb_pad1; }; -void lustre_swab_lquota_lvb(struct lquota_lvb *lvb); - /* op codes */ enum quota_cmd { QUOTA_DQACQ = 601, @@ -1933,9 +1789,9 @@ enum mds_cmd { MDS_PIN = 42, /* obsolete, never used in a release */ MDS_UNPIN = 43, /* obsolete, never used in a release */ MDS_SYNC = 44, - MDS_DONE_WRITING = 45, + MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */ MDS_SET_INFO = 46, - MDS_QUOTACHECK = 47, + MDS_QUOTACHECK = 47, /* not used since 2.4 */ MDS_QUOTACTL = 48, MDS_GETXATTR = 49, MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ @@ -1972,8 +1828,6 @@ enum mdt_reint_cmd { REINT_MAX }; -void lustre_swab_generic_32s(__u32 *val); - /* the disposition of the intent outlines what was executed */ #define DISP_IT_EXECD 0x00000001 #define DISP_LOOKUP_EXECD 0x00000002 @@ -2031,36 +1885,19 @@ enum { #define MDS_STATUS_CONN 1 #define MDS_STATUS_LOV 2 -/* mdt_thread_info.mti_flags. */ -enum md_op_flags { - /* The flag indicates Size-on-MDS attributes are changed. */ - MF_SOM_CHANGE = (1 << 0), - /* Flags indicates an epoch opens or closes. */ - MF_EPOCH_OPEN = (1 << 1), - MF_EPOCH_CLOSE = (1 << 2), - MF_MDC_CANCEL_FID1 = (1 << 3), - MF_MDC_CANCEL_FID2 = (1 << 4), - MF_MDC_CANCEL_FID3 = (1 << 5), - MF_MDC_CANCEL_FID4 = (1 << 6), - /* There is a pending attribute update. */ - MF_SOM_AU = (1 << 7), - /* Cancel OST locks while getattr OST attributes. 
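The ioobj_max_brw_get()/ioobj_max_brw_set() macros above pack the per-object maximum-BRW value into the upper 16 bits of the 32-bit ioo_max_brw field, storing num - 1 and adding the 1 back on read. A standalone round-trip of that encoding (demo types, mirrors the macros):

#include <assert.h>
#include <stdint.h>

#define DEMO_MAX_BRW_BITS 16	/* field offset, as IOOBJ_MAX_BRW_BITS */

struct demo_ioobj {
	uint32_t ioo_max_brw;	/* upper 16 bits carry (max_brw - 1) */
};

static void demo_max_brw_set(struct demo_ioobj *ioo, uint32_t num)
{
	ioo->ioo_max_brw = (num - 1) << DEMO_MAX_BRW_BITS;
}

static uint32_t demo_max_brw_get(const struct demo_ioobj *ioo)
{
	return (ioo->ioo_max_brw >> DEMO_MAX_BRW_BITS) + 1;
}

int main(void)
{
	struct demo_ioobj ioo;

	demo_max_brw_set(&ioo, 8);
	assert(demo_max_brw_get(&ioo) == 8);	/* encoding round-trips */
	return 0;
}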
*/ - MF_GETATTR_LOCK = (1 << 8), - MF_GET_MDT_IDX = (1 << 9), -}; - -#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE) - -#define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1 - /* these should be identical to their EXT4_*_FL counterparts, they are * redefined here only to avoid dragging in fs/ext4/ext4.h */ #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */ #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */ #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */ +#define LUSTRE_NODUMP_FL 0x00000040 /* do not dump file */ #define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */ +#define LUSTRE_INDEX_FL 0x00001000 /* hash-indexed directory */ #define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */ +#define LUSTRE_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ +#define LUSTRE_DIRECTIO_FL 0x00100000 /* Use direct i/o */ +#define LUSTRE_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */ /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire @@ -2113,7 +1950,7 @@ struct mdt_body { __u32 mbo_mode; __u32 mbo_uid; __u32 mbo_gid; - __u32 mbo_flags; + __u32 mbo_flags; /* LUSTRE_*_FL file attributes */ __u32 mbo_rdev; __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */ __u32 mbo_unused2; /* was "generation" until 2.4.0 */ @@ -2121,7 +1958,7 @@ struct mdt_body { __u32 mbo_eadatasize; __u32 mbo_aclsize; __u32 mbo_max_mdsize; - __u32 mbo_max_cookiesize; + __u32 mbo_unused3; /* was max_cookiesize until 2.8 */ __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */ __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */ __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */ @@ -2132,17 +1969,13 @@ struct mdt_body { __u64 mbo_padding_10; }; /* 216 */ -void lustre_swab_mdt_body(struct mdt_body *b); - struct mdt_ioepoch { - struct lustre_handle handle; - __u64 ioepoch; - __u32 flags; - __u32 padding; + struct lustre_handle mio_handle; + __u64 mio_unused1; /* was ioepoch */ + __u32 mio_unused2; /* was flags */ + __u32 mio_padding; }; -void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b); - /* permissions for md_perm.mp_perm */ enum { CFS_SETUID_PERM = 0x01, @@ -2178,8 +2011,6 @@ struct mdt_rec_setattr { __u32 sa_padding_5; }; -void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); - /* * Attribute flags used in mdt_rec_setattr::sa_valid. * The kernel's #defines for ATTR_* should not be used over the network @@ -2207,12 +2038,9 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); #define MDS_FMODE_CLOSED 00000000 #define MDS_FMODE_EXEC 00000004 -/* IO Epoch is opened on a closed file. */ -#define MDS_FMODE_EPOCH 01000000 -/* IO Epoch is opened on a file truncate. */ -#define MDS_FMODE_TRUNC 02000000 -/* Size-on-MDS Attribute Update is pending. 
*/ -#define MDS_FMODE_SOM 04000000 +/* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */ +/* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */ +/* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */ #define MDS_OPEN_CREATED 00000010 #define MDS_OPEN_CROSS 00000020 @@ -2258,7 +2086,7 @@ enum mds_op_bias { MDS_CROSS_REF = 1 << 1, MDS_VTX_BYPASS = 1 << 2, MDS_PERM_BYPASS = 1 << 3, - MDS_SOM = 1 << 4, +/* MDS_SOM = 1 << 4, obsolete since 2.8.0 */ MDS_QUOTA_IGNORE = 1 << 5, MDS_CLOSE_CLEANUP = 1 << 6, MDS_KEEP_ORPHAN = 1 << 7, @@ -2268,6 +2096,7 @@ enum mds_op_bias { MDS_OWNEROVERRIDE = 1 << 11, MDS_HSM_RELEASE = 1 << 12, MDS_RENAME_MIGRATE = BIT(13), + MDS_CLOSE_LAYOUT_SWAP = BIT(14), }; /* instance of mdt_reint_rec */ @@ -2456,8 +2285,6 @@ struct mdt_rec_reint { __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */ }; -void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); - /* lmv structures */ struct lmv_desc { __u32 ld_tgt_count; /* how many MDS's */ @@ -2547,8 +2374,6 @@ union lmv_mds_md { struct lmv_user_md lmv_user_md; }; -void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm); - static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic) { ssize_t len = -EINVAL; @@ -2652,8 +2477,6 @@ struct lov_desc { #define ld_magic ld_active_tgt_count /* for swabbing from llogs */ -void lustre_swab_lov_desc(struct lov_desc *ld); - /* * LDLM requests: */ @@ -2749,24 +2572,38 @@ struct ldlm_flock_wire { * on the resource type. */ -typedef union { +union ldlm_wire_policy_data { struct ldlm_extent l_extent; struct ldlm_flock_wire l_flock; struct ldlm_inodebits l_inodebits; -} ldlm_wire_policy_data_t; +}; union ldlm_gl_desc { struct ldlm_gl_lquota_desc lquota_desc; }; -void lustre_swab_gl_desc(union ldlm_gl_desc *); +enum ldlm_intent_flags { + IT_OPEN = BIT(0), + IT_CREAT = BIT(1), + IT_OPEN_CREAT = BIT(1) | BIT(0), + IT_READDIR = BIT(2), + IT_GETATTR = BIT(3), + IT_LOOKUP = BIT(4), + IT_UNLINK = BIT(5), + IT_TRUNC = BIT(6), + IT_GETXATTR = BIT(7), + IT_EXEC = BIT(8), + IT_PIN = BIT(9), + IT_LAYOUT = BIT(10), + IT_QUOTA_DQACQ = BIT(11), + IT_QUOTA_CONN = BIT(12), + IT_SETXATTR = BIT(13), +}; struct ldlm_intent { __u64 opc; }; -void lustre_swab_ldlm_intent(struct ldlm_intent *i); - struct ldlm_resource_desc { enum ldlm_type lr_type; __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */ @@ -2777,7 +2614,7 @@ struct ldlm_lock_desc { struct ldlm_resource_desc l_resource; enum ldlm_mode l_req_mode; enum ldlm_mode l_granted_mode; - ldlm_wire_policy_data_t l_policy_data; + union ldlm_wire_policy_data l_policy_data; }; #define LDLM_LOCKREQ_HANDLES 2 @@ -2790,8 +2627,6 @@ struct ldlm_request { struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES]; }; -void lustre_swab_ldlm_request(struct ldlm_request *rq); - /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available. * Otherwise, 2 are available. 
*/ @@ -2813,8 +2648,6 @@ struct ldlm_reply { __u64 lock_policy_res2; }; -void lustre_swab_ldlm_reply(struct ldlm_reply *r); - #define ldlm_flags_to_wire(flags) ((__u32)(flags)) #define ldlm_flags_from_wire(flags) ((__u64)(flags)) @@ -2858,8 +2691,6 @@ struct mgs_target_info { char mti_params[MTI_PARAM_MAXLEN]; }; -void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo); - struct mgs_nidtbl_entry { __u64 mne_version; /* table version of this entry */ __u32 mne_instance; /* target instance # */ @@ -2874,8 +2705,6 @@ struct mgs_nidtbl_entry { } u; }; -void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo); - struct mgs_config_body { char mcb_name[MTI_NAME_MAXLEN]; /* logname */ __u64 mcb_offset; /* next index of config log to request */ @@ -2885,15 +2714,11 @@ struct mgs_config_body { __u32 mcb_units; /* # of units for bulk transfer */ }; -void lustre_swab_mgs_config_body(struct mgs_config_body *body); - struct mgs_config_res { __u64 mcr_offset; /* index of last config log */ __u64 mcr_size; /* size of the log */ }; -void lustre_swab_mgs_config_res(struct mgs_config_res *body); - /* Config marker flags (in config log) */ #define CM_START 0x01 #define CM_END 0x02 @@ -2913,8 +2738,6 @@ struct cfg_marker { char cm_comment[MTI_NAME_MAXLEN]; }; -void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size); - /* * Opcodes for multiple servers. */ @@ -2922,7 +2745,7 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size); enum obd_cmd { OBD_PING = 400, OBD_LOG_CANCEL, - OBD_QC_CALLBACK, + OBD_QC_CALLBACK, /* not used since 2.4 */ OBD_IDX_READ, OBD_LAST_OPC }; @@ -3155,23 +2978,32 @@ struct llog_gen_rec { struct llog_rec_tail lgr_tail; }; -/* On-disk header structure of each log object, stored in little endian order */ -#define LLOG_CHUNK_SIZE 8192 -#define LLOG_HEADER_SIZE (96) -#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE) - -#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */ - /* flags for the logs */ enum llog_flag { LLOG_F_ZAP_WHEN_EMPTY = 0x1, LLOG_F_IS_CAT = 0x2, LLOG_F_IS_PLAIN = 0x4, LLOG_F_EXT_JOBID = BIT(3), + LLOG_F_IS_FIXSIZE = BIT(4), + /* + * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from + * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here, + * because the catlog record is usually fixed size, but its plain + * log record can be variable + */ LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID, }; +/* On-disk header structure of each log object, stored in little endian order */ +#define LLOG_MIN_CHUNK_SIZE 8192 +#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) + + * sizeof(llh_tail) - sizeof(llh_bitmap) + */ +#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE) +#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */ + +/* flags for the logs */ struct llog_log_hdr { struct llog_rec_hdr llh_hdr; __s64 llh_timestamp; @@ -3183,13 +3015,30 @@ struct llog_log_hdr { /* for a catalog the first plain slot is next to it */ struct obd_uuid llh_tgtuuid; __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23]; + /* These fields must always be at the end of the llog_log_hdr. + * Note: llh_bitmap size is variable because llog chunk size could be + * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192 + * bytes, and the real size is stored in llh_hdr.lrh_len, which means + * llh_tail should only be referred by LLOG_HDR_TAIL(). 
+ * But this structure is also used by client/server llog interface + * (see llog_client.c), it will be kept in its original way to avoid + * compatibility issue. + */ __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)]; struct llog_rec_tail llh_tail; } __packed; -#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ - llh->llh_bitmap_offset - \ - sizeof(llh->llh_tail)) * 8) +#undef LLOG_HEADER_SIZE +#undef LLOG_BITMAP_BYTES + +#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ + llh->llh_bitmap_offset - \ + sizeof(llh->llh_tail)) * 8) +#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \ + (llh)->llh_bitmap_offset) +#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \ + llh->llh_hdr.lrh_len - \ + sizeof(llh->llh_tail))) /** log cookies are used to reference a specific log file and a record * therein @@ -3259,7 +3108,8 @@ struct obdo { __u32 o_parent_ver; struct lustre_handle o_handle; /* brw: lock handle to prolong locks */ - struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS + struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS, + * obsolete in 2.8, reused in OSP */ __u32 o_uid_h; __u32 o_gid_h; @@ -3333,30 +3183,11 @@ struct ost_body { /* Key for FIEMAP to be used in get_info calls */ struct ll_fiemap_info_key { - char name[8]; - struct obdo oa; - struct ll_user_fiemap fiemap; + char lfik_name[8]; + struct obdo lfik_oa; + struct fiemap lfik_fiemap; }; -void lustre_swab_ost_body(struct ost_body *b); -void lustre_swab_ost_last_id(__u64 *id); -void lustre_swab_fiemap(struct ll_user_fiemap *fiemap); - -void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum); -void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); -void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, - int stripe_count); -void lustre_swab_lov_mds_md(struct lov_mds_md *lmm); - -/* llog_swab.c */ -void lustre_swab_llogd_body(struct llogd_body *d); -void lustre_swab_llog_hdr(struct llog_log_hdr *h); -void lustre_swab_llogd_conn_body(struct llogd_conn_body *d); -void lustre_swab_llog_rec(struct llog_rec_hdr *rec); - -struct lustre_cfg; -void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg); - /* Functions for dumping PTLRPC fields */ void dump_rniobuf(struct niobuf_remote *rnb); void dump_ioo(struct obd_ioobj *nb); @@ -3394,8 +3225,6 @@ struct lustre_capa { __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ } __packed; -void lustre_swab_lustre_capa(struct lustre_capa *c); - /** lustre_capa::lc_opc */ enum { CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */ @@ -3458,8 +3287,6 @@ struct getinfo_fid2path { char gf_path[0]; } __packed; -void lustre_swab_fid2path(struct getinfo_fid2path *gf); - /** path2parent request/reply structures */ struct getparent { struct lu_fid gp_fid; /**< parent FID */ @@ -3486,8 +3313,6 @@ struct layout_intent { __u64 li_end; }; -void lustre_swab_layout_intent(struct layout_intent *li); - /** * On the wire version of hsm_progress structure. 
* @@ -3506,13 +3331,6 @@ struct hsm_progress_kernel { __u64 hpk_padding2; } __packed; -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_current_action(struct hsm_current_action *action); -void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk); -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_user_item(struct hsm_user_item *hui); -void lustre_swab_hsm_request(struct hsm_request *hr); - /** layout swap request structure * fid1 and fid2 are in mdt_body */ @@ -3520,8 +3338,6 @@ struct mdc_swap_layouts { __u64 msl_flags; } __packed; -void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl); - struct close_data { struct lustre_handle cd_handle; struct lu_fid cd_fid; @@ -3529,7 +3345,5 @@ struct close_data { __u64 cd_reserved[8]; }; -void lustre_swab_close_data(struct close_data *data); - #endif /** @} lustreidl */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h index f3d7c94c3b5054c59028a997e926669a829dabc6..eb08df33b2dbfc4aba04f96b0b17f100cb83cf42 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h @@ -363,8 +363,8 @@ obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf, int max_len) /* OBD_IOC_LOV_GETSTRIPE 155 LL_IOC_LOV_GETSTRIPE */ /* OBD_IOC_LOV_SETEA 156 LL_IOC_LOV_SETEA */ /* lustre/lustre_user.h 157-159 */ -#define OBD_IOC_QUOTACHECK _IOW('f', 160, int) -#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *) +/* OBD_IOC_QUOTACHECK _IOW('f', 160, int) */ +/* OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *) */ #define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl) /* lustre/lustre_user.h 163-176 */ #define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data) diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 6fc985571cba9772904622d603c8fbe7debcef7b..3301ad652db1571600f6ea1676f95acdb2501a8f 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -63,9 +63,13 @@ #if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64) typedef struct stat64 lstat_t; #define lstat_f lstat64 +#define fstat_f fstat64 +#define fstatat_f fstatat64 #else typedef struct stat lstat_t; #define lstat_f lstat +#define fstat_f fstat +#define fstatat_f fstatat #endif #define HAVE_LOV_USER_MDS_DATA @@ -82,7 +86,6 @@ typedef struct stat lstat_t; #define FSFILT_IOC_SETVERSION _IOW('f', 4, long) #define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long) #define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long) -#define FSFILT_IOC_FIEMAP _IOWR('f', 11, struct ll_user_fiemap) #endif /* FIEMAP flags supported by Lustre */ @@ -235,7 +238,7 @@ struct ost_id { /* #define LL_IOC_POLL_QUOTACHECK 161 OBD_IOC_POLL_QUOTACHECK */ /* #define LL_IOC_QUOTACTL 162 OBD_IOC_QUOTACTL */ #define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *) -#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *) +/* IOC_LOV_GETINFO 165 obsolete */ #define LL_IOC_FLUSHCTX _IOW('f', 166, long) /* LL_IOC_RMTACL 167 obsolete */ #define LL_IOC_GETOBDCOUNT _IOR('f', 168, long) @@ -343,6 +346,9 @@ enum ll_lease_type { #define LOV_ALL_STRIPES 0xffff /* only valid for directories */ #define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */ +#define XATTR_LUSTRE_PREFIX 
"lustre." +#define XATTR_LUSTRE_LOV "lustre.lov" + #define lov_user_ost_data lov_user_ost_data_v1 struct lov_user_ost_data_v1 { /* per-stripe data structure */ struct ost_id l_ost_oi; /* OST object ID */ @@ -451,8 +457,6 @@ static inline int lmv_user_md_size(int stripes, int lmm_magic) stripes * sizeof(struct lmv_user_mds_data); } -void lustre_swab_lmv_user_md(struct lmv_user_md *lum); - struct ll_recreate_obj { __u64 lrc_id; __u32 lrc_ost_idx; @@ -522,25 +526,20 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) } /* printf display format - * e.g. printf("file FID is "DFID"\n", PFID(fid)); + * * usage: printf("file FID is "DFID"\n", PFID(fid)); */ #define FID_NOBRACE_LEN 40 #define FID_LEN (FID_NOBRACE_LEN + 2) #define DFID_NOBRACE "%#llx:0x%x:0x%x" #define DFID "["DFID_NOBRACE"]" -#define PFID(fid) \ - (fid)->f_seq, \ - (fid)->f_oid, \ - (fid)->f_ver +#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver -/* scanf input parse format -- strip '[' first. - * e.g. sscanf(fidstr, SFID, RFID(&fid)); +/* scanf input parse format for fids in DFID_NOBRACE format + * Need to strip '[' from DFID format first or use "["SFID"]" at caller. + * usage: sscanf(fidstr, SFID, RFID(&fid)); */ #define SFID "0x%llx:0x%x:0x%x" -#define RFID(fid) \ - &((fid)->f_seq), \ - &((fid)->f_oid), \ - &((fid)->f_ver) +#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver) /********* Quotas **********/ @@ -551,23 +550,18 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) #define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */ /* these must be explicitly translated into linux Q_* in ll_dir_ioctl */ -#define LUSTRE_Q_QUOTAON 0x800002 /* turn quotas on */ -#define LUSTRE_Q_QUOTAOFF 0x800003 /* turn quotas off */ +#define LUSTRE_Q_QUOTAON 0x800002 /* deprecated as of 2.4 */ +#define LUSTRE_Q_QUOTAOFF 0x800003 /* deprecated as of 2.4 */ #define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */ #define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */ #define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */ #define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */ /* lustre-specific control commands */ -#define LUSTRE_Q_INVALIDATE 0x80000b /* invalidate quota data */ -#define LUSTRE_Q_FINVALIDATE 0x80000c /* invalidate filter quota data */ +#define LUSTRE_Q_INVALIDATE 0x80000b /* deprecated as of 2.4 */ +#define LUSTRE_Q_FINVALIDATE 0x80000c /* deprecated as of 2.4 */ #define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */ -struct if_quotacheck { - char obd_type[16]; - struct obd_uuid obd_uuid; -}; - #define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629 /* permission */ @@ -649,6 +643,7 @@ struct if_quotactl { #define SWAP_LAYOUTS_CHECK_DV2 (1 << 1) #define SWAP_LAYOUTS_KEEP_MTIME (1 << 2) #define SWAP_LAYOUTS_KEEP_ATIME (1 << 3) +#define SWAP_LAYOUTS_CLOSE BIT(4) /* Swap XATTR_NAME_HSM as well, only on the MDT so far */ #define SWAP_LAYOUTS_MDS_HSM (1 << 31) @@ -999,6 +994,7 @@ struct ioc_data_version { * See HSM_FLAGS below. 
*/ enum hsm_states { + HS_NONE = 0x00000000, HS_EXISTS = 0x00000001, HS_DIRTY = 0x00000002, HS_RELEASED = 0x00000004, diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h index 567c438e93cb1bcfa89f7f7d2979843df9677988..300e96fb032ae9b202fccb786ee82020f3960c5f 100644 --- a/drivers/staging/lustre/lustre/include/lustre_compat.h +++ b/drivers/staging/lustre/lustre/include/lustre_compat.h @@ -74,4 +74,6 @@ # define ext2_find_next_zero_bit find_next_zero_bit_le #endif +#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) + #endif /* _LUSTRE_COMPAT_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h index d03534432624fb4ed389695e92eb4da60d306571..b7e61d082e55e9107f5d5c8b8bc423be1fd634a0 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h @@ -59,7 +59,7 @@ struct obd_device; #define OBD_LDLM_DEVICENAME "ldlm" #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus()) -#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000)) +#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */ #define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024 /** @@ -86,10 +86,10 @@ enum ldlm_error { * decisions about lack of conflicts or do any autonomous lock granting without * first speaking to a server. */ -typedef enum { +enum ldlm_side { LDLM_NAMESPACE_SERVER = 1 << 0, LDLM_NAMESPACE_CLIENT = 1 << 1 -} ldlm_side_t; +}; /** * The blocking callback is overloaded to perform two functions. These flags @@ -359,7 +359,7 @@ struct ldlm_namespace { struct obd_device *ns_obd; /** Flag indicating if namespace is on client instead of server */ - ldlm_side_t ns_client; + enum ldlm_side ns_client; /** Resource hash table for namespace. */ struct cfs_hash *ns_rs_hash; @@ -550,20 +550,18 @@ struct ldlm_flock { __u64 owner; __u64 blocking_owner; struct obd_export *blocking_export; - /* Protected by the hash lock */ - __u32 blocking_refs; __u32 pid; }; -typedef union { +union ldlm_policy_data { struct ldlm_extent l_extent; struct ldlm_flock l_flock; struct ldlm_inodebits l_inodebits; -} ldlm_policy_data_t; +}; void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, - const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy); + const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); enum lvb_type { LVB_T_NONE = 0, @@ -692,7 +690,7 @@ struct ldlm_lock { * Representation of private data specific for a lock type. * Examples are: extent range for extent lock or bitmask for ibits locks */ - ldlm_policy_data_t l_policy_data; + union ldlm_policy_data l_policy_data; /** * Lock state flags. Protected by lr_lock. @@ -967,8 +965,8 @@ struct ldlm_ast_work { * Common ldlm_enqueue parameters */ struct ldlm_enqueue_info { - __u32 ei_type; /** Type of the lock being enqueued. */ - __u32 ei_mode; /** Mode of the lock being enqueued. */ + enum ldlm_type ei_type; /** Type of the lock being enqueued. */ + enum ldlm_mode ei_mode; /** Mode of the lock being enqueued. */ void *ei_cb_bl; /** blocking lock callback */ void *ei_cb_cp; /** lock completion callback */ void *ei_cb_gl; /** lock glimpse callback */ @@ -979,7 +977,7 @@ struct ldlm_enqueue_info { extern struct obd_ops ldlm_obd_ops; extern char *ldlm_lockname[]; -char *ldlm_it2str(int it); +const char *ldlm_it2str(enum ldlm_intent_flags it); /** * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG. 
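[Editor's note — illustrative sketch, not part of the patch: the lustre_dlm.h hunks above drop the ldlm_policy_data_t and ldlm_side_t typedefs in favour of the named union ldlm_policy_data and enum ldlm_side, and give struct ldlm_enqueue_info strongly typed ei_type/ei_mode fields. A minimal caller-side sketch under the new types follows; the helper name is hypothetical, and LDLM_EXTENT, LCK_PR and OBD_OBJECT_EOF are assumed from the existing Lustre headers rather than from this hunk.]

/* Illustrative sketch only; helper name is hypothetical. */
static void example_fill_extent_enqueue(struct ldlm_enqueue_info *einfo,
					union ldlm_policy_data *policy)
{
	einfo->ei_type = LDLM_EXTENT;	/* enum ldlm_type instead of __u32 */
	einfo->ei_mode = LCK_PR;	/* enum ldlm_mode instead of __u32 */

	/* Whole-file extent; OBD_OBJECT_EOF assumed from lustre_idl.h. */
	policy->l_extent.start = 0;
	policy->l_extent.end = OBD_OBJECT_EOF;
	policy->l_extent.gid = 0;
}

[The same union ldlm_policy_data is then passed to ldlm_cli_enqueue(), whose prototype in the hunk above now takes "union ldlm_policy_data const *policy" instead of the removed typedef.]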
@@ -1168,16 +1166,18 @@ do { \ struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock); void ldlm_lock_put(struct ldlm_lock *lock); void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc); -void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode); -int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode); -void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode); -void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode); +void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode); +int ldlm_lock_addref_try(const struct lustre_handle *lockh, + enum ldlm_mode mode); +void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode); +void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, + enum ldlm_mode mode); void ldlm_lock_fail_match_locked(struct ldlm_lock *lock); void ldlm_lock_allow_match(struct ldlm_lock *lock); void ldlm_lock_allow_match_locked(struct ldlm_lock *lock); enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, const struct ldlm_res_id *, - enum ldlm_type type, ldlm_policy_data_t *, + enum ldlm_type type, union ldlm_policy_data *, enum ldlm_mode mode, struct lustre_handle *, int unref); enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh, @@ -1189,7 +1189,7 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); /* resource.c */ struct ldlm_namespace * ldlm_namespace_new(struct obd_device *obd, char *name, - ldlm_side_t client, enum ldlm_appetite apt, + enum ldlm_side client, enum ldlm_appetite apt, enum ldlm_ns_type ns_type); int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags); void ldlm_namespace_get(struct ldlm_namespace *ns); @@ -1208,7 +1208,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock); void ldlm_resource_unlink_lock(struct ldlm_lock *lock); void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc); -void ldlm_dump_all_namespaces(ldlm_side_t client, int level); +void ldlm_dump_all_namespaces(enum ldlm_side client, int level); void ldlm_namespace_dump(int level, struct ldlm_namespace *); void ldlm_resource_dump(int level, struct ldlm_resource *); int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *, @@ -1241,7 +1241,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, struct ldlm_enqueue_info *einfo, const struct ldlm_res_id *res_id, - ldlm_policy_data_t const *policy, __u64 *flags, + union ldlm_policy_data const *policy, __u64 *flags, void *lvb, __u32 lvb_len, enum lvb_type lvb_type, struct lustre_handle *lockh, int async); int ldlm_prep_enqueue_req(struct obd_export *exp, @@ -1265,13 +1265,13 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *, enum ldlm_cancel_flags flags, void *opaque); int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, enum ldlm_cancel_flags flags, void *opaque); int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, __u64 lock_flags, enum ldlm_cancel_flags cancel_flags, void *opaque); @@ -1333,7 +1333,7 @@ int ldlm_pools_init(void); void ldlm_pools_fini(void); int ldlm_pool_init(struct ldlm_pool *pl, struct 
ldlm_namespace *ns, - int idx, ldlm_side_t client); + int idx, enum ldlm_side client); void ldlm_pool_fini(struct ldlm_pool *pl); void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock); void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock); diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h index 3167806931935218f415a439a21d1440b16a8965..b5a1aadbcb9380927e40a632956b3d572a41078b 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fid.h +++ b/drivers/staging/lustre/lustre/include/lustre_fid.h @@ -150,6 +150,7 @@ #include "../../include/linux/libcfs/libcfs.h" #include "lustre/lustre_idl.h" +#include "seq_range.h" struct lu_env; struct lu_site; diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h index 932410d3e3ccf19a6bcb904265ee695ec62786fe..6ef1b03cb986e9ae89469a43e637be3c234d1821 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fld.h +++ b/drivers/staging/lustre/lustre/include/lustre_fld.h @@ -103,8 +103,6 @@ struct lu_client_fld { /** Client fld debugfs entry name. */ char lcf_name[LUSTRE_MDT_MAXNAMELEN]; - - int lcf_flags; }; /* Client methods */ diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h index cde7ed702c86507399c47ea971be79d3cb12fb8a..dec1e99d594d1b4773ad4556546eb79cf7d3575c 100644 --- a/drivers/staging/lustre/lustre/include/lustre_ha.h +++ b/drivers/staging/lustre/lustre/include/lustre_ha.h @@ -53,6 +53,7 @@ void ptlrpc_activate_import(struct obd_import *imp); void ptlrpc_deactivate_import(struct obd_import *imp); void ptlrpc_invalidate_import(struct obd_import *imp); void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt); +void ptlrpc_pinger_force(struct obd_import *imp); /** @} ha */ diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h index 5461ba33d90c0a1e581787902d7b86ab0681ef5c..f0c931ce1a67183b272cdc7126536e7317b29687 100644 --- a/drivers/staging/lustre/lustre/include/lustre_import.h +++ b/drivers/staging/lustre/lustre/include/lustre_import.h @@ -185,6 +185,11 @@ struct obd_import { struct list_head *imp_replay_cursor; /** @} */ + /** List of not replied requests */ + struct list_head imp_unreplied_list; + /** Known maximal replied XID */ + __u64 imp_known_replied_xid; + /** obd device for this import */ struct obd_device *imp_obd; @@ -294,7 +299,9 @@ struct obd_import { */ imp_force_reconnect:1, /* import has tried to connect with server */ - imp_connect_tried:1; + imp_connect_tried:1, + /* connected but not FULL yet */ + imp_connected:1; __u32 imp_connect_op; struct obd_connect_data imp_connect_data; __u64 imp_connect_flags_orig; diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index 6b231913ba2ecb807ab8ec6a9ee0da1bbdd1e7e4..27f3148c43442e9918bd87094b9053fdc1846f1e 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -350,8 +350,6 @@ do { \ l_wait_event_exclusive_head(wq, condition, &lwi); \ }) -#define LIBLUSTRE_CLIENT (0) - /** @} lib */ #endif /* _LUSTRE_LIB_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h index d7f7afa8dfa7ef65f09806f7efa08b085ca2b46b..5aa3645e64dc2b04ab431056274948d808450d8b 100644 --- 
a/drivers/staging/lustre/lustre/include/lustre_lmv.h +++ b/drivers/staging/lustre/lustre/include/lustre_lmv.h @@ -76,18 +76,7 @@ lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2) union lmv_mds_md; -int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp, - const union lmv_mds_md *lmm, int stripe_count); - -static inline int lmv_alloc_memmd(struct lmv_stripe_md **lsmp, int stripe_count) -{ - return lmv_unpack_md(NULL, lsmp, NULL, stripe_count); -} - -static inline void lmv_free_memmd(struct lmv_stripe_md *lsm) -{ - lmv_unpack_md(NULL, &lsm, NULL, 0); -} +void lmv_free_memmd(struct lmv_stripe_md *lsm); static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst, const struct lmv_mds_md_v1 *lmv_src) diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h index 995b266932e33c71b4fe4cb43a9cd99cd20f6ffc..35e37eb1bc2c266a3e7aa66ae56efbd7b4690064 100644 --- a/drivers/staging/lustre/lustre/include/lustre_log.h +++ b/drivers/staging/lustre/lustre/include/lustre_log.h @@ -214,6 +214,7 @@ struct llog_handle { spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */ struct llog_logid lgh_id; /* id of this log */ struct llog_log_hdr *lgh_hdr; + size_t lgh_hdr_size; int lgh_last_idx; int lgh_cur_idx; /* used during llog_process */ __u64 lgh_cur_offset; /* used during llog_process */ @@ -244,6 +245,11 @@ struct llog_ctxt { struct mutex loc_mutex; /* protect loc_imp */ atomic_t loc_refcount; long loc_flags; /* flags, see above defines */ + /* + * llog chunk size, and llog record size can not be bigger than + * loc_chunk_size + */ + __u32 loc_chunk_size; }; #define LLOG_PROC_BREAK 0x0001 diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h index 8fc2d3f2dfd67e9702d8f89bd8506df2ba1eeca0..198ceb0c66f93eb018d8718f43a03e334b9d9bd1 100644 --- a/drivers/staging/lustre/lustre/include/lustre_mdc.h +++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h @@ -156,16 +156,39 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck, mutex_unlock(&lck->rpcl_mutex); } +static inline void mdc_get_mod_rpc_slot(struct ptlrpc_request *req, + struct lookup_intent *it) +{ + struct client_obd *cli = &req->rq_import->imp_obd->u.cli; + u32 opc; + u16 tag; + + opc = lustre_msg_get_opc(req->rq_reqmsg); + tag = obd_get_mod_rpc_slot(cli, opc, it); + lustre_msg_set_tag(req->rq_reqmsg, tag); +} + +static inline void mdc_put_mod_rpc_slot(struct ptlrpc_request *req, + struct lookup_intent *it) +{ + struct client_obd *cli = &req->rq_import->imp_obd->u.cli; + u32 opc; + u16 tag; + + opc = lustre_msg_get_opc(req->rq_reqmsg); + tag = lustre_msg_get_tag(req->rq_reqmsg); + obd_put_mod_rpc_slot(cli, opc, it, tag); +} + /** - * Update the maximum possible easize and cookiesize. + * Update the maximum possible easize. * - * The values are learned from ptlrpc replies sent by the MDT. The - * default easize and cookiesize is initialized to the minimum value but - * allowed to grow up to a single page in size if required to handle the + * This value is learned from ptlrpc replies sent by the MDT. The + * default easize is initialized to the minimum value but allowed + * to grow up to a single page in size if required to handle the * common case. 
* - * \see client_obd::cl_default_mds_easize and - * client_obd::cl_default_mds_cookiesize + * \see client_obd::cl_default_mds_easize * * \param[in] exp export for MDC device * \param[in] body body of ptlrpc reply from MDT @@ -176,7 +199,7 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp, { if (body->mbo_valid & OBD_MD_FLMODEASIZE) { struct client_obd *cli = &exp->exp_obd->u.cli; - u32 def_cookiesize, def_easize; + u32 def_easize; if (cli->cl_max_mds_easize < body->mbo_max_mdsize) cli->cl_max_mds_easize = body->mbo_max_mdsize; @@ -184,13 +207,6 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp, def_easize = min_t(__u32, body->mbo_max_mdsize, OBD_MAX_DEFAULT_EA_SIZE); cli->cl_default_mds_easize = def_easize; - - if (cli->cl_max_mds_cookiesize < body->mbo_max_cookiesize) - cli->cl_max_mds_cookiesize = body->mbo_max_cookiesize; - - def_cookiesize = min_t(__u32, body->mbo_max_cookiesize, - OBD_MAX_DEFAULT_COOKIE_SIZE); - cli->cl_default_mds_cookiesize = def_cookiesize; } } diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index e9aba99ee52a774c732f84162cdc35ae87d70def..411eb0dc7f381ddac6ed5a8f5a2876454036ac9c 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -50,6 +50,7 @@ * @{ */ +#include #include "../../include/linux/libcfs/libcfs.h" #include "../../include/linux/lnet/nidstr.h" #include "../../include/linux/lnet/api.h" @@ -68,13 +69,17 @@ #define PTLRPC_MD_OPTIONS 0 /** - * Max # of bulk operations in one request. + * log2 max # of bulk operations in one request: 2=4MB/RPC, 5=32MB/RPC, ... * In order for the client and server to properly negotiate the maximum * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two * value. The client is free to limit the actual RPC size for any bulk * transfer via cl_max_pages_per_rpc to some non-power-of-two value. + * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS. */ -#define PTLRPC_BULK_OPS_BITS 2 +#define PTLRPC_BULK_OPS_BITS 4 +#if PTLRPC_BULK_OPS_BITS > 16 +#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS." +#endif #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS) /** * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and @@ -437,6 +442,10 @@ struct ptlrpc_reply_state { unsigned long rs_committed:1;/* the transaction was committed * and the rs was dispatched */ + atomic_t rs_refcount; /* number of users */ + /** Number of locks awaiting client ACK */ + int rs_nlocks; + /** Size of the state */ int rs_size; /** opcode */ @@ -449,7 +458,6 @@ struct ptlrpc_reply_state { struct ptlrpc_service_part *rs_svcpt; /** Lnet metadata handle for the reply */ lnet_handle_md_t rs_md_h; - atomic_t rs_refcount; /** Context for the service thread */ struct ptlrpc_svc_ctx *rs_svc_ctx; @@ -466,8 +474,6 @@ struct ptlrpc_reply_state { */ struct lustre_msg *rs_msg; /* reply message */ - /** Number of locks awaiting client ACK */ - int rs_nlocks; /** Handles of locks awaiting client reply ACK */ struct lustre_handle rs_locks[RS_MAX_LOCKS]; /** Lock modes of locks in \a rs_locks */ @@ -515,717 +521,7 @@ struct lu_env; struct ldlm_lock; -/** - * \defgroup nrs Network Request Scheduler - * @{ - */ -struct ptlrpc_nrs_policy; -struct ptlrpc_nrs_resource; -struct ptlrpc_nrs_request; - -/** - * NRS control operations. - * - * These are common for all policies. - */ -enum ptlrpc_nrs_ctl { - /** - * Not a valid opcode. 
- */ - PTLRPC_NRS_CTL_INVALID, - /** - * Activate the policy. - */ - PTLRPC_NRS_CTL_START, - /** - * Reserved for multiple primary policies, which may be a possibility - * in the future. - */ - PTLRPC_NRS_CTL_STOP, - /** - * Policies can start using opcodes from this value and onwards for - * their own purposes; the assigned value itself is arbitrary. - */ - PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20, -}; - -/** - * ORR policy operations - */ -enum nrs_ctl_orr { - NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC, - NRS_CTL_ORR_WR_QUANTUM, - NRS_CTL_ORR_RD_OFF_TYPE, - NRS_CTL_ORR_WR_OFF_TYPE, - NRS_CTL_ORR_RD_SUPP_REQ, - NRS_CTL_ORR_WR_SUPP_REQ, -}; - -/** - * NRS policy operations. - * - * These determine the behaviour of a policy, and are called in response to - * NRS core events. - */ -struct ptlrpc_nrs_pol_ops { - /** - * Called during policy registration; this operation is optional. - * - * \param[in,out] policy The policy being initialized - */ - int (*op_policy_init)(struct ptlrpc_nrs_policy *policy); - /** - * Called during policy unregistration; this operation is optional. - * - * \param[in,out] policy The policy being unregistered/finalized - */ - void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy); - /** - * Called when activating a policy via lprocfs; policies allocate and - * initialize their resources here; this operation is optional. - * - * \param[in,out] policy The policy being started - * - * \see nrs_policy_start_locked() - */ - int (*op_policy_start)(struct ptlrpc_nrs_policy *policy); - /** - * Called when deactivating a policy via lprocfs; policies deallocate - * their resources here; this operation is optional - * - * \param[in,out] policy The policy being stopped - * - * \see nrs_policy_stop0() - */ - void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy); - /** - * Used for policy-specific operations; i.e. not generic ones like - * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous - * to an ioctl; this operation is optional. - * - * \param[in,out] policy The policy carrying out operation \a opc - * \param[in] opc The command operation being carried out - * \param[in,out] arg An generic buffer for communication between the - * user and the control operation - * - * \retval -ve error - * \retval 0 success - * - * \see ptlrpc_nrs_policy_control() - */ - int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy, - enum ptlrpc_nrs_ctl opc, void *arg); - - /** - * Called when obtaining references to the resources of the resource - * hierarchy for a request that has arrived for handling at the PTLRPC - * service. Policies should return -ve for requests they do not wish - * to handle. This operation is mandatory. - * - * \param[in,out] policy The policy we're getting resources for. - * \param[in,out] nrq The request we are getting resources for. - * \param[in] parent The parent resource of the resource being - * requested; set to NULL if none. - * \param[out] resp The resource is to be returned here; the - * fallback policy in an NRS head should - * \e always return a non-NULL pointer value. - * \param[in] moving_req When set, signifies that this is an attempt - * to obtain resources for a request being moved - * to the high-priority NRS head by - * ldlm_lock_reorder_req(). - * This implies two things: - * 1. We are under obd_export::exp_rpc_lock and - * so should not sleep. - * 2. 
We should not perform non-idempotent or can - * skip performing idempotent operations that - * were carried out when resources were first - * taken for the request when it was initialized - * in ptlrpc_nrs_req_initialize(). - * - * \retval 0, +ve The level of the returned resource in the resource - * hierarchy; currently only 0 (for a non-leaf resource) - * and 1 (for a leaf resource) are supported by the - * framework. - * \retval -ve error - * - * \see ptlrpc_nrs_req_initialize() - * \see ptlrpc_nrs_hpreq_add_nolock() - */ - int (*op_res_get)(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq, - const struct ptlrpc_nrs_resource *parent, - struct ptlrpc_nrs_resource **resp, - bool moving_req); - /** - * Called when releasing references taken for resources in the resource - * hierarchy for the request; this operation is optional. - * - * \param[in,out] policy The policy the resource belongs to - * \param[in] res The resource to be freed - * - * \see ptlrpc_nrs_req_finalize() - * \see ptlrpc_nrs_hpreq_add_nolock() - */ - void (*op_res_put)(struct ptlrpc_nrs_policy *policy, - const struct ptlrpc_nrs_resource *res); - - /** - * Obtains a request for handling from the policy, and optionally - * removes the request from the policy; this operation is mandatory. - * - * \param[in,out] policy The policy to poll - * \param[in] peek When set, signifies that we just want to - * examine the request, and not handle it, so the - * request is not removed from the policy. - * \param[in] force When set, it will force a policy to return a - * request if it has one queued. - * - * \retval NULL No request available for handling - * \retval valid-pointer The request polled for handling - * - * \see ptlrpc_nrs_req_get_nolock() - */ - struct ptlrpc_nrs_request * - (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek, - bool force); - /** - * Called when attempting to add a request to a policy for later - * handling; this operation is mandatory. - * - * \param[in,out] policy The policy on which to enqueue \a nrq - * \param[in,out] nrq The request to enqueue - * - * \retval 0 success - * \retval != 0 error - * - * \see ptlrpc_nrs_req_add_nolock() - */ - int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Removes a request from the policy's set of pending requests. Normally - * called after a request has been polled successfully from the policy - * for handling; this operation is mandatory. - * - * \param[in,out] policy The policy the request \a nrq belongs to - * \param[in,out] nrq The request to dequeue - */ - void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Called after the request being carried out. Could be used for - * job/resource control; this operation is optional. - * - * \param[in,out] policy The policy which is stopping to handle request - * \a nrq - * \param[in,out] nrq The request - * - * \pre assert_spin_locked(&svcpt->scp_req_lock) - * - * \see ptlrpc_nrs_req_stop_nolock() - */ - void (*op_req_stop)(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Registers the policy's lprocfs interface with a PTLRPC service. - * - * \param[in] svc The service - * - * \retval 0 success - * \retval != 0 error - */ - int (*op_lprocfs_init)(struct ptlrpc_service *svc); - /** - * Unegisters the policy's lprocfs interface with a PTLRPC service. 
- * - * In cases of failed policy registration in - * \e ptlrpc_nrs_policy_register(), this function may be called for a - * service which has not registered the policy successfully, so - * implementations of this method should make sure their operations are - * safe in such cases. - * - * \param[in] svc The service - */ - void (*op_lprocfs_fini)(struct ptlrpc_service *svc); -}; - -/** - * Policy flags - */ -enum nrs_policy_flags { - /** - * Fallback policy, use this flag only on a single supported policy per - * service. The flag cannot be used on policies that use - * \e PTLRPC_NRS_FL_REG_EXTERN - */ - PTLRPC_NRS_FL_FALLBACK = (1 << 0), - /** - * Start policy immediately after registering. - */ - PTLRPC_NRS_FL_REG_START = (1 << 1), - /** - * This is a policy registering from a module different to the one NRS - * core ships in (currently ptlrpc). - */ - PTLRPC_NRS_FL_REG_EXTERN = (1 << 2), -}; - -/** - * NRS queue type. - * - * Denotes whether an NRS instance is for handling normal or high-priority - * RPCs, or whether an operation pertains to one or both of the NRS instances - * in a service. - */ -enum ptlrpc_nrs_queue_type { - PTLRPC_NRS_QUEUE_REG = (1 << 0), - PTLRPC_NRS_QUEUE_HP = (1 << 1), - PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP) -}; - -/** - * NRS head - * - * A PTLRPC service has at least one NRS head instance for handling normal - * priority RPCs, and may optionally have a second NRS head instance for - * handling high-priority RPCs. Each NRS head maintains a list of available - * policies, of which one and only one policy is acting as the fallback policy, - * and optionally a different policy may be acting as the primary policy. For - * all RPCs handled by this NRS head instance, NRS core will first attempt to - * enqueue the RPC using the primary policy (if any). The fallback policy is - * used in the following cases: - * - when there was no primary policy in the - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request - * was initialized. - * - when the primary policy that was at the - * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the - * RPC was initialized, denoted it did not wish, or for some other reason was - * not able to handle the request, by returning a non-valid NRS resource - * reference. - * - when the primary policy that was at the - * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the - * RPC was initialized, fails later during the request enqueueing stage. - * - * \see nrs_resource_get_safe() - * \see nrs_request_enqueue() - */ -struct ptlrpc_nrs { - spinlock_t nrs_lock; - /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */ - /** - * List of registered policies - */ - struct list_head nrs_policy_list; - /** - * List of policies with queued requests. Policies that have any - * outstanding requests are queued here, and this list is queried - * in a round-robin manner from NRS core when obtaining a request - * for handling. This ensures that requests from policies that at some - * point transition away from the - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained. 
- */ - struct list_head nrs_policy_queued; - /** - * Service partition for this NRS head - */ - struct ptlrpc_service_part *nrs_svcpt; - /** - * Primary policy, which is the preferred policy for handling RPCs - */ - struct ptlrpc_nrs_policy *nrs_policy_primary; - /** - * Fallback policy, which is the backup policy for handling RPCs - */ - struct ptlrpc_nrs_policy *nrs_policy_fallback; - /** - * This NRS head handles either HP or regular requests - */ - enum ptlrpc_nrs_queue_type nrs_queue_type; - /** - * # queued requests from all policies in this NRS head - */ - unsigned long nrs_req_queued; - /** - * # scheduled requests from all policies in this NRS head - */ - unsigned long nrs_req_started; - /** - * # policies on this NRS - */ - unsigned nrs_num_pols; - /** - * This NRS head is in progress of starting a policy - */ - unsigned nrs_policy_starting:1; - /** - * In progress of shutting down the whole NRS head; used during - * unregistration - */ - unsigned nrs_stopping:1; -}; - -#define NRS_POL_NAME_MAX 16 - -struct ptlrpc_nrs_pol_desc; - -/** - * Service compatibility predicate; this determines whether a policy is adequate - * for handling RPCs of a particular PTLRPC service. - * - * XXX:This should give the same result during policy registration and - * unregistration, and for all partitions of a service; so the result should not - * depend on temporal service or other properties, that may influence the - * result. - */ -typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc, - const struct ptlrpc_nrs_pol_desc *desc); - -struct ptlrpc_nrs_pol_conf { - /** - * Human-readable policy name - */ - char nc_name[NRS_POL_NAME_MAX]; - /** - * NRS operations for this policy - */ - const struct ptlrpc_nrs_pol_ops *nc_ops; - /** - * Service compatibility predicate - */ - nrs_pol_desc_compat_t nc_compat; - /** - * Set for policies that support a single ptlrpc service, i.e. ones that - * have \a pd_compat set to nrs_policy_compat_one(). The variable value - * depicts the name of the single service that such policies are - * compatible with. - */ - const char *nc_compat_svc_name; - /** - * Owner module for this policy descriptor; policies registering from a - * different module to the one the NRS framework is held within - * (currently ptlrpc), should set this field to THIS_MODULE. - */ - struct module *nc_owner; - /** - * Policy registration flags; a bitmask of \e nrs_policy_flags - */ - unsigned nc_flags; -}; - -/** - * NRS policy registering descriptor - * - * Is used to hold a description of a policy that can be passed to NRS core in - * order to register the policy with NRS heads in different PTLRPC services. - */ -struct ptlrpc_nrs_pol_desc { - /** - * Human-readable policy name - */ - char pd_name[NRS_POL_NAME_MAX]; - /** - * Link into nrs_core::nrs_policies - */ - struct list_head pd_list; - /** - * NRS operations for this policy - */ - const struct ptlrpc_nrs_pol_ops *pd_ops; - /** - * Service compatibility predicate - */ - nrs_pol_desc_compat_t pd_compat; - /** - * Set for policies that are compatible with only one PTLRPC service. - * - * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name - */ - const char *pd_compat_svc_name; - /** - * Owner module for this policy descriptor. - * - * We need to hold a reference to the module whenever we might make use - * of any of the module's contents, i.e. - * - If one or more instances of the policy are at a state where they - * might be handling a request, i.e. 
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to - * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference - * is taken on the module when - * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it - * becomes 0, so that we hold only one reference to the module maximum - * at any time. - * - * We do not need to hold a reference to the module, even though we - * might use code and data from the module, in the following cases: - * - During external policy registration, because this should happen in - * the module's init() function, in which case the module is safe from - * removal because a reference is being held on the module by the - * kernel, and iirc kmod (and I guess module-init-tools also) will - * serialize any racing processes properly anyway. - * - During external policy unregistration, because this should happen - * in a module's exit() function, and any attempts to start a policy - * instance would need to take a reference on the module, and this is - * not possible once we have reached the point where the exit() - * handler is called. - * - During service registration and unregistration, as service setup - * and cleanup, and policy registration, unregistration and policy - * instance starting, are serialized by \e nrs_core::nrs_mutex, so - * as long as users adhere to the convention of registering policies - * in init() and unregistering them in module exit() functions, there - * should not be a race between these operations. - * - During any policy-specific lprocfs operations, because a reference - * is held by the kernel on a proc entry that has been entered by a - * syscall, so as long as proc entries are removed during unregistration time, - * then unregistration and lprocfs operations will be properly - * serialized. - */ - struct module *pd_owner; - /** - * Bitmask of \e nrs_policy_flags - */ - unsigned pd_flags; - /** - * # of references on this descriptor - */ - atomic_t pd_refs; -}; - -/** - * NRS policy state - * - * Policies transition from one state to the other during their lifetime - */ -enum ptlrpc_nrs_pol_state { - /** - * Not a valid policy state. - */ - NRS_POL_STATE_INVALID, - /** - * Policies are at this state either at the start of their life, or - * transition here when the user selects a different policy to act - * as the primary one. - */ - NRS_POL_STATE_STOPPED, - /** - * Policy is progress of stopping - */ - NRS_POL_STATE_STOPPING, - /** - * Policy is in progress of starting - */ - NRS_POL_STATE_STARTING, - /** - * A policy is in this state in two cases: - * - it is the fallback policy, which is always in this state. - * - it has been activated by the user; i.e. it is the primary policy, - */ - NRS_POL_STATE_STARTED, -}; - -/** - * NRS policy information - * - * Used for obtaining information for the status of a policy via lprocfs - */ -struct ptlrpc_nrs_pol_info { - /** - * Policy name - */ - char pi_name[NRS_POL_NAME_MAX]; - /** - * Current policy state - */ - enum ptlrpc_nrs_pol_state pi_state; - /** - * # RPCs enqueued for later dispatching by the policy - */ - long pi_req_queued; - /** - * # RPCs started for dispatch by the policy - */ - long pi_req_started; - /** - * Is this a fallback policy? - */ - unsigned pi_fallback:1; -}; - -/** - * NRS policy - * - * There is one instance of this for each policy in each NRS head of each - * PTLRPC service partition. 
- */ -struct ptlrpc_nrs_policy { - /** - * Linkage into the NRS head's list of policies, - * ptlrpc_nrs:nrs_policy_list - */ - struct list_head pol_list; - /** - * Linkage into the NRS head's list of policies with enqueued - * requests ptlrpc_nrs:nrs_policy_queued - */ - struct list_head pol_list_queued; - /** - * Current state of this policy - */ - enum ptlrpc_nrs_pol_state pol_state; - /** - * Bitmask of nrs_policy_flags - */ - unsigned pol_flags; - /** - * # RPCs enqueued for later dispatching by the policy - */ - long pol_req_queued; - /** - * # RPCs started for dispatch by the policy - */ - long pol_req_started; - /** - * Usage Reference count taken on the policy instance - */ - long pol_ref; - /** - * The NRS head this policy has been created at - */ - struct ptlrpc_nrs *pol_nrs; - /** - * Private policy data; varies by policy type - */ - void *pol_private; - /** - * Policy descriptor for this policy instance. - */ - struct ptlrpc_nrs_pol_desc *pol_desc; -}; - -/** - * NRS resource - * - * Resources are embedded into two types of NRS entities: - * - Inside NRS policies, in the policy's private data in - * ptlrpc_nrs_policy::pol_private - * - In objects that act as prime-level scheduling entities in different NRS - * policies; e.g. on a policy that performs round robin or similar order - * scheduling across client NIDs, there would be one NRS resource per unique - * client NID. On a policy which performs round robin scheduling across - * backend filesystem objects, there would be one resource associated with - * each of the backend filesystem objects partaking in the scheduling - * performed by the policy. - * - * NRS resources share a parent-child relationship, in which resources embedded - * in policy instances are the parent entities, with all scheduling entities - * a policy schedules across being the children, thus forming a simple resource - * hierarchy. This hierarchy may be extended with one or more levels in the - * future if the ability to have more than one primary policy is added. - * - * Upon request initialization, references to the then active NRS policies are - * taken and used to later handle the dispatching of the request with one of - * these policies. - * - * \see nrs_resource_get_safe() - * \see ptlrpc_nrs_req_add() - */ -struct ptlrpc_nrs_resource { - /** - * This NRS resource's parent; is NULL for resources embedded in NRS - * policy instances; i.e. those are top-level ones. - */ - struct ptlrpc_nrs_resource *res_parent; - /** - * The policy associated with this resource. - */ - struct ptlrpc_nrs_policy *res_policy; -}; - -enum { - NRS_RES_FALLBACK, - NRS_RES_PRIMARY, - NRS_RES_MAX -}; - -/* \name fifo - * - * FIFO policy - * - * This policy is a logical wrapper around previous, non-NRS functionality. - * It dispatches RPCs in the same order as they arrive from the network. This - * policy is currently used as the fallback policy, and the only enabled policy - * on all NRS heads of all PTLRPC service partitions. - * @{ - */ - -/** - * Private data structure for the FIFO policy - */ -struct nrs_fifo_head { - /** - * Resource object for policy instance. - */ - struct ptlrpc_nrs_resource fh_res; - /** - * List of queued requests. - */ - struct list_head fh_list; - /** - * For debugging purposes. 
- */ - __u64 fh_sequence; -}; - -struct nrs_fifo_req { - struct list_head fr_list; - __u64 fr_sequence; -}; - -/** @} fifo */ - -/** - * NRS request - * - * Instances of this object exist embedded within ptlrpc_request; the main - * purpose of this object is to hold references to the request's resources - * for the lifetime of the request, and to hold properties that policies use - * use for determining the request's scheduling priority. - */ -struct ptlrpc_nrs_request { - /** - * The request's resource hierarchy. - */ - struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX]; - /** - * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the - * policy that was used to enqueue the request. - * - * \see nrs_request_enqueue() - */ - unsigned nr_res_idx; - unsigned nr_initialized:1; - unsigned nr_enqueued:1; - unsigned nr_started:1; - unsigned nr_finalized:1; - - /** - * Policy-specific fields, used for determining a request's scheduling - * priority, and other supporting functionality. - */ - union { - /** - * Fields for the FIFO policy - */ - struct nrs_fifo_req fifo; - } nr_u; - /** - * Externally-registering policies may want to use this to allocate - * their own request properties. - */ - void *ext; -}; - -/** @} nrs */ +#include "lustre_nrs.h" /** * Basic request prioritization operations structure. @@ -1304,6 +600,8 @@ struct ptlrpc_cli_req { union ptlrpc_async_args cr_async_args; /** Opaq data for replay and commit callbacks. */ void *cr_cb_data; + /** Link to the imp->imp_unreplied_list */ + struct list_head cr_unreplied_list; /** * Commit callback, called when request is committed and about to be * freed. @@ -1343,6 +641,7 @@ struct ptlrpc_cli_req { #define rq_interpret_reply rq_cli.cr_reply_interp #define rq_async_args rq_cli.cr_async_args #define rq_cb_data rq_cli.cr_cb_data +#define rq_unreplied_list rq_cli.cr_unreplied_list #define rq_commit_cb rq_cli.cr_commit_cb #define rq_replay_cb rq_cli.cr_replay_cb @@ -1505,6 +804,8 @@ struct ptlrpc_request { __u64 rq_transno; /** xid */ __u64 rq_xid; + /** bulk match bits */ + u64 rq_mbits; /** * List item to for replay list. Not yet committed requests get linked * there. 
@@ -1793,10 +1094,93 @@ struct ptlrpc_bulk_page { struct page *bp_page; }; -#define BULK_GET_SOURCE 0 -#define BULK_PUT_SINK 1 -#define BULK_GET_SINK 2 -#define BULK_PUT_SOURCE 3 +enum ptlrpc_bulk_op_type { + PTLRPC_BULK_OP_ACTIVE = 0x00000001, + PTLRPC_BULK_OP_PASSIVE = 0x00000002, + PTLRPC_BULK_OP_PUT = 0x00000004, + PTLRPC_BULK_OP_GET = 0x00000008, + PTLRPC_BULK_BUF_KVEC = 0x00000010, + PTLRPC_BULK_BUF_KIOV = 0x00000020, + PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET, + PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT, + PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET, + PTLRPC_BULK_PUT_SOURCE = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT, +}; + +static inline bool ptlrpc_is_bulk_op_get(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET; +} + +static inline bool ptlrpc_is_bulk_get_source(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_GET_SOURCE) == PTLRPC_BULK_GET_SOURCE; +} + +static inline bool ptlrpc_is_bulk_put_sink(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_PUT_SINK) == PTLRPC_BULK_PUT_SINK; +} + +static inline bool ptlrpc_is_bulk_get_sink(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_GET_SINK) == PTLRPC_BULK_GET_SINK; +} + +static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE; +} + +static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type) +{ + return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV)) + == PTLRPC_BULK_BUF_KVEC; +} + +static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type) +{ + return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV)) + == PTLRPC_BULK_BUF_KIOV; +} + +static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type) +{ + return ((type & PTLRPC_BULK_OP_ACTIVE) | + (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_ACTIVE; +} + +static inline bool ptlrpc_is_bulk_op_passive(enum ptlrpc_bulk_op_type type) +{ + return ((type & PTLRPC_BULK_OP_ACTIVE) | + (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_PASSIVE; +} + +struct ptlrpc_bulk_frag_ops { + /** + * Add a page \a page to the bulk descriptor \a desc + * Data to transfer in the page starts at offset \a pageoffset and + * amount of data to transfer from the page is \a len + */ + void (*add_kiov_frag)(struct ptlrpc_bulk_desc *desc, + struct page *page, int pageoffset, int len); + + /* + * Add a \a fragment to the bulk descriptor \a desc. + * Data to transfer in the fragment is pointed to by \a frag + * The size of the fragment is \a len + */ + int (*add_iov_frag)(struct ptlrpc_bulk_desc *desc, void *frag, int len); + + /** + * Uninitialize and free bulk descriptor \a desc. + * Works on bulk descriptors both from server and client side. + */ + void (*release_frags)(struct ptlrpc_bulk_desc *desc); +}; + +extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops; +extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops; /** * Definition of bulk descriptor. 
@@ -1811,14 +1195,14 @@ struct ptlrpc_bulk_page { struct ptlrpc_bulk_desc { /** completed with failure */ unsigned long bd_failure:1; - /** {put,get}{source,sink} */ - unsigned long bd_type:2; /** client side */ unsigned long bd_registered:1; /** For serialization with callback */ spinlock_t bd_lock; /** Import generation when request for this bulk was sent */ int bd_import_generation; + /** {put,get}{source,sink}{kvec,kiov} */ + enum ptlrpc_bulk_op_type bd_type; /** LNet portal for this bulk */ __u32 bd_portal; /** Server side - export this bulk created for */ @@ -1827,13 +1211,14 @@ struct ptlrpc_bulk_desc { struct obd_import *bd_import; /** Back pointer to the request */ struct ptlrpc_request *bd_req; + struct ptlrpc_bulk_frag_ops *bd_frag_ops; wait_queue_head_t bd_waitq; /* server side only WQ */ int bd_iov_count; /* # entries in bd_iov */ int bd_max_iov; /* allocated size of bd_iov */ int bd_nob; /* # bytes covered */ int bd_nob_transferred; /* # bytes GOT/PUT */ - __u64 bd_last_xid; + u64 bd_last_mbits; struct ptlrpc_cb_id bd_cbid; /* network callback info */ lnet_nid_t bd_sender; /* stash event::sender */ @@ -1842,14 +1227,31 @@ struct ptlrpc_bulk_desc { /** array of associated MDs */ lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT]; - /* - * encrypt iov, size is either 0 or bd_iov_count. - */ - lnet_kiov_t *bd_enc_iov; - - lnet_kiov_t bd_iov[0]; + union { + struct { + /* + * encrypt iov, size is either 0 or bd_iov_count. + */ + struct bio_vec *bd_enc_vec; + struct bio_vec *bd_vec; /* Array of bio_vecs */ + } bd_kiov; + + struct { + struct kvec *bd_enc_kvec; + struct kvec *bd_kvec; /* Array of kvecs */ + } bd_kvec; + } bd_u; }; +#define GET_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_vec) +#define BD_GET_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_vec[i]) +#define GET_ENC_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_enc_vec) +#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i]) +#define GET_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_kvec) +#define BD_GET_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_kvec[i]) +#define GET_ENC_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_enc_kvec) +#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i]) + enum { SVC_STOPPED = 1 << 0, SVC_STOPPING = 1 << 1, @@ -2464,21 +1866,17 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, void ptlrpc_req_finished(struct ptlrpc_request *request); struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req); struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - unsigned npages, unsigned max_brw, - unsigned type, unsigned portal); -void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin); -static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk) -{ - __ptlrpc_free_bulk(bulk, 1); -} - -static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk) -{ - __ptlrpc_free_bulk(bulk, 0); -} - + unsigned int nfrags, + unsigned int max_brw, + unsigned int type, + unsigned int portal, + const struct ptlrpc_bulk_frag_ops *ops); + +int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, + void *frag, int len); void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, int len, int); + struct page *page, int pageoffset, int len, + int pin); static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc, struct page *page, int pageoffset, int len) @@ -2493,6 +1891,16 @@ static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc, __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0); } +void 
ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk); + +static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc) +{ + int i; + + for (i = 0; i < desc->bd_iov_count ; i++) + put_page(BD_GET_KIOV(desc, i).bv_page); +} + void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, struct obd_import *imp); __u64 ptlrpc_next_xid(void); @@ -2652,6 +2060,7 @@ struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg); __u32 lustre_msg_get_type(struct lustre_msg *msg); void lustre_msg_add_version(struct lustre_msg *msg, u32 version); __u32 lustre_msg_get_opc(struct lustre_msg *msg); +__u16 lustre_msg_get_tag(struct lustre_msg *msg); __u64 lustre_msg_get_last_committed(struct lustre_msg *msg); __u64 *lustre_msg_get_versions(struct lustre_msg *msg); __u64 lustre_msg_get_transno(struct lustre_msg *msg); @@ -2670,6 +2079,8 @@ void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle); void lustre_msg_set_type(struct lustre_msg *msg, __u32 type); void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc); +void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid); +void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag); void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions); void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno); void lustre_msg_set_status(struct lustre_msg *msg, __u32 status); @@ -2679,6 +2090,7 @@ void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout); void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time); void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid); void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum); +void lustre_msg_set_mbits(struct lustre_msg *msg, u64 mbits); static inline void lustre_shrink_reply(struct ptlrpc_request *req, int segment, diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h new file mode 100644 index 0000000000000000000000000000000000000000..a5028aaa19cd82d5e7a72329905e8f628cb5b616 --- /dev/null +++ b/drivers/staging/lustre/lustre/include/lustre_nrs.h @@ -0,0 +1,717 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2014, Intel Corporation. + * + * Copyright 2012 Xyratex Technology Limited + */ +/* + * + * Network Request Scheduler (NRS) + * + */ + +#ifndef _LUSTRE_NRS_H +#define _LUSTRE_NRS_H + +/** + * \defgroup nrs Network Request Scheduler + * @{ + */ +struct ptlrpc_nrs_policy; +struct ptlrpc_nrs_resource; +struct ptlrpc_nrs_request; + +/** + * NRS control operations. + * + * These are common for all policies. + */ +enum ptlrpc_nrs_ctl { + /** + * Not a valid opcode. + */ + PTLRPC_NRS_CTL_INVALID, + /** + * Activate the policy. 
+ */ + PTLRPC_NRS_CTL_START, + /** + * Reserved for multiple primary policies, which may be a possibility + * in the future. + */ + PTLRPC_NRS_CTL_STOP, + /** + * Policies can start using opcodes from this value and onwards for + * their own purposes; the assigned value itself is arbitrary. + */ + PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20, +}; + +/** + * NRS policy operations. + * + * These determine the behaviour of a policy, and are called in response to + * NRS core events. + */ +struct ptlrpc_nrs_pol_ops { + /** + * Called during policy registration; this operation is optional. + * + * \param[in,out] policy The policy being initialized + */ + int (*op_policy_init)(struct ptlrpc_nrs_policy *policy); + /** + * Called during policy unregistration; this operation is optional. + * + * \param[in,out] policy The policy being unregistered/finalized + */ + void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy); + /** + * Called when activating a policy via lprocfs; policies allocate and + * initialize their resources here; this operation is optional. + * + * \param[in,out] policy The policy being started + * + * \see nrs_policy_start_locked() + */ + int (*op_policy_start)(struct ptlrpc_nrs_policy *policy); + /** + * Called when deactivating a policy via lprocfs; policies deallocate + * their resources here; this operation is optional. + * + * \param[in,out] policy The policy being stopped + * + * \see nrs_policy_stop0() + */ + void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy); + /** + * Used for policy-specific operations; i.e. not generic ones like + * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous + * to an ioctl; this operation is optional. + * + * \param[in,out] policy The policy carrying out operation \a opc + * \param[in] opc The command operation being carried out + * \param[in,out] arg A generic buffer for communication between the + * user and the control operation + * + * \retval -ve error + * \retval 0 success + * + * \see ptlrpc_nrs_policy_control() + */ + int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy, + enum ptlrpc_nrs_ctl opc, void *arg); + + /** + * Called when obtaining references to the resources of the resource + * hierarchy for a request that has arrived for handling at the PTLRPC + * service. Policies should return -ve for requests they do not wish + * to handle. This operation is mandatory. + * + * \param[in,out] policy The policy we're getting resources for. + * \param[in,out] nrq The request we are getting resources for. + * \param[in] parent The parent resource of the resource being + * requested; set to NULL if none. + * \param[out] resp The resource is to be returned here; the + * fallback policy in an NRS head should + * \e always return a non-NULL pointer value. + * \param[in] moving_req When set, signifies that this is an attempt + * to obtain resources for a request being moved + * to the high-priority NRS head by + * ldlm_lock_reorder_req(). + * This implies two things: + * 1. We are under obd_export::exp_rpc_lock and + * so should not sleep. + * 2. We should not perform non-idempotent operations, + * and can skip idempotent operations that + * were already carried out when resources were first + * taken for the request when it was initialized + * in ptlrpc_nrs_req_initialize(). + * + * \retval 0, +ve The level of the returned resource in the resource + * hierarchy; currently only 0 (for a non-leaf resource) + * and 1 (for a leaf resource) are supported by the + * framework. 
+ * \retval -ve error + * + * \see ptlrpc_nrs_req_initialize() + * \see ptlrpc_nrs_hpreq_add_nolock() + * \see ptlrpc_nrs_req_hp_move() + */ + int (*op_res_get)(struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_request *nrq, + const struct ptlrpc_nrs_resource *parent, + struct ptlrpc_nrs_resource **resp, + bool moving_req); + /** + * Called when releasing references taken for resources in the resource + * hierarchy for the request; this operation is optional. + * + * \param[in,out] policy The policy the resource belongs to + * \param[in] res The resource to be freed + * + * \see ptlrpc_nrs_req_finalize() + * \see ptlrpc_nrs_hpreq_add_nolock() + * \see ptlrpc_nrs_req_hp_move() + */ + void (*op_res_put)(struct ptlrpc_nrs_policy *policy, + const struct ptlrpc_nrs_resource *res); + + /** + * Obtains a request for handling from the policy, and optionally + * removes the request from the policy; this operation is mandatory. + * + * \param[in,out] policy The policy to poll + * \param[in] peek When set, signifies that we just want to + * examine the request, and not handle it, so the + * request is not removed from the policy. + * \param[in] force When set, it will force a policy to return a + * request if it has one queued. + * + * \retval NULL No request available for handling + * \retval valid-pointer The request polled for handling + * + * \see ptlrpc_nrs_req_get_nolock() + */ + struct ptlrpc_nrs_request * + (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek, + bool force); + /** + * Called when attempting to add a request to a policy for later + * handling; this operation is mandatory. + * + * \param[in,out] policy The policy on which to enqueue \a nrq + * \param[in,out] nrq The request to enqueue + * + * \retval 0 success + * \retval != 0 error + * + * \see ptlrpc_nrs_req_add_nolock() + */ + int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_request *nrq); + /** + * Removes a request from the policy's set of pending requests. Normally + * called after a request has been polled successfully from the policy + * for handling; this operation is mandatory. + * + * \param[in,out] policy The policy the request \a nrq belongs to + * \param[in,out] nrq The request to dequeue + * + * \see ptlrpc_nrs_req_del_nolock() + */ + void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_request *nrq); + /** + * Called after the request has been carried out. Could be used for + * job/resource control; this operation is optional. + * + * \param[in,out] policy The policy which is stopping to handle request + * \a nrq + * \param[in,out] nrq The request + * + * \pre assert_spin_locked(&svcpt->scp_req_lock) + * + * \see ptlrpc_nrs_req_stop_nolock() + */ + void (*op_req_stop)(struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_request *nrq); + /** + * Registers the policy's lprocfs interface with a PTLRPC service. + * + * \param[in] svc The service + * + * \retval 0 success + * \retval != 0 error + */ + int (*op_lprocfs_init)(struct ptlrpc_service *svc); + /** + * Unregisters the policy's lprocfs interface with a PTLRPC service. + * + * In cases of failed policy registration in + * \e ptlrpc_nrs_policy_register(), this function may be called for a + * service which has not registered the policy successfully, so + * implementations of this method should make sure their operations are + * safe in such cases. 
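As an illustrative aside (not part of this patch), a policy implementation would typically wire its handlers into this operations table as sketched below; the my_pol_* names are hypothetical, and per the comments above only op_res_get, op_req_get, op_req_enqueue and op_req_dequeue are mandatory:

static const struct ptlrpc_nrs_pol_ops my_pol_ops = {
	.op_policy_start	= my_pol_start,		/* optional */
	.op_policy_stop		= my_pol_stop,		/* optional */
	.op_res_get		= my_pol_res_get,	/* mandatory */
	.op_res_put		= my_pol_res_put,	/* optional */
	.op_req_get		= my_pol_req_get,	/* mandatory */
	.op_req_enqueue		= my_pol_req_enqueue,	/* mandatory */
	.op_req_dequeue		= my_pol_req_dequeue,	/* mandatory */
	.op_req_stop		= my_pol_req_stop,	/* optional */
};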
+ * + * \param[in] svc The service + */ + void (*op_lprocfs_fini)(struct ptlrpc_service *svc); +}; + +/** + * Policy flags + */ +enum nrs_policy_flags { + /** + * Fallback policy, use this flag only on a single supported policy per + * service. The flag cannot be used on policies that use + * \e PTLRPC_NRS_FL_REG_EXTERN + */ + PTLRPC_NRS_FL_FALLBACK = BIT(0), + /** + * Start policy immediately after registering. + */ + PTLRPC_NRS_FL_REG_START = BIT(1), + /** + * This is a policy registering from a module different to the one NRS + * core ships in (currently ptlrpc). + */ + PTLRPC_NRS_FL_REG_EXTERN = BIT(2), +}; + +/** + * NRS queue type. + * + * Denotes whether an NRS instance is for handling normal or high-priority + * RPCs, or whether an operation pertains to one or both of the NRS instances + * in a service. + */ +enum ptlrpc_nrs_queue_type { + PTLRPC_NRS_QUEUE_REG = BIT(0), + PTLRPC_NRS_QUEUE_HP = BIT(1), + PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP) +}; + +/** + * NRS head + * + * A PTLRPC service has at least one NRS head instance for handling normal + * priority RPCs, and may optionally have a second NRS head instance for + * handling high-priority RPCs. Each NRS head maintains a list of available + * policies, of which one and only one policy is acting as the fallback policy, + * and optionally a different policy may be acting as the primary policy. For + * all RPCs handled by this NRS head instance, NRS core will first attempt to + * enqueue the RPC using the primary policy (if any). The fallback policy is + * used in the following cases: + * - when there was no primary policy in the + * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request + * was initialized. + * - when the primary policy that was at the + * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the + * RPC was initialized, denoted it did not wish, or for some other reason was + * not able to handle the request, by returning a non-valid NRS resource + * reference. + * - when the primary policy that was at the + * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the + * RPC was initialized, fails later during the request enqueueing stage. + * + * \see nrs_resource_get_safe() + * \see nrs_request_enqueue() + */ +struct ptlrpc_nrs { + spinlock_t nrs_lock; + /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */ + /** + * List of registered policies + */ + struct list_head nrs_policy_list; + /** + * List of policies with queued requests. Policies that have any + * outstanding requests are queued here, and this list is queried + * in a round-robin manner from NRS core when obtaining a request + * for handling. This ensures that requests from policies that at some + * point transition away from the + * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained. 
+ */ + struct list_head nrs_policy_queued; + /** + * Service partition for this NRS head + */ + struct ptlrpc_service_part *nrs_svcpt; + /** + * Primary policy, which is the preferred policy for handling RPCs + */ + struct ptlrpc_nrs_policy *nrs_policy_primary; + /** + * Fallback policy, which is the backup policy for handling RPCs + */ + struct ptlrpc_nrs_policy *nrs_policy_fallback; + /** + * This NRS head handles either HP or regular requests + */ + enum ptlrpc_nrs_queue_type nrs_queue_type; + /** + * # queued requests from all policies in this NRS head + */ + unsigned long nrs_req_queued; + /** + * # scheduled requests from all policies in this NRS head + */ + unsigned long nrs_req_started; + /** + * # policies on this NRS + */ + unsigned int nrs_num_pols; + /** + * This NRS head is in progress of starting a policy + */ + unsigned int nrs_policy_starting:1; + /** + * In progress of shutting down the whole NRS head; used during + * unregistration + */ + unsigned int nrs_stopping:1; + /** + * NRS policy is throttling request + */ + unsigned int nrs_throttling:1; +}; + +#define NRS_POL_NAME_MAX 16 +#define NRS_POL_ARG_MAX 16 + +struct ptlrpc_nrs_pol_desc; + +/** + * Service compatibility predicate; this determines whether a policy is adequate + * for handling RPCs of a particular PTLRPC service. + * + * XXX:This should give the same result during policy registration and + * unregistration, and for all partitions of a service; so the result should not + * depend on temporal service or other properties, that may influence the + * result. + */ +typedef bool (*nrs_pol_desc_compat_t)(const struct ptlrpc_service *svc, + const struct ptlrpc_nrs_pol_desc *desc); + +struct ptlrpc_nrs_pol_conf { + /** + * Human-readable policy name + */ + char nc_name[NRS_POL_NAME_MAX]; + /** + * NRS operations for this policy + */ + const struct ptlrpc_nrs_pol_ops *nc_ops; + /** + * Service compatibility predicate + */ + nrs_pol_desc_compat_t nc_compat; + /** + * Set for policies that support a single ptlrpc service, i.e. ones that + * have \a pd_compat set to nrs_policy_compat_one(). The variable value + * depicts the name of the single service that such policies are + * compatible with. + */ + const char *nc_compat_svc_name; + /** + * Owner module for this policy descriptor; policies registering from a + * different module to the one the NRS framework is held within + * (currently ptlrpc), should set this field to THIS_MODULE. + */ + struct module *nc_owner; + /** + * Policy registration flags; a bitmask of \e nrs_policy_flags + */ + unsigned int nc_flags; +}; + +/** + * NRS policy registering descriptor + * + * Is used to hold a description of a policy that can be passed to NRS core in + * order to register the policy with NRS heads in different PTLRPC services. + */ +struct ptlrpc_nrs_pol_desc { + /** + * Human-readable policy name + */ + char pd_name[NRS_POL_NAME_MAX]; + /** + * Link into nrs_core::nrs_policies + */ + struct list_head pd_list; + /** + * NRS operations for this policy + */ + const struct ptlrpc_nrs_pol_ops *pd_ops; + /** + * Service compatibility predicate + */ + nrs_pol_desc_compat_t pd_compat; + /** + * Set for policies that are compatible with only one PTLRPC service. + * + * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name + */ + const char *pd_compat_svc_name; + /** + * Owner module for this policy descriptor. + * + * We need to hold a reference to the module whenever we might make use + * of any of the module's contents, i.e. 
+ * - If one or more instances of the policy are at a state where they + * might be handling a request, i.e. + * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or + * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to + * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference + * is taken on the module when + * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it + * becomes 0, so that we hold only one reference to the module maximum + * at any time. + * + * We do not need to hold a reference to the module, even though we + * might use code and data from the module, in the following cases: + * - During external policy registration, because this should happen in + * the module's init() function, in which case the module is safe from + * removal because a reference is being held on the module by the + * kernel, and iirc kmod (and I guess module-init-tools also) will + * serialize any racing processes properly anyway. + * - During external policy unregistration, because this should happen + * in a module's exit() function, and any attempts to start a policy + * instance would need to take a reference on the module, and this is + * not possible once we have reached the point where the exit() + * handler is called. + * - During service registration and unregistration, as service setup + * and cleanup, and policy registration, unregistration and policy + * instance starting, are serialized by \e nrs_core::nrs_mutex, so + * as long as users adhere to the convention of registering policies + * in init() and unregistering them in module exit() functions, there + * should not be a race between these operations. + * - During any policy-specific lprocfs operations, because a reference + * is held by the kernel on a proc entry that has been entered by a + * syscall, so as long as proc entries are removed during + * unregistration time, then unregistration and lprocfs operations + * will be properly serialized. + */ + struct module *pd_owner; + /** + * Bitmask of \e nrs_policy_flags + */ + unsigned int pd_flags; + /** + * # of references on this descriptor + */ + atomic_t pd_refs; +}; + +/** + * NRS policy state + * + * Policies transition from one state to the other during their lifetime + */ +enum ptlrpc_nrs_pol_state { + /** + * Not a valid policy state. + */ + NRS_POL_STATE_INVALID, + /** + * Policies are at this state either at the start of their life, or + * transition here when the user selects a different policy to act + * as the primary one. + */ + NRS_POL_STATE_STOPPED, + /** + * Policy is in progress of stopping + */ + NRS_POL_STATE_STOPPING, + /** + * Policy is in progress of starting + */ + NRS_POL_STATE_STARTING, + /** + * A policy is in this state in two cases: + * - it is the fallback policy, which is always in this state. + * - it has been activated by the user; i.e. it is the primary policy. + */ + NRS_POL_STATE_STARTED, +}; + +/** + * NRS policy information + * + * Used for obtaining information on the status of a policy via lprocfs + */ +struct ptlrpc_nrs_pol_info { + /** + * Policy name + */ + char pi_name[NRS_POL_NAME_MAX]; + /** + * Policy argument + */ + char pi_arg[NRS_POL_ARG_MAX]; + /** + * Current policy state + */ + enum ptlrpc_nrs_pol_state pi_state; + /** + * # RPCs enqueued for later dispatching by the policy + */ + long pi_req_queued; + /** + * # RPCs started for dispatch by the policy + */ + long pi_req_started; + /** + * Is this a fallback policy? 
+ */ + unsigned pi_fallback:1; +}; + +/** + * NRS policy + * + * There is one instance of this for each policy in each NRS head of each + * PTLRPC service partition. + */ +struct ptlrpc_nrs_policy { + /** + * Linkage into the NRS head's list of policies, + * ptlrpc_nrs:nrs_policy_list + */ + struct list_head pol_list; + /** + * Linkage into the NRS head's list of policies with enqueued + * requests ptlrpc_nrs:nrs_policy_queued + */ + struct list_head pol_list_queued; + /** + * Current state of this policy + */ + enum ptlrpc_nrs_pol_state pol_state; + /** + * Bitmask of nrs_policy_flags + */ + unsigned int pol_flags; + /** + * # RPCs enqueued for later dispatching by the policy + */ + long pol_req_queued; + /** + * # RPCs started for dispatch by the policy + */ + long pol_req_started; + /** + * Usage reference count taken on the policy instance + */ + long pol_ref; + /** + * Human-readable policy argument + */ + char pol_arg[NRS_POL_ARG_MAX]; + /** + * The NRS head this policy has been created at + */ + struct ptlrpc_nrs *pol_nrs; + /** + * Private policy data; varies by policy type + */ + void *pol_private; + /** + * Policy descriptor for this policy instance. + */ + struct ptlrpc_nrs_pol_desc *pol_desc; +}; + +/** + * NRS resource + * + * Resources are embedded into two types of NRS entities: + * - Inside NRS policies, in the policy's private data in + * ptlrpc_nrs_policy::pol_private + * - In objects that act as prime-level scheduling entities in different NRS + * policies; e.g. on a policy that performs round robin or similar order + * scheduling across client NIDs, there would be one NRS resource per unique + * client NID. On a policy which performs round robin scheduling across + * backend filesystem objects, there would be one resource associated with + * each of the backend filesystem objects partaking in the scheduling + * performed by the policy. + * + * NRS resources share a parent-child relationship, in which resources embedded + * in policy instances are the parent entities, with all scheduling entities + * a policy schedules across being the children, thus forming a simple resource + * hierarchy. This hierarchy may be extended with one or more levels in the + * future if the ability to have more than one primary policy is added. + * + * Upon request initialization, references to the then active NRS policies are + * taken and used to later handle the dispatching of the request with one of + * these policies. + * + * \see nrs_resource_get_safe() + * \see ptlrpc_nrs_req_add() + */ +struct ptlrpc_nrs_resource { + /** + * This NRS resource's parent; is NULL for resources embedded in NRS + * policy instances; i.e. those are top-level ones. + */ + struct ptlrpc_nrs_resource *res_parent; + /** + * The policy associated with this resource. + */ + struct ptlrpc_nrs_policy *res_policy; +}; + +enum { + NRS_RES_FALLBACK, + NRS_RES_PRIMARY, + NRS_RES_MAX +}; + +#include "lustre_nrs_fifo.h" + +/** + * NRS request + * + * Instances of this object exist embedded within ptlrpc_request; the main + * purpose of this object is to hold references to the request's resources + * for the lifetime of the request, and to hold properties that policies + * use for determining the request's scheduling priority. + **/ +struct ptlrpc_nrs_request { + /** + * The request's resource hierarchy. + */ + struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX]; + /** + * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the + * policy that was used to enqueue the request. 
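To illustrate the embedding described above (a sketch, not part of the patch; it reuses the FIFO head declared in lustre_nrs_fifo.h), a policy can recover its private structure from the leaf resource attached to a request via container_of():

static inline struct nrs_fifo_head *
nrs_request_fifo_head(struct ptlrpc_nrs_request *nrq)
{
	/* resource of the policy that was used to enqueue the request */
	struct ptlrpc_nrs_resource *res = nrq->nr_res_ptrs[nrq->nr_res_idx];

	/* the FIFO policy embeds its resource in its policy-private head */
	return container_of(res, struct nrs_fifo_head, fh_res);
}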
+ * + * \see nrs_request_enqueue() + */ + unsigned int nr_res_idx; + unsigned int nr_initialized:1; + unsigned int nr_enqueued:1; + unsigned int nr_started:1; + unsigned int nr_finalized:1; + + /** + * Policy-specific fields, used for determining a request's scheduling + * priority, and other supporting functionality. + */ + union { + /** + * Fields for the FIFO policy + */ + struct nrs_fifo_req fifo; + } nr_u; + /** + * Externally-registering policies may want to use this to allocate + * their own request properties. + */ + void *ext; +}; + +/** @} nrs */ +#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h new file mode 100644 index 0000000000000000000000000000000000000000..3b5418eac6c44297eb26e61b051b363fa1fce428 --- /dev/null +++ b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h @@ -0,0 +1,70 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2014, Intel Corporation. + * + * Copyright 2012 Xyratex Technology Limited + */ +/* + * + * Network Request Scheduler (NRS) First-in First-out (FIFO) policy + * + */ + +#ifndef _LUSTRE_NRS_FIFO_H +#define _LUSTRE_NRS_FIFO_H + +/* \name fifo + * + * FIFO policy + * + * This policy is a logical wrapper around previous, non-NRS functionality. + * It dispatches RPCs in the same order as they arrive from the network. This + * policy is currently used as the fallback policy, and the only enabled policy + * on all NRS heads of all PTLRPC service partitions. + * @{ + */ + +/** + * Private data structure for the FIFO policy + */ +struct nrs_fifo_head { + /** + * Resource object for policy instance. + */ + struct ptlrpc_nrs_resource fh_res; + /** + * List of queued requests. + */ + struct list_head fh_list; + /** + * For debugging purposes. 
+ */ + __u64 fh_sequence; +}; + +struct nrs_fifo_req { + struct list_head fr_list; + __u64 fr_sequence; +}; + +/** @} fifo */ +#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h index a13558e53274bcbd6151f84eff2b76a70a468e0e..fbcd39572cd01e63c8dec07be8ce59cad752799f 100644 --- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h +++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h @@ -148,13 +148,12 @@ extern struct req_format RQF_MDS_GETATTR; */ extern struct req_format RQF_MDS_GETATTR_NAME; extern struct req_format RQF_MDS_CLOSE; -extern struct req_format RQF_MDS_RELEASE_CLOSE; +extern struct req_format RQF_MDS_INTENT_CLOSE; extern struct req_format RQF_MDS_CONNECT; extern struct req_format RQF_MDS_DISCONNECT; extern struct req_format RQF_MDS_GET_INFO; extern struct req_format RQF_MDS_READPAGE; extern struct req_format RQF_MDS_WRITEPAGE; -extern struct req_format RQF_MDS_DONE_WRITING; extern struct req_format RQF_MDS_REINT; extern struct req_format RQF_MDS_REINT_CREATE; extern struct req_format RQF_MDS_REINT_CREATE_ACL; @@ -166,10 +165,9 @@ extern struct req_format RQF_MDS_REINT_LINK; extern struct req_format RQF_MDS_REINT_RENAME; extern struct req_format RQF_MDS_REINT_SETATTR; extern struct req_format RQF_MDS_REINT_SETXATTR; -extern struct req_format RQF_MDS_QUOTACHECK; extern struct req_format RQF_MDS_QUOTACTL; -extern struct req_format RQF_QC_CALLBACK; extern struct req_format RQF_MDS_SWAP_LAYOUTS; +extern struct req_format RQF_MDS_REINT_MIGRATE; /* MDS hsm formats */ extern struct req_format RQF_MDS_HSM_STATE_GET; extern struct req_format RQF_MDS_HSM_STATE_SET; @@ -181,7 +179,6 @@ extern struct req_format RQF_MDS_HSM_REQUEST; /* OST req_format */ extern struct req_format RQF_OST_CONNECT; extern struct req_format RQF_OST_DISCONNECT; -extern struct req_format RQF_OST_QUOTACHECK; extern struct req_format RQF_OST_QUOTACTL; extern struct req_format RQF_OST_GETATTR; extern struct req_format RQF_OST_SETATTR; diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h index 90c183424802499cc27f651ae955f526aa2aad07..03a970bcac5544b6387f7ddc775db62a12058ca5 100644 --- a/drivers/staging/lustre/lustre/include/lustre_sec.h +++ b/drivers/staging/lustre/lustre/include/lustre_sec.h @@ -50,6 +50,7 @@ struct brw_page; /* Linux specific */ struct key; struct seq_file; +struct lustre_cfg; /* * forward declaration @@ -1029,6 +1030,8 @@ int sptlrpc_target_export_check(struct obd_export *exp, /* bulk security api */ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc); +int get_free_pages_in_pool(void); +int pool_is_at_full_capacity(void); int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc); diff --git a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h new file mode 100644 index 0000000000000000000000000000000000000000..26d01c2d66336f6e09f62c9864da7a1ad18a0f64 --- /dev/null +++ b/drivers/staging/lustre/lustre/include/lustre_swab.h @@ -0,0 +1,102 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2014, Intel Corporation. + * + * Copyright 2015 Cray Inc, all rights reserved. + * Author: Ben Evans. + * + * We assume all nodes are either little-endian or big-endian, and we + * always send messages in the sender's native format. The receiver + * detects the message format by checking the 'magic' field of the message + * (see lustre_msg_swabbed() below). + * + * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines, + * implemented in ptlrpc/lustre_swab.c. These 'swabbers' convert the + * type from "other" endian, in-place in the message buffer. + * + * A swabber takes a single pointer argument. The caller must already have + * verified that the length of the message buffer >= sizeof (type). + * + * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine + * may be defined that swabs just the variable part, after the caller has + * verified that the message buffer is large enough. + */ + +#ifndef _LUSTRE_SWAB_H_ +#define _LUSTRE_SWAB_H_ + +#include "lustre/lustre_idl.h" + +void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); +void lustre_swab_connect(struct obd_connect_data *ocd); +void lustre_swab_hsm_user_state(struct hsm_user_state *hus); +void lustre_swab_hsm_state_set(struct hsm_state_set *hss); +void lustre_swab_obd_statfs(struct obd_statfs *os); +void lustre_swab_obd_ioobj(struct obd_ioobj *ioo); +void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); +void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb); +void lustre_swab_ost_lvb(struct ost_lvb *lvb); +void lustre_swab_obd_quotactl(struct obd_quotactl *q); +void lustre_swab_lquota_lvb(struct lquota_lvb *lvb); +void lustre_swab_generic_32s(__u32 *val); +void lustre_swab_mdt_body(struct mdt_body *b); +void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b); +void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); +void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); +void lustre_swab_lmv_desc(struct lmv_desc *ld); +void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm); +void lustre_swab_lov_desc(struct lov_desc *ld); +void lustre_swab_gl_desc(union ldlm_gl_desc *desc); +void lustre_swab_ldlm_intent(struct ldlm_intent *i); +void lustre_swab_ldlm_request(struct ldlm_request *rq); +void lustre_swab_ldlm_reply(struct ldlm_reply *r); +void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo); +void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo); +void lustre_swab_mgs_config_body(struct mgs_config_body *body); +void lustre_swab_mgs_config_res(struct mgs_config_res *body); +void lustre_swab_ost_body(struct ost_body *b); +void lustre_swab_ost_last_id(__u64 *id); +void lustre_swab_fiemap(struct fiemap *fiemap); +void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum); +void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); +void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, + int 
stripe_count); +void lustre_swab_lov_mds_md(struct lov_mds_md *lmm); +void lustre_swab_lustre_capa(struct lustre_capa *c); +void lustre_swab_lustre_capa_key(struct lustre_capa_key *k); +void lustre_swab_fid2path(struct getinfo_fid2path *gf); +void lustre_swab_layout_intent(struct layout_intent *li); +void lustre_swab_hsm_user_state(struct hsm_user_state *hus); +void lustre_swab_hsm_current_action(struct hsm_current_action *action); +void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk); +void lustre_swab_hsm_user_state(struct hsm_user_state *hus); +void lustre_swab_hsm_user_item(struct hsm_user_item *hui); +void lustre_swab_hsm_request(struct hsm_request *hr); +void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl); +void lustre_swab_close_data(struct close_data *data); +void lustre_swab_lmv_user_md(struct lmv_user_md *lum); + +#endif diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index f6fc4dd05bd62f3a23cb5d5566fe1edd0225de04..0f48e9c3d9e30f14153ec2533299ed3c52b9a321 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -73,70 +73,17 @@ static inline void loi_init(struct lov_oinfo *loi) { } -/* - * If we are unable to get the maximum object size from the OST in - * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using - * the old maximum object size from ext3. - */ -#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL - -struct lov_stripe_md { - atomic_t lsm_refc; - spinlock_t lsm_lock; - pid_t lsm_lock_owner; /* debugging */ - - /* maximum possible file size, might change as OSTs status changes, - * e.g. disconnected, deactivated - */ - __u64 lsm_maxbytes; - struct ost_id lsm_oi; - __u32 lsm_magic; - __u32 lsm_stripe_size; - __u32 lsm_pattern; /* striping pattern (RAID0, RAID1) */ - __u16 lsm_stripe_count; - __u16 lsm_layout_gen; - char lsm_pool_name[LOV_MAXPOOLNAME + 1]; - struct lov_oinfo *lsm_oinfo[0]; -}; - -static inline bool lsm_is_released(struct lov_stripe_md *lsm) -{ - return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED); -} - -static inline bool lsm_has_objects(struct lov_stripe_md *lsm) -{ - if (!lsm) - return false; - if (lsm_is_released(lsm)) - return false; - return true; -} - -static inline int lov_stripe_md_size(unsigned int stripe_count) -{ - struct lov_stripe_md lsm; - - return sizeof(lsm) + stripe_count * sizeof(lsm.lsm_oinfo[0]); -} - +struct lov_stripe_md; struct obd_info; typedef int (*obd_enqueue_update_f)(void *cookie, int rc); /* obd info for a particular level (lov, osc). */ struct obd_info { - /* Flags used for set request specific flags: - - while lock handling, the flags obtained on the enqueue - request are set here. - - while stats, the flags used for control delay/resend. - - while setattr, the flags used for distinguish punch operation - */ + /* OBD_STATFS_* flags */ __u64 oi_flags; /* lsm data specific for every OSC. */ struct lov_stripe_md *oi_md; - /* obdo data specific for every OSC, if needed at all. */ - struct obdo *oi_oa; /* statfs data specific for every OSC, if needed at all. */ struct obd_statfs *oi_osfs; /* An update callback which is called to update some data on upper @@ -204,7 +151,6 @@ enum obd_cl_sem_lock_class { * on the MDS. 
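Returning to the swabber convention documented in lustre_swab.h above, a minimal usage sketch (illustrative only, assuming the lustre_msg_buf() and ptlrpc_rep_need_swab() helpers and the REPLY_REC_OFF buffer index available elsewhere in ptlrpc) would look roughly like this:

	struct obd_statfs *os;

	/* lustre_msg_buf() returns NULL if the buffer is < sizeof(*os) */
	os = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*os));
	if (os && ptlrpc_rep_need_swab(req))
		lustre_swab_obd_statfs(os);	/* swab in place */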
*/ #define OBD_MAX_DEFAULT_EA_SIZE 4096 -#define OBD_MAX_DEFAULT_COOKIE_SIZE 4096 struct mdc_rpc_lock; struct obd_import; @@ -214,7 +160,7 @@ struct client_obd { struct obd_import *cl_import; /* ptlrpc connection state */ size_t cl_conn_count; /* - * Cache maximum and default values for easize and cookiesize. This is + * Cache maximum and default values for easize. This is * strictly a performance optimization to minimize calls to * obd_size_diskmd(). The default values are used to calculate the * initial size of a request buffer. The ptlrpc layer will resize the @@ -235,18 +181,6 @@ struct client_obd { * run-time if a larger observed size is advertised by the MDT. */ u32 cl_max_mds_easize; - /* Default cookie size for llog cookies (see struct llog_cookie). It is - * initialized to zero at mount-time, then it tracks the largest - * observed cookie size advertised by the MDT, up to a maximum value of - * OBD_MAX_DEFAULT_COOKIE_SIZE. Note that llog_cookies are not - * used by clients communicating with MDS versions 2.4.0 and later. - */ - u32 cl_default_mds_cookiesize; - /* Maximum possible cookie size computed at mount-time based on - * the number of OSTs in the filesystem. May be increased at - * run-time if a larger observed size is advertised by the MDT. - */ - u32 cl_max_mds_cookiesize; enum lustre_sec_part cl_sp_me; enum lustre_sec_part cl_sp_to; @@ -313,15 +247,42 @@ struct client_obd { struct obd_histogram cl_read_offset_hist; struct obd_histogram cl_write_offset_hist; - /* lru for osc caching pages */ + /* LRU for osc caching pages */ struct cl_client_cache *cl_cache; - struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */ + /** member of cl_cache->ccc_lru */ + struct list_head cl_lru_osc; + /** # of available LRU slots left in the per-OSC cache. + * Available LRU slots are shared by all OSCs of the same file system, + * therefore this is a pointer to cl_client_cache::ccc_lru_left. + */ atomic_long_t *cl_lru_left; + /** # of busy LRU pages. A page is considered busy if it's in writeback + * queue, or in transfer. Busy pages can't be discarded so they are not + * in LRU cache. + */ atomic_long_t cl_lru_busy; + /** # of LRU pages in the cache for this client_obd */ atomic_long_t cl_lru_in_list; + /** # of threads are shrinking LRU cache. To avoid contention, it's not + * allowed to have multiple threads shrinking LRU cache. + */ atomic_t cl_lru_shrinkers; - struct list_head cl_lru_list; /* lru page list */ - spinlock_t cl_lru_list_lock; /* page list protector */ + /** The time when this LRU cache was last used. */ + time64_t cl_lru_last_used; + /** stats: how many reclaims have happened for this client_obd. + * reclaim and shrink - shrink is async, voluntarily rebalancing; + * reclaim is sync, initiated by IO thread when the LRU slots are + * in shortage. + */ + u64 cl_lru_reclaim; + /** List of LRU pages for this client_obd */ + struct list_head cl_lru_list; + /** Lock for LRU page list */ + spinlock_t cl_lru_list_lock; + /** # of unstable pages in this client_obd. + * An unstable page is a page state that WRITE RPC has finished but + * the transaction has NOT yet committed. 
+ */ atomic_long_t cl_unstable_count; /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ @@ -329,7 +290,17 @@ struct client_obd { wait_queue_head_t cl_destroy_waitq; struct mdc_rpc_lock *cl_rpc_lock; - struct mdc_rpc_lock *cl_close_lock; + + /* modify rpcs in flight + * currently used for metadata only + */ + spinlock_t cl_mod_rpcs_lock; + u16 cl_max_mod_rpcs_in_flight; + u16 cl_mod_rpcs_in_flight; + u16 cl_close_rpcs_in_flight; + wait_queue_head_t cl_mod_rpcs_waitq; + unsigned long *cl_mod_tag_bitmap; + struct obd_histogram cl_mod_rpcs_hist; /* mgc datastruct */ atomic_t cl_mgc_refcount; @@ -345,13 +316,6 @@ struct client_obd { /* also protected by the poorly named _loi_list_lock lock above */ struct osc_async_rc cl_ar; - /* used by quotacheck when the servers are older than 2.4 */ - int cl_qchk_stat; /* quotacheck stat of the peer */ -#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */ -#if OBD_OCD_VERSION(2, 7, 53, 0) < LUSTRE_VERSION_CODE -#warning "please consider removing quotacheck compatibility code" -#endif - /* sequence manager */ struct lu_client_seq *cl_seq; @@ -454,8 +418,6 @@ struct lmv_obd { int connected; int max_easize; int max_def_easize; - int max_cookiesize; - int max_def_cookiesize; u32 tgts_size; /* size of tgts array */ struct lmv_tgt_desc **tgts; @@ -469,9 +431,9 @@ struct niobuf_local { __u32 lnb_page_offset; __u32 lnb_len; __u32 lnb_flags; + int lnb_rc; struct page *lnb_page; void *lnb_data; - int lnb_rc; }; #define LUSTRE_FLD_NAME "fld" @@ -512,21 +474,6 @@ struct niobuf_local { /* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */ #define N_LOCAL_TEMP_PAGE 0x10000000 -struct obd_trans_info { - __u64 oti_xid; - /* Only used on the server side for tracking acks. */ - struct oti_req_ack_lock { - struct lustre_handle lock; - __u32 mode; - } oti_ack_locks[4]; - void *oti_handle; - struct llog_cookie oti_onecookie; - struct llog_cookie *oti_logcookies; - - /** VBR: versions */ - __u64 oti_pre_version; -}; - /* * Events signalled through obd_notify() upcall-chain. 
*/ @@ -587,15 +534,14 @@ struct lvfs_run_ctxt { struct obd_device { struct obd_type *obd_type; - __u32 obd_magic; + u32 obd_magic; /* OBD_DEVICE_MAGIC */ + int obd_minor; /* device number: lctl dl */ + struct lu_device *obd_lu_dev; /* common and UUID name of this device */ - char obd_name[MAX_OBD_NAME]; - struct obd_uuid obd_uuid; - - struct lu_device *obd_lu_dev; + struct obd_uuid obd_uuid; + char obd_name[MAX_OBD_NAME]; - int obd_minor; /* bitfield modification is protected by obd_dev_lock */ unsigned long obd_attached:1, /* finished attach */ obd_set_up:1, /* finished setup */ @@ -619,22 +565,22 @@ struct obd_device { unsigned long obd_recovery_expired:1; /* uuid-export hash body */ struct cfs_hash *obd_uuid_hash; - atomic_t obd_refcount; wait_queue_head_t obd_refcount_waitq; struct list_head obd_exports; struct list_head obd_unlinked_exports; struct list_head obd_delayed_exports; + atomic_t obd_refcount; int obd_num_exports; spinlock_t obd_nid_lock; struct ldlm_namespace *obd_namespace; struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */ /* a spinlock is OK for what we do now, may need a semaphore later */ spinlock_t obd_dev_lock; /* protect OBD bitfield above */ - struct mutex obd_dev_mutex; - __u64 obd_last_committed; spinlock_t obd_osfs_lock; struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */ __u64 obd_osfs_age; + u64 obd_last_committed; + struct mutex obd_dev_mutex; struct lvfs_run_ctxt obd_lvfs_ctxt; struct obd_llog_group obd_olg; /* default llog group */ struct obd_device *obd_observer; @@ -648,12 +594,13 @@ struct obd_device { struct lov_obd lov; struct lmv_obd lmv; } u; + /* Fields used by LProcFS */ - unsigned int obd_cntr_base; - struct lprocfs_stats *obd_stats; + struct lprocfs_stats *obd_stats; + unsigned int obd_cntr_base; - unsigned int md_cntr_base; - struct lprocfs_stats *md_stats; + struct lprocfs_stats *md_stats; + unsigned int md_cntr_base; struct dentry *obd_debugfs_entry; struct dentry *obd_svc_debugfs_entry; @@ -665,9 +612,11 @@ struct obd_device { /** * Ldlm pool part. Save last calculated SLV and Limit. */ - rwlock_t obd_pool_lock; - int obd_pool_limit; - __u64 obd_pool_slv; + rwlock_t obd_pool_lock; + u64 obd_pool_slv; + int obd_pool_limit; + + int obd_conn_inprogress; /** * A list of outstanding class_incref()'s against this obd. 
For @@ -675,19 +624,10 @@ struct obd_device { */ struct lu_ref obd_reference; - int obd_conn_inprogress; - struct kobject obd_kobj; /* sysfs object */ struct completion obd_kobj_unregister; }; -enum obd_cleanup_stage { -/* Special case hack for MDS LOVs */ - OBD_CLEANUP_EARLY, -/* can be directly mapped to .ldto_device_fini() */ - OBD_CLEANUP_EXPORTS, -}; - /* get/set_info keys */ #define KEY_ASYNC "async" #define KEY_CHANGELOG_CLEAR "changelog_clear" @@ -704,7 +644,6 @@ enum obd_cleanup_stage { #define KEY_INTERMDS "inter_mds" #define KEY_LAST_ID "last_id" #define KEY_LAST_FID "last_fid" -#define KEY_LOVDESC "lovdesc" #define KEY_MAX_EASIZE "max_easize" #define KEY_DEFAULT_EASIZE "default_easize" #define KEY_MGSSEC "mgssec" @@ -720,22 +659,6 @@ enum obd_cleanup_stage { struct lu_context; -/* /!\ must be coherent with include/linux/namei.h on patched kernel */ -#define IT_OPEN (1 << 0) -#define IT_CREAT (1 << 1) -#define IT_READDIR (1 << 2) -#define IT_GETATTR (1 << 3) -#define IT_LOOKUP (1 << 4) -#define IT_UNLINK (1 << 5) -#define IT_TRUNC (1 << 6) -#define IT_GETXATTR (1 << 7) -#define IT_EXEC (1 << 8) -#define IT_PIN (1 << 9) -#define IT_LAYOUT (1 << 10) -#define IT_QUOTA_DQACQ (1 << 11) -#define IT_QUOTA_CONN (1 << 12) -#define IT_SETXATTR (1 << 13) - static inline int it_to_lock_mode(struct lookup_intent *it) { /* CREAT needs to be tested before open (both could be set) */ @@ -755,6 +678,14 @@ static inline int it_to_lock_mode(struct lookup_intent *it) return -EINVAL; } +enum md_op_flags { + MF_MDC_CANCEL_FID1 = BIT(0), + MF_MDC_CANCEL_FID2 = BIT(1), + MF_MDC_CANCEL_FID3 = BIT(2), + MF_MDC_CANCEL_FID4 = BIT(3), + MF_GET_MDT_IDX = BIT(4), +}; + enum md_cli_flags { CLI_SET_MEA = BIT(0), CLI_RM_ENTRY = BIT(1), @@ -789,8 +720,6 @@ struct md_op_data { __u64 op_valid; loff_t op_attr_blocks; - /* Size-on-MDS epoch and flags. */ - __u64 op_ioepoch; __u32 op_flags; /* Various operation flags. 
*/ @@ -839,15 +768,13 @@ struct obd_ops { int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len, void *karg, void __user *uarg); int (*get_info)(const struct lu_env *env, struct obd_export *, - __u32 keylen, void *key, __u32 *vallen, void *val, - struct lov_stripe_md *lsm); + __u32 keylen, void *key, __u32 *vallen, void *val); int (*set_info_async)(const struct lu_env *, struct obd_export *, __u32 keylen, void *key, __u32 vallen, void *val, struct ptlrpc_request_set *set); int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg); - int (*precleanup)(struct obd_device *dev, - enum obd_cleanup_stage cleanup_stage); + int (*precleanup)(struct obd_device *dev); int (*cleanup)(struct obd_device *dev); int (*process_config)(struct obd_device *dev, u32 len, void *data); int (*postrecov)(struct obd_device *dev); @@ -887,35 +814,23 @@ struct obd_ops { struct obd_statfs *osfs, __u64 max_age, __u32 flags); int (*statfs_async)(struct obd_export *exp, struct obd_info *oinfo, __u64 max_age, struct ptlrpc_request_set *set); - int (*packmd)(struct obd_export *exp, struct lov_mds_md **disk_tgt, - struct lov_stripe_md *mem_src); - int (*unpackmd)(struct obd_export *exp, - struct lov_stripe_md **mem_tgt, - struct lov_mds_md *disk_src, int disk_len); int (*create)(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa, struct obd_trans_info *oti); + struct obdo *oa); int (*destroy)(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa, struct obd_trans_info *oti); + struct obdo *oa); int (*setattr)(const struct lu_env *, struct obd_export *exp, - struct obd_info *oinfo, struct obd_trans_info *oti); - int (*setattr_async)(struct obd_export *exp, struct obd_info *oinfo, - struct obd_trans_info *oti, - struct ptlrpc_request_set *rqset); + struct obdo *oa); int (*getattr)(const struct lu_env *env, struct obd_export *exp, - struct obd_info *oinfo); - int (*getattr_async)(struct obd_export *exp, struct obd_info *oinfo, - struct ptlrpc_request_set *set); + struct obdo *oa); int (*preprw)(const struct lu_env *env, int cmd, struct obd_export *exp, struct obdo *oa, int objcount, struct obd_ioobj *obj, struct niobuf_remote *remote, - int *nr_pages, struct niobuf_local *local, - struct obd_trans_info *oti); + int *nr_pages, struct niobuf_local *local); int (*commitrw)(const struct lu_env *env, int cmd, struct obd_export *exp, struct obdo *oa, int objcount, struct obd_ioobj *obj, struct niobuf_remote *remote, int pages, - struct niobuf_local *local, - struct obd_trans_info *oti, int rc); + struct niobuf_local *local, int rc); int (*init_export)(struct obd_export *exp); int (*destroy_export)(struct obd_export *exp); @@ -930,8 +845,6 @@ struct obd_ops { struct obd_uuid *(*get_uuid)(struct obd_export *exp); /* quota methods */ - int (*quotacheck)(struct obd_device *, struct obd_export *, - struct obd_quotactl *); int (*quotactl)(struct obd_device *, struct obd_export *, struct obd_quotactl *); @@ -954,7 +867,7 @@ struct obd_ops { /* lmv structures */ struct lustre_md { struct mdt_body *body; - struct lov_stripe_md *lsm; + struct lu_buf layout; struct lmv_stripe_md *lmv; #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *posix_acl; @@ -992,10 +905,8 @@ struct md_ops { int (*create)(struct obd_export *, struct md_op_data *, const void *, size_t, umode_t, uid_t, gid_t, cfs_cap_t, __u64, struct ptlrpc_request **); - int (*done_writing)(struct obd_export *, struct md_op_data *, - struct md_open_data *); int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *, - const ldlm_policy_data_t 
*, + const union ldlm_policy_data *, struct lookup_intent *, struct md_op_data *, struct lustre_handle *, __u64); int (*getattr)(struct obd_export *, struct md_op_data *, @@ -1012,8 +923,7 @@ struct md_ops { const char *, size_t, const char *, size_t, struct ptlrpc_request **); int (*setattr)(struct obd_export *, struct md_op_data *, void *, - size_t, void *, size_t, struct ptlrpc_request **, - struct md_open_data **mod); + size_t, struct ptlrpc_request **); int (*sync)(struct obd_export *, const struct lu_fid *, struct ptlrpc_request **); int (*read_page)(struct obd_export *, struct md_op_data *, @@ -1030,7 +940,7 @@ struct md_ops { u64, const char *, const char *, int, int, int, struct ptlrpc_request **); - int (*init_ea_size)(struct obd_export *, u32, u32, u32, u32); + int (*init_ea_size)(struct obd_export *, u32, u32); int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *, struct obd_export *, struct obd_export *, @@ -1052,11 +962,11 @@ struct md_ops { enum ldlm_mode (*lock_match)(struct obd_export *, __u64, const struct lu_fid *, enum ldlm_type, - ldlm_policy_data_t *, enum ldlm_mode, + union ldlm_policy_data *, enum ldlm_mode, struct lustre_handle *); int (*cancel_unused)(struct obd_export *, const struct lu_fid *, - ldlm_policy_data_t *, enum ldlm_mode, + union ldlm_policy_data *, enum ldlm_mode, enum ldlm_cancel_flags flags, void *opaque); int (*get_fid_from_lsm)(struct obd_export *, @@ -1071,6 +981,8 @@ struct md_ops { int (*revalidate_lock)(struct obd_export *, struct lookup_intent *, struct lu_fid *, __u64 *bits); + int (*unpackmd)(struct obd_export *exp, struct lmv_stripe_md **plsm, + const union lmv_mds_md *lmv, size_t lmv_size); /* * NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. 
Also, add a @@ -1078,33 +990,6 @@ struct md_ops { */ }; -struct lsm_operations { - void (*lsm_free)(struct lov_stripe_md *); - void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, u64 *, - u64 *); - void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *, - u64 *); - int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes, - __u16 *stripe_count); - int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md *lmm); -}; - -extern const struct lsm_operations lsm_v1_ops; -extern const struct lsm_operations lsm_v3_ops; -static inline const struct lsm_operations *lsm_op_find(int magic) -{ - switch (magic) { - case LOV_MAGIC_V1: - return &lsm_v1_ops; - case LOV_MAGIC_V3: - return &lsm_v3_ops; - default: - CERROR("Cannot recognize lsm_magic %08x\n", magic); - return NULL; - } -} - static inline struct md_open_data *obd_mod_alloc(void) { struct md_open_data *mod; diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h index 16094dbec08b09bc042c594d7ccc4c98f4e9370c..7ec25202cd2238f6d36537ba17a48621d7ac6573 100644 --- a/drivers/staging/lustre/lustre/include/obd_class.h +++ b/drivers/staging/lustre/lustre/include/obd_class.h @@ -100,6 +100,13 @@ int obd_get_request_slot(struct client_obd *cli); void obd_put_request_slot(struct client_obd *cli); __u32 obd_get_max_rpcs_in_flight(struct client_obd *cli); int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max); +int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, u16 max); +int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq); + +u16 obd_get_mod_rpc_slot(struct client_obd *cli, u32 opc, + struct lookup_intent *it); +void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc, + struct lookup_intent *it, u16 tag); struct llog_handle; struct llog_rec_hdr; @@ -175,10 +182,13 @@ struct lustre_profile { char *lp_profile; char *lp_dt; char *lp_md; + int lp_refs; + bool lp_list_deleted; }; struct lustre_profile *class_get_profile(const char *prof); void class_del_profile(const char *prof); +void class_put_profile(struct lustre_profile *lprof); void class_del_profiles(void); #if LUSTRE_TRACKS_LOCK_EXP_REFS @@ -269,10 +279,8 @@ static inline int lprocfs_climp_check(struct obd_device *obd) struct inode; struct lu_attr; struct obdo; -void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid); void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj); -void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid); #define OBT(dev) (dev)->obd_type #define OBP(dev, op) (dev)->obd_type->typ_dt_ops->op @@ -417,16 +425,14 @@ static inline int class_devno_max(void) static inline int obd_get_info(const struct lu_env *env, struct obd_export *exp, __u32 keylen, - void *key, __u32 *vallen, void *val, - struct lov_stripe_md *lsm) + void *key, __u32 *vallen, void *val) { int rc; EXP_CHECK_DT_OP(exp, get_info); EXP_COUNTER_INCREMENT(exp, get_info); - rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val, - lsm); + rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val); return rc; } @@ -505,8 +511,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg) return rc; } -static inline int obd_precleanup(struct obd_device *obd, - enum obd_cleanup_stage cleanup_stage) +static inline int obd_precleanup(struct obd_device *obd) { int rc; DECLARE_LU_VARS(ldt, d); @@ -517,20 +522,18 @@ static inline int obd_precleanup(struct obd_device *obd, ldt = 
obd->obd_type->typ_lu; d = obd->obd_lu_dev; if (ldt && d) { - if (cleanup_stage == OBD_CLEANUP_EXPORTS) { - struct lu_env env; + struct lu_env env; - rc = lu_env_init(&env, ldt->ldt_ctx_tags); - if (rc == 0) { - ldt->ldt_ops->ldto_device_fini(&env, d); - lu_env_fini(&env); - } + rc = lu_env_init(&env, ldt->ldt_ctx_tags); + if (!rc) { + ldt->ldt_ops->ldto_device_fini(&env, d); + lu_env_fini(&env); } } OBD_CHECK_DT_OP(obd, precleanup, 0); OBD_COUNTER_INCREMENT(obd, precleanup); - rc = OBP(obd, precleanup)(obd, cleanup_stage); + rc = OBP(obd, precleanup)(obd); return rc; } @@ -612,181 +615,51 @@ obd_process_config(struct obd_device *obd, int datalen, void *data) return rc; } -/* Pack an in-memory MD struct for storage on disk. - * Returns +ve size of packed MD (0 for free), or -ve error. - * - * If @disk_tgt == NULL, MD size is returned (max size if @mem_src == NULL). - * If @*disk_tgt != NULL and @mem_src == NULL, @*disk_tgt will be freed. - * If @*disk_tgt == NULL, it will be allocated - */ -static inline int obd_packmd(struct obd_export *exp, - struct lov_mds_md **disk_tgt, - struct lov_stripe_md *mem_src) -{ - int rc; - - EXP_CHECK_DT_OP(exp, packmd); - EXP_COUNTER_INCREMENT(exp, packmd); - - rc = OBP(exp->exp_obd, packmd)(exp, disk_tgt, mem_src); - return rc; -} - -static inline int obd_size_diskmd(struct obd_export *exp, - struct lov_stripe_md *mem_src) -{ - return obd_packmd(exp, NULL, mem_src); -} - -static inline int obd_free_diskmd(struct obd_export *exp, - struct lov_mds_md **disk_tgt) -{ - LASSERT(disk_tgt); - LASSERT(*disk_tgt); - /* - * LU-2590, for caller's convenience, *disk_tgt could be host - * endianness, it needs swab to LE if necessary, while just - * lov_mds_md header needs it for figuring out how much memory - * needs to be freed. - */ - if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) && - (((*disk_tgt)->lmm_magic == LOV_MAGIC_V1) || - ((*disk_tgt)->lmm_magic == LOV_MAGIC_V3))) - lustre_swab_lov_mds_md(*disk_tgt); - return obd_packmd(exp, disk_tgt, NULL); -} - -/* Unpack an MD struct from disk to in-memory format. - * Returns +ve size of unpacked MD (0 for free), or -ve error. - * - * If @mem_tgt == NULL, MD size is returned (max size if @disk_src == NULL). - * If @*mem_tgt != NULL and @disk_src == NULL, @*mem_tgt will be freed. 
- * If @*mem_tgt == NULL, it will be allocated - */ -static inline int obd_unpackmd(struct obd_export *exp, - struct lov_stripe_md **mem_tgt, - struct lov_mds_md *disk_src, - int disk_len) -{ - int rc; - - EXP_CHECK_DT_OP(exp, unpackmd); - EXP_COUNTER_INCREMENT(exp, unpackmd); - - rc = OBP(exp->exp_obd, unpackmd)(exp, mem_tgt, disk_src, disk_len); - return rc; -} - -static inline int obd_free_memmd(struct obd_export *exp, - struct lov_stripe_md **mem_tgt) -{ - int rc; - - LASSERT(mem_tgt); - LASSERT(*mem_tgt); - rc = obd_unpackmd(exp, mem_tgt, NULL, 0); - *mem_tgt = NULL; - return rc; -} - static inline int obd_create(const struct lu_env *env, struct obd_export *exp, - struct obdo *obdo, struct obd_trans_info *oti) + struct obdo *obdo) { int rc; EXP_CHECK_DT_OP(exp, create); EXP_COUNTER_INCREMENT(exp, create); - rc = OBP(exp->exp_obd, create)(env, exp, obdo, oti); + rc = OBP(exp->exp_obd, create)(env, exp, obdo); return rc; } static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp, - struct obdo *obdo, struct obd_trans_info *oti) + struct obdo *obdo) { int rc; EXP_CHECK_DT_OP(exp, destroy); EXP_COUNTER_INCREMENT(exp, destroy); - rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, oti); + rc = OBP(exp->exp_obd, destroy)(env, exp, obdo); return rc; } static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp, - struct obd_info *oinfo) + struct obdo *oa) { int rc; EXP_CHECK_DT_OP(exp, getattr); EXP_COUNTER_INCREMENT(exp, getattr); - rc = OBP(exp->exp_obd, getattr)(env, exp, oinfo); - return rc; -} - -static inline int obd_getattr_async(struct obd_export *exp, - struct obd_info *oinfo, - struct ptlrpc_request_set *set) -{ - int rc; - - EXP_CHECK_DT_OP(exp, getattr_async); - EXP_COUNTER_INCREMENT(exp, getattr_async); - - rc = OBP(exp->exp_obd, getattr_async)(exp, oinfo, set); + rc = OBP(exp->exp_obd, getattr)(env, exp, oa); return rc; } static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp, - struct obd_info *oinfo, - struct obd_trans_info *oti) + struct obdo *oa) { int rc; EXP_CHECK_DT_OP(exp, setattr); EXP_COUNTER_INCREMENT(exp, setattr); - rc = OBP(exp->exp_obd, setattr)(env, exp, oinfo, oti); - return rc; -} - -/* This performs all the requests set init/wait/destroy actions. */ -static inline int obd_setattr_rqset(struct obd_export *exp, - struct obd_info *oinfo, - struct obd_trans_info *oti) -{ - struct ptlrpc_request_set *set = NULL; - int rc; - - EXP_CHECK_DT_OP(exp, setattr_async); - EXP_COUNTER_INCREMENT(exp, setattr_async); - - set = ptlrpc_prep_set(); - if (!set) - return -ENOMEM; - - rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set); - if (rc == 0) - rc = ptlrpc_set_wait(set); - ptlrpc_set_destroy(set); - return rc; -} - -/* This adds all the requests into @set if @set != NULL, otherwise - * all requests are sent asynchronously without waiting for response. 
- */ -static inline int obd_setattr_async(struct obd_export *exp, - struct obd_info *oinfo, - struct obd_trans_info *oti, - struct ptlrpc_request_set *set) -{ - int rc; - - EXP_CHECK_DT_OP(exp, setattr_async); - EXP_COUNTER_INCREMENT(exp, setattr_async); - - rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set); + rc = OBP(exp->exp_obd, setattr)(env, exp, oa); return rc; } @@ -1053,15 +926,16 @@ static inline int obd_statfs_rqset(struct obd_export *exp, __u32 flags) { struct ptlrpc_request_set *set = NULL; - struct obd_info oinfo = { }; + struct obd_info oinfo = { + .oi_osfs = osfs, + .oi_flags = flags, + }; int rc = 0; - set = ptlrpc_prep_set(); + set = ptlrpc_prep_set(); if (!set) return -ENOMEM; - oinfo.oi_osfs = osfs; - oinfo.oi_flags = flags; rc = obd_statfs_async(exp, &oinfo, max_age, set); if (rc == 0) rc = ptlrpc_set_wait(set); @@ -1112,8 +986,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd, struct obd_export *exp, struct obdo *oa, int objcount, struct obd_ioobj *obj, struct niobuf_remote *remote, int *pages, - struct niobuf_local *local, - struct obd_trans_info *oti) + struct niobuf_local *local) { int rc; @@ -1121,7 +994,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd, EXP_COUNTER_INCREMENT(exp, preprw); rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote, - pages, local, oti); + pages, local); return rc; } @@ -1129,14 +1002,13 @@ static inline int obd_commitrw(const struct lu_env *env, int cmd, struct obd_export *exp, struct obdo *oa, int objcount, struct obd_ioobj *obj, struct niobuf_remote *rnb, int pages, - struct niobuf_local *local, - struct obd_trans_info *oti, int rc) + struct niobuf_local *local, int rc) { EXP_CHECK_DT_OP(exp, commitrw); EXP_COUNTER_INCREMENT(exp, commitrw); rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj, - rnb, pages, local, oti, rc); + rnb, pages, local, rc); return rc; } @@ -1219,18 +1091,6 @@ static inline int obd_notify_observer(struct obd_device *observer, return rc1 ? 
rc1 : rc2; } -static inline int obd_quotacheck(struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - int rc; - - EXP_CHECK_DT_OP(exp, quotacheck); - EXP_COUNTER_INCREMENT(exp, quotacheck); - - rc = OBP(exp->exp_obd, quotacheck)(exp->exp_obd, exp, oqctl); - return rc; -} - static inline int obd_quotactl(struct obd_export *exp, struct obd_quotactl *oqctl) { @@ -1346,21 +1206,9 @@ static inline int md_create(struct obd_export *exp, struct md_op_data *op_data, return rc; } -static inline int md_done_writing(struct obd_export *exp, - struct md_op_data *op_data, - struct md_open_data *mod) -{ - int rc; - - EXP_CHECK_MD_OP(exp, done_writing); - EXP_MD_COUNTER_INCREMENT(exp, done_writing); - rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod); - return rc; -} - static inline int md_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - const ldlm_policy_data_t *policy, + const union ldlm_policy_data *policy, struct lookup_intent *it, struct md_op_data *op_data, struct lustre_handle *lockh, @@ -1428,16 +1276,14 @@ static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data, } static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data, - void *ea, size_t ealen, void *ea2, size_t ea2len, - struct ptlrpc_request **request, - struct md_open_data **mod) + void *ea, size_t ealen, + struct ptlrpc_request **request) { int rc; EXP_CHECK_MD_OP(exp, setattr); EXP_MD_COUNTER_INCREMENT(exp, setattr); - rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, - ea2, ea2len, request, mod); + rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, request); return rc; } @@ -1561,7 +1407,7 @@ static inline int md_set_lock_data(struct obd_export *exp, static inline int md_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, enum ldlm_cancel_flags flags, void *opaque) @@ -1579,7 +1425,7 @@ static inline int md_cancel_unused(struct obd_export *exp, static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags, const struct lu_fid *fid, enum ldlm_type type, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, struct lustre_handle *lockh) { @@ -1589,14 +1435,12 @@ static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags, policy, mode, lockh); } -static inline int md_init_ea_size(struct obd_export *exp, int easize, - int def_asize, int cookiesize, - int def_cookiesize) +static inline int md_init_ea_size(struct obd_export *exp, u32 easize, + u32 def_asize) { EXP_CHECK_MD_OP(exp, init_ea_size); EXP_MD_COUNTER_INCREMENT(exp, init_ea_size); - return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize, - cookiesize, def_cookiesize); + return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize); } static inline int md_intent_getattr_async(struct obd_export *exp, @@ -1636,6 +1480,24 @@ static inline int md_get_fid_from_lsm(struct obd_export *exp, return rc; } +/* Unpack an MD struct from disk to in-memory format. + * Returns +ve size of unpacked MD (0 for free), or -ve error. + * + * If *plsm != NULL and lmm == NULL then *lsm will be freed. + * If *plsm == NULL then it will be allocated. 
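+ *
+ * The parameters mirror the md_ops unpackmd hook that is called below:
+ * \a exp is the export whose MD operations provide unpackmd,
+ * \a plsm points to the in-memory LMV stripe MD to allocate, fill or free,
+ * \a lmm is the on-disk LMV MD buffer (NULL to free \a *plsm), and
+ * \a lmm_size is the size of \a lmm in bytes.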
+ */ +static inline int md_unpackmd(struct obd_export *exp, + struct lmv_stripe_md **plsm, + const union lmv_mds_md *lmm, size_t lmm_size) +{ + int rc; + + EXP_CHECK_MD_OP(exp, unpackmd); + EXP_MD_COUNTER_INCREMENT(exp, unpackmd); + rc = MDP(exp->exp_obd, unpackmd)(exp, plsm, lmm, lmm_size); + return rc; +} + /* OBD Metadata Support */ int obd_init_caches(void); diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index b346a7f10aa46cef66dc8f081d40d2ebc33e54f4..aaedec7d793c8566a3367103d799a67ec4d0e6a9 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ -172,14 +172,14 @@ extern char obd_jobid_var[]; #define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123 #define OBD_FAIL_MDS_SYNC_NET 0x124 #define OBD_FAIL_MDS_SYNC_PACK 0x125 -#define OBD_FAIL_MDS_DONE_WRITING_NET 0x126 -#define OBD_FAIL_MDS_DONE_WRITING_PACK 0x127 +/* OBD_FAIL_MDS_DONE_WRITING_NET 0x126 obsolete since 2.8.0 */ +/* OBD_FAIL_MDS_DONE_WRITING_PACK 0x127 obsolete since 2.8.0 */ #define OBD_FAIL_MDS_ALLOC_OBDO 0x128 #define OBD_FAIL_MDS_PAUSE_OPEN 0x129 #define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a #define OBD_FAIL_MDS_OPEN_CREATE 0x12b #define OBD_FAIL_MDS_OST_SETATTR 0x12c -#define OBD_FAIL_MDS_QUOTACHECK_NET 0x12d +/* OBD_FAIL_MDS_QUOTACHECK_NET 0x12d obsolete since 2.4 */ #define OBD_FAIL_MDS_QUOTACTL_NET 0x12e #define OBD_FAIL_MDS_CLIENT_ADD 0x12f #define OBD_FAIL_MDS_GETXATTR_NET 0x130 @@ -264,7 +264,7 @@ extern char obd_jobid_var[]; #define OBD_FAIL_OST_ENOSPC 0x215 #define OBD_FAIL_OST_EROFS 0x216 #define OBD_FAIL_OST_ENOENT 0x217 -#define OBD_FAIL_OST_QUOTACHECK_NET 0x218 +/* OBD_FAIL_OST_QUOTACHECK_NET 0x218 obsolete since 2.4 */ #define OBD_FAIL_OST_QUOTACTL_NET 0x219 #define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a #define OBD_FAIL_OST_CHECKSUM_SEND 0x21b @@ -321,6 +321,8 @@ extern char obd_jobid_var[]; #define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322 #define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323 +#define OBD_FAIL_LDLM_GRANT_CHECK 0x32a + /* LOCKLESS IO */ #define OBD_FAIL_LDLM_SET_CONTENTION 0x385 @@ -343,6 +345,7 @@ extern char obd_jobid_var[]; #define OBD_FAIL_OSC_CP_ENQ_RACE 0x410 #define OBD_FAIL_OSC_NO_GRANT 0x411 #define OBD_FAIL_OSC_DELAY_SETTIME 0x412 +#define OBD_FAIL_OSC_DELAY_IO 0x414 #define OBD_FAIL_PTLRPC 0x500 #define OBD_FAIL_PTLRPC_ACK 0x501 @@ -373,7 +376,7 @@ extern char obd_jobid_var[]; #define OBD_FAIL_OBD_PING_NET 0x600 #define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601 #define OBD_FAIL_OBD_LOGD_NET 0x602 -#define OBD_FAIL_OBD_QC_CALLBACK_NET 0x603 +/* OBD_FAIL_OBD_QC_CALLBACK_NET 0x603 obsolete since 2.4 */ #define OBD_FAIL_OBD_DQACQ 0x604 #define OBD_FAIL_OBD_LLOG_SETUP 0x605 #define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606 @@ -458,6 +461,8 @@ extern char obd_jobid_var[]; #define OBD_FAIL_LOV_INIT 0x1403 #define OBD_FAIL_GLIMPSE_DELAY 0x1404 #define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405 +#define OBD_FAIL_MAKE_LOVEA_HOLE 0x1406 +#define OBD_FAIL_LLITE_LOST_LAYOUT 0x1407 #define OBD_FAIL_GETATTR_DELAY 0x1409 #define OBD_FAIL_FID_INDIR 0x1501 diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h new file mode 100644 index 0000000000000000000000000000000000000000..30c4dd66d5c4836b9af6ec1ed7dd9724f0c415ea --- /dev/null +++ b/drivers/staging/lustre/lustre/include/seq_range.h @@ -0,0 +1,199 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2014, Intel Corporation. + * + * Copyright 2015 Cray Inc, all rights reserved. + * Author: Ben Evans. + * + * Define lu_seq_range associated functions + */ + +#ifndef _SEQ_RANGE_H_ +#define _SEQ_RANGE_H_ + +#include "lustre/lustre_idl.h" + +/** + * computes the sequence range type \a range + */ + +static inline unsigned int fld_range_type(const struct lu_seq_range *range) +{ + return range->lsr_flags & LU_SEQ_RANGE_MASK; +} + +/** + * Is this sequence range an OST? \a range + */ + +static inline bool fld_range_is_ost(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_OST; +} + +/** + * Is this sequence range an MDT? \a range + */ + +static inline bool fld_range_is_mdt(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_MDT; +} + +/** + * ANY range is only used when the fld client sends a fld query request, + * but it does not know whether the seq is an MDT or OST, so it will send the + * request with ANY type, which means any seq type from the lookup can be + * expected. /a range + */ +static inline unsigned int fld_range_is_any(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_ANY; +} + +/** + * Apply flags to range \a range \a flags + */ + +static inline void fld_range_set_type(struct lu_seq_range *range, + unsigned int flags) +{ + range->lsr_flags |= flags; +} + +/** + * Add MDT to range type \a range + */ + +static inline void fld_range_set_mdt(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_MDT); +} + +/** + * Add OST to range type \a range + */ + +static inline void fld_range_set_ost(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_OST); +} + +/** + * Add ANY to range type \a range + */ + +static inline void fld_range_set_any(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_ANY); +} + +/** + * computes width of given sequence range \a range + */ + +static inline u64 lu_seq_range_space(const struct lu_seq_range *range) +{ + return range->lsr_end - range->lsr_start; +} + +/** + * initialize range to zero \a range + */ + +static inline void lu_seq_range_init(struct lu_seq_range *range) +{ + memset(range, 0, sizeof(*range)); +} + +/** + * check if given seq id \a s is within given range \a range + */ + +static inline bool lu_seq_range_within(const struct lu_seq_range *range, + u64 seq) +{ + return seq >= range->lsr_start && seq < range->lsr_end; +} + +/** + * Is the range sane? Is the end after the beginning? 
\a range + */ + +static inline bool lu_seq_range_is_sane(const struct lu_seq_range *range) +{ + return range->lsr_end >= range->lsr_start; +} + +/** + * Is the range 0? \a range + */ + +static inline bool lu_seq_range_is_zero(const struct lu_seq_range *range) +{ + return range->lsr_start == 0 && range->lsr_end == 0; +} + +/** + * Is the range out of space? \a range + */ + +static inline bool lu_seq_range_is_exhausted(const struct lu_seq_range *range) +{ + return lu_seq_range_space(range) == 0; +} + +/** + * return 0 if two ranges have the same location, nonzero if they are + * different \a r1 \a r2 + */ + +static inline int lu_seq_range_compare_loc(const struct lu_seq_range *r1, + const struct lu_seq_range *r2) +{ + return r1->lsr_index != r2->lsr_index || + r1->lsr_flags != r2->lsr_flags; +} + +#if !defined(__REQ_LAYOUT_USER__) +/** + * byte swap range structure \a range + */ + +void lustre_swab_lu_seq_range(struct lu_seq_range *range); +#endif +/** + * printf string and argument list for sequence range + */ +#define DRANGE "[%#16.16llx-%#16.16llx]:%x:%s" + +#define PRANGE(range) \ + (range)->lsr_start, \ + (range)->lsr_end, \ + (range)->lsr_index, \ + fld_range_is_mdt(range) ? "mdt" : "ost" + +#endif diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c index ecf472e4813da118603eae0701696ce9094153b5..32b73ee62639b199fbb87c81d49425587baacb54 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c @@ -193,6 +193,26 @@ void ldlm_extent_add_lock(struct ldlm_resource *res, * add the locks into grant list, for debug purpose, .. */ ldlm_resource_add_lock(res, &res->lr_granted, lock); + + if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) { + struct ldlm_lock *lck; + + list_for_each_entry_reverse(lck, &res->lr_granted, + l_res_link) { + if (lck == lock) + continue; + if (lockmode_compat(lck->l_granted_mode, + lock->l_granted_mode)) + continue; + if (ldlm_extent_overlap(&lck->l_req_extent, + &lock->l_req_extent)) { + CDEBUG(D_ERROR, "granting conflicting lock %p %p\n", + lck, lock); + ldlm_resource_dump(D_ERROR, res); + LBUG(); + } + } + } } /** Remove cancelled lock from resource interval tree. 
*/ @@ -220,8 +240,8 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock) } } -void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy) +void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy) { memset(lpolicy, 0, sizeof(*lpolicy)); lpolicy->l_extent.start = wpolicy->l_extent.start; @@ -229,8 +249,8 @@ void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, lpolicy->l_extent.gid = wpolicy->l_extent.gid; } -void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy) +void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy) { memset(wpolicy, 0, sizeof(*wpolicy)); wpolicy->l_extent.start = lpolicy->l_extent.start; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c index 861f36f039b56f7e52bb5d77833be5171231efa9..722160784f8355e78afbcfff54196667853c6a5b 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c @@ -612,22 +612,8 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) } EXPORT_SYMBOL(ldlm_flock_completion_ast); -void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy) -{ - memset(lpolicy, 0, sizeof(*lpolicy)); - lpolicy->l_flock.start = wpolicy->l_flock.lfw_start; - lpolicy->l_flock.end = wpolicy->l_flock.lfw_end; - lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid; - /* Compat code, old clients had no idea about owner field and - * relied solely on pid for ownership. Introduced in LU-104, 2.1, - * April 2011 - */ - lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid; -} - -void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy) +void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy) { memset(lpolicy, 0, sizeof(*lpolicy)); lpolicy->l_flock.start = wpolicy->l_flock.lfw_start; @@ -636,8 +622,8 @@ void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy, lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner; } -void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy) +void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy) { memset(wpolicy, 0, sizeof(*wpolicy)); wpolicy->l_flock.lfw_start = lpolicy->l_flock.start; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c index 79f4e6fa193e1d0b1d34b9953a52d16276ec52f2..8e1709dc073c282641dd9c4b130b8035e8838244 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c @@ -54,15 +54,15 @@ #include "../include/lustre_lib.h" #include "ldlm_internal.h" -void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy) +void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy) { memset(lpolicy, 0, sizeof(*lpolicy)); lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits; } -void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy) +void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data 
*lpolicy, + union ldlm_wire_policy_data *wpolicy) { memset(wpolicy, 0, sizeof(*wpolicy)); wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h index 5e82cfc245b2bb732c066e5623bf95cac5981673..5c02501d0560e7ea45b86cfba3069dd1a56becb7 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h @@ -39,13 +39,13 @@ extern struct list_head ldlm_srv_namespace_list; extern struct mutex ldlm_cli_namespace_lock; extern struct list_head ldlm_cli_active_namespace_list; -static inline int ldlm_namespace_nr_read(ldlm_side_t client) +static inline int ldlm_namespace_nr_read(enum ldlm_side client) { return client == LDLM_NAMESPACE_SERVER ? ldlm_srv_namespace_nr : ldlm_cli_namespace_nr; } -static inline void ldlm_namespace_nr_inc(ldlm_side_t client) +static inline void ldlm_namespace_nr_inc(enum ldlm_side client) { if (client == LDLM_NAMESPACE_SERVER) ldlm_srv_namespace_nr++; @@ -53,7 +53,7 @@ static inline void ldlm_namespace_nr_inc(ldlm_side_t client) ldlm_cli_namespace_nr++; } -static inline void ldlm_namespace_nr_dec(ldlm_side_t client) +static inline void ldlm_namespace_nr_dec(enum ldlm_side client) { if (client == LDLM_NAMESPACE_SERVER) ldlm_srv_namespace_nr--; @@ -61,13 +61,13 @@ static inline void ldlm_namespace_nr_dec(ldlm_side_t client) ldlm_cli_namespace_nr--; } -static inline struct list_head *ldlm_namespace_list(ldlm_side_t client) +static inline struct list_head *ldlm_namespace_list(enum ldlm_side client) { return client == LDLM_NAMESPACE_SERVER ? &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list; } -static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client) +static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client) { return client == LDLM_NAMESPACE_SERVER ? &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock; @@ -79,22 +79,23 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns) return atomic_read(&ns->ns_bref) == 0; } -void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t); +void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, + enum ldlm_side); void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, - ldlm_side_t); -struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t); + enum ldlm_side); +struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side); /* ldlm_request.c */ /* Cancel lru flag, it indicates we cancel aged locks. */ enum { - LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */ - LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */ - LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */ - LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */ - LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither - * sending nor waiting for any rpcs) - */ - LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */ + LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel aged locks (non lru resize). */ + LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */ + LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */ + LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. 
*/ + LDLM_LRU_FLAG_NO_WAIT = BIT(4), /* Cancel locks w/o blocking (neither + * sending nor waiting for any rpcs) + */ + LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */ }; int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, @@ -137,10 +138,10 @@ ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *, void *data, __u32 lvb_len, enum lvb_type lvb_type); enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **, void *cookie, __u64 *flags); -void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode); -void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode); -void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode); -void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode); +void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode); +void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode); +void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode); +void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode); int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, enum ldlm_desc_ast_t ast_type); int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use); @@ -311,28 +312,25 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock) return ret; } -typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *, - ldlm_policy_data_t *); - -typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *, - ldlm_wire_policy_data_t *); - -void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy); -void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy); -void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy); -void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy); -void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy); -void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy); -void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy); -void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy); - -void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy); +typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *, + union ldlm_policy_data *); + +typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *, + union ldlm_wire_policy_data *); + +void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); +void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy); +void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); +void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy); +void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); +void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy); +void ldlm_flock_policy_wire_to_local(const 
union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); +void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c index 153e990c494e6f66bbb10541b17382f547e1eb5d..9be01426c955f6d4b54187a2a3b2d9328ff39541 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c @@ -170,6 +170,9 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid) ptlrpc_connection_put(dlmexp->exp_connection); dlmexp->exp_connection = NULL; } + + if (dlmexp) + class_export_put(dlmexp); } list_del(&imp_conn->oic_item); @@ -372,6 +375,25 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) } else { cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT; } + + spin_lock_init(&cli->cl_mod_rpcs_lock); + spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock); + cli->cl_max_mod_rpcs_in_flight = 0; + cli->cl_mod_rpcs_in_flight = 0; + cli->cl_close_rpcs_in_flight = 0; + init_waitqueue_head(&cli->cl_mod_rpcs_waitq); + cli->cl_mod_tag_bitmap = NULL; + + if (connect_op == MDS_CONNECT) { + cli->cl_max_mod_rpcs_in_flight = cli->cl_max_rpcs_in_flight - 1; + cli->cl_mod_tag_bitmap = kcalloc(BITS_TO_LONGS(OBD_MAX_RIF_MAX), + sizeof(long), GFP_NOFS); + if (!cli->cl_mod_tag_bitmap) { + rc = -ENOMEM; + goto err; + } + } + rc = ldlm_get_ref(); if (rc) { CERROR("ldlm_get_ref failed: %d\n", rc); @@ -399,9 +421,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) } cli->cl_import = imp; - /* cli->cl_max_mds_{easize,cookiesize} updated by mdc_init_ea_size() */ + /* cli->cl_max_mds_easize updated by mdc_init_ea_size() */ cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3); - cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie); if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) { if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) { @@ -425,8 +446,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) goto err_import; } - cli->cl_qchk_stat = CL_NOT_QUOTACHECKED; - return rc; err_import: @@ -434,12 +453,16 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) err_ldlm: ldlm_put_ref(); err: + kfree(cli->cl_mod_tag_bitmap); + cli->cl_mod_tag_bitmap = NULL; return rc; } EXPORT_SYMBOL(client_obd_setup); int client_obd_cleanup(struct obd_device *obddev) { + struct client_obd *cli = &obddev->u.cli; + ldlm_namespace_free_post(obddev->obd_namespace); obddev->obd_namespace = NULL; @@ -447,6 +470,10 @@ int client_obd_cleanup(struct obd_device *obddev) LASSERT(!obddev->u.cli.cl_import); ldlm_put_ref(); + + kfree(cli->cl_mod_tag_bitmap); + cli->cl_mod_tag_bitmap = NULL; + return 0; } EXPORT_SYMBOL(client_obd_cleanup); @@ -461,6 +488,7 @@ int client_connect_import(const struct lu_env *env, struct obd_import *imp = cli->cl_import; struct obd_connect_data *ocd; struct lustre_handle conn = { 0 }; + bool is_mdc = false; int rc; *exp = NULL; @@ -487,6 +515,10 @@ int client_connect_import(const struct lu_env *env, ocd = &imp->imp_connect_data; if (data) { *ocd = *data; + is_mdc = !strncmp(imp->imp_obd->obd_type->typ_name, + LUSTRE_MDC_NAME, 3); + if (is_mdc) + data->ocd_connect_flags |= OBD_CONNECT_MULTIMODRPCS; imp->imp_connect_flags_orig = data->ocd_connect_flags; } @@ -502,6 +534,11 @@ int client_connect_import(const struct lu_env *env, ocd->ocd_connect_flags, "old %#llx, new %#llx\n", data->ocd_connect_flags, ocd->ocd_connect_flags); 
data->ocd_connect_flags = ocd->ocd_connect_flags; + /* clear the flag as it was not set and is not known + * by upper layers + */ + if (is_mdc) + data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS; } ptlrpc_pinger_add_import(imp); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index 3c48b4fb96f1bf6b0aa1a7def67cba3a25780edf..a4a291acb6592c110c7ddd7379880fd0a3413e33 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -39,6 +39,7 @@ #include "../../include/linux/libcfs/libcfs.h" #include "../include/lustre_intent.h" +#include "../include/lustre_swab.h" #include "../include/obd_class.h" #include "ldlm_internal.h" @@ -63,17 +64,10 @@ static char *ldlm_typename[] = { [LDLM_IBITS] = "IBT", }; -static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = { +static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = { [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local, [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local, - [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local, - [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local, -}; - -static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = { - [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local, - [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local, - [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local, + [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire_to_local, [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local, }; @@ -88,8 +82,8 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = { * Converts lock policy from local format to on the wire lock_desc format */ static void ldlm_convert_policy_to_wire(enum ldlm_type type, - const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy) + const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy) { ldlm_policy_local_to_wire_t convert; @@ -102,23 +96,17 @@ static void ldlm_convert_policy_to_wire(enum ldlm_type type, * Converts lock policy from on the wire lock_desc format to local format */ void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, - const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy) + const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy) { ldlm_policy_wire_to_local_t convert; - int new_client; - /** some badness for 2.0.0 clients, but 2.0.0 isn't supported */ - new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0; - if (new_client) - convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE]; - else - convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE]; + convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE]; convert(wpolicy, lpolicy); } -char *ldlm_it2str(int it) +const char *ldlm_it2str(enum ldlm_intent_flags it) { switch (it) { case IT_OPEN: @@ -140,7 +128,7 @@ char *ldlm_it2str(int it) case IT_LAYOUT: return "layout"; default: - CERROR("Unknown intent %d\n", it); + CERROR("Unknown intent 0x%08x\n", it); return "UNKNOWN"; } } @@ -512,7 +500,6 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, return 0; } -EXPORT_SYMBOL(ldlm_lock_change_resource); /** \defgroup ldlm_handles LDLM HANDLES * Ways to get hold of locks without any addresses. 
@@ -595,7 +582,6 @@ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) &lock->l_policy_data, &desc->l_policy_data); } -EXPORT_SYMBOL(ldlm_lock2desc); /** * Add a lock to list of conflicting locks to send AST to. @@ -658,7 +644,7 @@ static void ldlm_add_ast_work_item(struct ldlm_lock *lock, * r/w reference type is determined by \a mode * Calls ldlm_lock_addref_internal. */ -void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode) +void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode) { struct ldlm_lock *lock; @@ -676,7 +662,8 @@ EXPORT_SYMBOL(ldlm_lock_addref); * Removes lock from LRU if it is there. * Assumes the LDLM lock is already locked. */ -void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) +void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, + enum ldlm_mode mode) { ldlm_lock_remove_from_lru(lock); if (mode & (LCK_NL | LCK_CR | LCK_PR)) { @@ -700,7 +687,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) * * \retval -EAGAIN lock is being canceled. */ -int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode) +int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode) { struct ldlm_lock *lock; int result; @@ -726,7 +713,7 @@ EXPORT_SYMBOL(ldlm_lock_addref_try); * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work. * Only called for local locks. */ -void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) +void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode) { lock_res_and_lock(lock); ldlm_lock_addref_internal_nolock(lock, mode); @@ -740,7 +727,8 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) * Does NOT add lock to LRU if no r/w references left to accommodate flock locks * that cannot be placed in LRU. */ -void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) +void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, + enum ldlm_mode mode) { LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); if (mode & (LCK_NL | LCK_CR | LCK_PR)) { @@ -766,7 +754,7 @@ void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) * on the namespace. * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called. */ -void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) +void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode) { struct ldlm_namespace *ns; @@ -786,11 +774,16 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) } if (!lock->l_readers && !lock->l_writers && - ldlm_is_cbpending(lock)) { + (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) { /* If we received a blocked AST and this was the last reference, * run the callback. + * Group locks are special: + * They must not go in LRU, but they are not called back + * like non-group locks, instead they are manually released. + * They have an l_writers reference which they keep until + * they are manually released, so we remove them when they have + * no more reader or writer references. 
- LU-6368 */ - LDLM_DEBUG(lock, "final decref done on cbpending lock"); LDLM_LOCK_GET(lock); /* dropped by bl thread */ @@ -832,7 +825,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) /** * Decrease reader/writer refcount for LDLM lock with handle \a lockh */ -void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode) +void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); @@ -846,10 +839,9 @@ EXPORT_SYMBOL(ldlm_lock_decref); * Decrease reader/writer refcount for LDLM lock with handle * \a lockh and mark it for subsequent cancellation once r/w refcount * drops to zero instead of putting into LRU. - * - * Typical usage is for GROUP locks which we cannot allow to be cached. */ -void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode) +void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, + enum ldlm_mode mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); @@ -1055,88 +1047,173 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) } /** - * Search for a lock with given properties in a queue. + * Describe the overlap between two locks. itree_overlap_cb data. + */ +struct lock_match_data { + struct ldlm_lock *lmd_old; + struct ldlm_lock *lmd_lock; + enum ldlm_mode *lmd_mode; + union ldlm_policy_data *lmd_policy; + __u64 lmd_flags; + int lmd_unref; +}; + +/** + * Check if the given @lock meets the criteria for a match. + * A reference on the lock is taken if matched. * - * \retval a referenced lock or NULL. See the flag descriptions below, in the - * comment above ldlm_lock_match + * \param lock test-against this lock + * \param data parameters */ -static struct ldlm_lock *search_queue(struct list_head *queue, - enum ldlm_mode *mode, - ldlm_policy_data_t *policy, - struct ldlm_lock *old_lock, - __u64 flags, int unref) +static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data) { - struct ldlm_lock *lock; - struct list_head *tmp; + union ldlm_policy_data *lpol = &lock->l_policy_data; + enum ldlm_mode match; - list_for_each(tmp, queue) { - enum ldlm_mode match; + if (lock == data->lmd_old) + return INTERVAL_ITER_STOP; - lock = list_entry(tmp, struct ldlm_lock, l_res_link); - - if (lock == old_lock) - break; + /* + * Check if this lock can be matched. + * Used by LU-2919(exclusive open) for open lease lock + */ + if (ldlm_is_excl(lock)) + return INTERVAL_ITER_CONT; - /* Check if this lock can be matched. - * Used by LU-2919(exclusive open) for open lease lock - */ - if (ldlm_is_excl(lock)) - continue; + /* + * llite sometimes wants to match locks that will be + * canceled when their users drop, but we allow it to match + * if it passes in CBPENDING and the lock still has users. + * this is generally only going to be used by children + * whose parents already hold a lock so forward progress + * can still happen. + */ + if (ldlm_is_cbpending(lock) && + !(data->lmd_flags & LDLM_FL_CBPENDING)) + return INTERVAL_ITER_CONT; - /* llite sometimes wants to match locks that will be - * canceled when their users drop, but we allow it to match - * if it passes in CBPENDING and the lock still has users. - * this is generally only going to be used by children - * whose parents already hold a lock so forward progress - * can still happen. 
- */ - if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING)) - continue; - if (!unref && ldlm_is_cbpending(lock) && - lock->l_readers == 0 && lock->l_writers == 0) - continue; + if (!data->lmd_unref && ldlm_is_cbpending(lock) && + !lock->l_readers && !lock->l_writers) + return INTERVAL_ITER_CONT; - if (!(lock->l_req_mode & *mode)) - continue; - match = lock->l_req_mode; + if (!(lock->l_req_mode & *data->lmd_mode)) + return INTERVAL_ITER_CONT; + match = lock->l_req_mode; - if (lock->l_resource->lr_type == LDLM_EXTENT && - (lock->l_policy_data.l_extent.start > - policy->l_extent.start || - lock->l_policy_data.l_extent.end < policy->l_extent.end)) - continue; + switch (lock->l_resource->lr_type) { + case LDLM_EXTENT: + if (lpol->l_extent.start > data->lmd_policy->l_extent.start || + lpol->l_extent.end < data->lmd_policy->l_extent.end) + return INTERVAL_ITER_CONT; if (unlikely(match == LCK_GROUP) && - lock->l_resource->lr_type == LDLM_EXTENT && - policy->l_extent.gid != LDLM_GID_ANY && - lock->l_policy_data.l_extent.gid != policy->l_extent.gid) - continue; - - /* We match if we have existing lock with same or wider set + data->lmd_policy->l_extent.gid != LDLM_GID_ANY && + lpol->l_extent.gid != data->lmd_policy->l_extent.gid) + return INTERVAL_ITER_CONT; + break; + case LDLM_IBITS: + /* + * We match if we have existing lock with same or wider set * of bits. */ - if (lock->l_resource->lr_type == LDLM_IBITS && - ((lock->l_policy_data.l_inodebits.bits & - policy->l_inodebits.bits) != - policy->l_inodebits.bits)) - continue; + if ((lpol->l_inodebits.bits & + data->lmd_policy->l_inodebits.bits) != + data->lmd_policy->l_inodebits.bits) + return INTERVAL_ITER_CONT; + break; + default: + break; + } + /* + * We match if we have existing lock with same or wider set + * of bits. + */ + if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE)) + return INTERVAL_ITER_CONT; + + if ((data->lmd_flags & LDLM_FL_LOCAL_ONLY) && + !ldlm_is_local(lock)) + return INTERVAL_ITER_CONT; + + if (data->lmd_flags & LDLM_FL_TEST_LOCK) { + LDLM_LOCK_GET(lock); + ldlm_lock_touch_in_lru(lock); + } else { + ldlm_lock_addref_internal_nolock(lock, match); + } + + *data->lmd_mode = match; + data->lmd_lock = lock; + + return INTERVAL_ITER_STOP; +} + +static unsigned int itree_overlap_cb(struct interval_node *in, void *args) +{ + struct ldlm_interval *node = to_ldlm_interval(in); + struct lock_match_data *data = args; + struct ldlm_lock *lock; + int rc; + + list_for_each_entry(lock, &node->li_group, l_sl_policy) { + rc = lock_matches(lock, data); + if (rc == INTERVAL_ITER_STOP) + return INTERVAL_ITER_STOP; + } + return INTERVAL_ITER_CONT; +} + +/** + * Search for a lock with given parameters in interval trees. + * + * \param res search for a lock in this resource + * \param data parameters + * + * \retval a referenced lock or NULL. 
+ */ +static struct ldlm_lock *search_itree(struct ldlm_resource *res, + struct lock_match_data *data) +{ + struct interval_node_extent ext = { + .start = data->lmd_policy->l_extent.start, + .end = data->lmd_policy->l_extent.end + }; + int idx; + + for (idx = 0; idx < LCK_MODE_NUM; idx++) { + struct ldlm_interval_tree *tree = &res->lr_itree[idx]; - if (!unref && LDLM_HAVE_MASK(lock, GONE)) + if (!tree->lit_root) continue; - if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock)) + if (!(tree->lit_mode & *data->lmd_mode)) continue; - if (flags & LDLM_FL_TEST_LOCK) { - LDLM_LOCK_GET(lock); - ldlm_lock_touch_in_lru(lock); - } else { - ldlm_lock_addref_internal_nolock(lock, match); - } - *mode = match; - return lock; + interval_search(tree->lit_root, &ext, + itree_overlap_cb, data); } + return data->lmd_lock; +} +/** + * Search for a lock with given properties in a queue. + * + * \param queue search for a lock in this queue + * \param data parameters + * + * \retval a referenced lock or NULL. + */ +static struct ldlm_lock *search_queue(struct list_head *queue, + struct lock_match_data *data) +{ + struct ldlm_lock *lock; + int rc; + + list_for_each_entry(lock, queue, l_res_link) { + rc = lock_matches(lock, data); + if (rc == INTERVAL_ITER_STOP) + return data->lmd_lock; + } return NULL; } @@ -1147,7 +1224,6 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock) wake_up_all(&lock->l_waitq); } } -EXPORT_SYMBOL(ldlm_lock_fail_match_locked); /** * Mark lock as "matchable" by OST. @@ -1208,35 +1284,45 @@ EXPORT_SYMBOL(ldlm_lock_allow_match); enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, const struct ldlm_res_id *res_id, enum ldlm_type type, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, struct lustre_handle *lockh, int unref) { + struct lock_match_data data = { + .lmd_old = NULL, + .lmd_lock = NULL, + .lmd_mode = &mode, + .lmd_policy = policy, + .lmd_flags = flags, + .lmd_unref = unref, + }; struct ldlm_resource *res; - struct ldlm_lock *lock, *old_lock = NULL; + struct ldlm_lock *lock; int rc = 0; if (!ns) { - old_lock = ldlm_handle2lock(lockh); - LASSERT(old_lock); + data.lmd_old = ldlm_handle2lock(lockh); + LASSERT(data.lmd_old); - ns = ldlm_lock_to_ns(old_lock); - res_id = &old_lock->l_resource->lr_name; - type = old_lock->l_resource->lr_type; - mode = old_lock->l_req_mode; + ns = ldlm_lock_to_ns(data.lmd_old); + res_id = &data.lmd_old->l_resource->lr_name; + type = data.lmd_old->l_resource->lr_type; + *data.lmd_mode = data.lmd_old->l_req_mode; } res = ldlm_resource_get(ns, NULL, res_id, type, 0); if (IS_ERR(res)) { - LASSERT(!old_lock); + LASSERT(!data.lmd_old); return 0; } LDLM_RESOURCE_ADDREF(res); lock_res(res); - lock = search_queue(&res->lr_granted, &mode, policy, old_lock, - flags, unref); + if (res->lr_type == LDLM_EXTENT) + lock = search_itree(res, &data); + else + lock = search_queue(&res->lr_granted, &data); if (lock) { rc = 1; goto out; @@ -1245,14 +1331,12 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, rc = 0; goto out; } - lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, - flags, unref); + lock = search_queue(&res->lr_waiting, &data); if (lock) { rc = 1; goto out; } - - out: +out: unlock_res(res); LDLM_RESOURCE_DELREF(res); ldlm_resource_putref(res); @@ -1324,8 +1408,8 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, (type == LDLM_PLAIN || type == LDLM_IBITS) ? 
res_id->name[3] : policy->l_extent.end); } - if (old_lock) - LDLM_LOCK_PUT(old_lock); + if (data.lmd_old) + LDLM_LOCK_PUT(data.lmd_old); return rc ? mode : 0; } diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c index fde697ebaadc18167c935d1cf192941b1d98721f..12647af5a33691c798f62e5620944063f535a110 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c @@ -511,23 +511,6 @@ static inline void ldlm_callback_errmsg(struct ptlrpc_request *req, CWARN("Send reply failed, maybe cause bug 21636.\n"); } -static int ldlm_handle_qc_callback(struct ptlrpc_request *req) -{ - struct obd_quotactl *oqctl; - struct client_obd *cli = &req->rq_export->exp_obd->u.cli; - - oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - if (!oqctl) { - CERROR("Can't unpack obd_quotactl\n"); - return -EPROTO; - } - - oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat); - - cli->cl_qchk_stat = oqctl->qc_stat; - return 0; -} - /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */ static int ldlm_callback_handler(struct ptlrpc_request *req) { @@ -577,13 +560,6 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) rc = ldlm_handle_setinfo(req); ldlm_callback_reply(req, rc); return 0; - case OBD_QC_CALLBACK: - req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK); - if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET)) - return 0; - rc = ldlm_handle_qc_callback(req); - ldlm_callback_reply(req, rc); - return 0; default: CERROR("unknown opcode %u\n", lustre_msg_get_opc(req->rq_reqmsg)); @@ -858,7 +834,6 @@ int ldlm_get_ref(void) return rc; } -EXPORT_SYMBOL(ldlm_get_ref); void ldlm_put_ref(void) { @@ -875,7 +850,6 @@ void ldlm_put_ref(void) } mutex_unlock(&ldlm_ref_mutex); } -EXPORT_SYMBOL(ldlm_put_ref); static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj, struct attribute *attr, diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c index 0aed39c46154500dcf2059d073fc79694e910812..862ea0a1dc97cbcbc602885be5407c74c8060615 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c @@ -54,14 +54,14 @@ #include "ldlm_internal.h" -void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy) +void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy) { /* No policy for plain locks */ } -void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, - ldlm_wire_policy_data_t *wpolicy) +void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy) { /* No policy for plain locks */ } diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c index 9a1136e32dfc5df975323ad99b8017e87b84e115..8dfb3c8e6b7a9986a81cba89b221d92d42848047 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c @@ -293,7 +293,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl) * take into account pl->pl_recalc_time here. 
*/ ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool), - 0, LCF_ASYNC, LDLM_CANCEL_LRUR); + 0, LCF_ASYNC, LDLM_LRU_FLAG_LRUR); out: spin_lock(&pl->pl_lock); @@ -339,7 +339,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl, if (nr == 0) return (unused / 100) * sysctl_vfs_cache_pressure; else - return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK); + return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK); } static const struct ldlm_pool_ops ldlm_cli_pool_ops = { @@ -356,10 +356,10 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl) u32 recalc_interval_sec; int count; - recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time; + recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time; if (recalc_interval_sec > 0) { spin_lock(&pl->pl_lock); - recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time; + recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time; if (recalc_interval_sec > 0) { /* @@ -382,7 +382,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl) count); } - recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() + + recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() + pl->pl_recalc_period; if (recalc_interval_sec <= 0) { /* DEBUG: should be re-removed after LU-4536 is fixed */ @@ -651,13 +651,13 @@ static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl) } int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, - int idx, ldlm_side_t client) + int idx, enum ldlm_side client) { int rc; spin_lock_init(&pl->pl_lock); atomic_set(&pl->pl_granted, 0); - pl->pl_recalc_time = ktime_get_seconds(); + pl->pl_recalc_time = ktime_get_real_seconds(); atomic_set(&pl->pl_lock_volume_factor, 1); atomic_set(&pl->pl_grant_rate, 0); @@ -684,7 +684,6 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, return rc; } -EXPORT_SYMBOL(ldlm_pool_init); void ldlm_pool_fini(struct ldlm_pool *pl) { @@ -698,7 +697,6 @@ void ldlm_pool_fini(struct ldlm_pool *pl) */ POISON(pl, 0x5a, sizeof(*pl)); } -EXPORT_SYMBOL(ldlm_pool_fini); /** * Add new taken ldlm lock \a lock into pool \a pl accounting. @@ -724,7 +722,6 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock) * with too long call paths. */ } -EXPORT_SYMBOL(ldlm_pool_add); /** * Remove ldlm lock \a lock from pool \a pl accounting. @@ -743,7 +740,6 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock) lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT); } -EXPORT_SYMBOL(ldlm_pool_del); /** * Returns current \a pl SLV. @@ -792,13 +788,12 @@ static struct completion ldlm_pools_comp; * count locks from all namespaces (if possible). Returns number of * cached locks. */ -static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) +static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask) { unsigned long total = 0; int nr_ns; struct ldlm_namespace *ns; struct ldlm_namespace *ns_old = NULL; /* loop detection */ - void *cookie; if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) return 0; @@ -806,8 +801,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n", client == LDLM_NAMESPACE_CLIENT ? "client" : "server"); - cookie = cl_env_reenter(); - /* * Find out how many resources we may release. 
*/ @@ -816,7 +809,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) mutex_lock(ldlm_namespace_lock(client)); if (list_empty(ldlm_namespace_list(client))) { mutex_unlock(ldlm_namespace_lock(client)); - cl_env_reexit(cookie); return 0; } ns = ldlm_namespace_first_locked(client); @@ -842,22 +834,19 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) ldlm_namespace_put(ns); } - cl_env_reexit(cookie); return total; } -static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask) +static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr, + gfp_t gfp_mask) { unsigned long freed = 0; int tmp, nr_ns; struct ldlm_namespace *ns; - void *cookie; if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) return -1; - cookie = cl_env_reenter(); - /* * Shrink at least ldlm_namespace_nr_read(client) namespaces. */ @@ -887,7 +876,6 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask) freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask); ldlm_namespace_put(ns); } - cl_env_reexit(cookie); /* * we only decrease the SLV in server pools shrinker, return * SHRINK_STOP to kernel to avoid needless loop. LU-1128 @@ -908,7 +896,7 @@ static unsigned long ldlm_pools_cli_scan(struct shrinker *s, sc->gfp_mask); } -static int ldlm_pools_recalc(ldlm_side_t client) +static int ldlm_pools_recalc(enum ldlm_side client) { struct ldlm_namespace *ns; struct ldlm_namespace *ns_old = NULL; @@ -1095,7 +1083,6 @@ int ldlm_pools_init(void) return rc; } -EXPORT_SYMBOL(ldlm_pools_init); void ldlm_pools_fini(void) { @@ -1104,4 +1091,3 @@ void ldlm_pools_fini(void) ldlm_pools_thread_stop(); } -EXPORT_SYMBOL(ldlm_pools_fini); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c index 35ba6f14d95f7febbf407a6bf6b2d56da468ea2e..c1f8693f94a549f43f0e72348875dc56918a68de 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c @@ -93,11 +93,7 @@ static int ldlm_expired_completion_wait(void *data) if (!lock->l_conn_export) { static unsigned long next_dump, last_dump; - LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n", - (s64)lock->l_last_activity, - (s64)(ktime_get_real_seconds() - - lock->l_last_activity)); - LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep", + LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep", (s64)lock->l_last_activity, (s64)(ktime_get_real_seconds() - lock->l_last_activity)); @@ -475,12 +471,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, "client-side enqueue, new policy data"); } - if ((*flags) & LDLM_FL_AST_SENT || - /* Cancel extent locks as soon as possible on a liblustre client, - * because it cannot handle asynchronous ASTs robustly (see - * bug 7311). - */ - (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) { + if ((*flags) & LDLM_FL_AST_SENT) { lock_res_and_lock(lock); lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; unlock_res_and_lock(lock); @@ -602,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff); flags = ns_connect_lru_resize(ns) ? 
- LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED; + LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED; to_free = !ns_connect_lru_resize(ns) && opc == LDLM_ENQUEUE ? 1 : 0; @@ -657,6 +648,27 @@ int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req, } EXPORT_SYMBOL(ldlm_prep_enqueue_req); +static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, + int lvb_len) +{ + struct ptlrpc_request *req; + int rc; + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE); + if (!req) + return ERR_PTR(-ENOMEM); + + rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); + if (rc) { + ptlrpc_request_free(req); + return ERR_PTR(rc); + } + + req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len); + ptlrpc_request_set_replen(req); + return req; +} + /** * Client-side lock enqueue. * @@ -670,7 +682,7 @@ EXPORT_SYMBOL(ldlm_prep_enqueue_req); int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, struct ldlm_enqueue_info *einfo, const struct ldlm_res_id *res_id, - ldlm_policy_data_t const *policy, __u64 *flags, + union ldlm_policy_data const *policy, __u64 *flags, void *lvb, __u32 lvb_len, enum lvb_type lvb_type, struct lustre_handle *lockh, int async) { @@ -727,17 +739,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, lock->l_last_activity = ktime_get_real_seconds(); /* lock not sent to server yet */ - if (!reqp || !*reqp) { - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_LDLM_ENQUEUE, - LUSTRE_DLM_VERSION, - LDLM_ENQUEUE); - if (!req) { + req = ldlm_enqueue_pack(exp, lvb_len); + if (IS_ERR(req)) { failed_lock_cleanup(ns, lock, einfo->ei_mode); LDLM_LOCK_RELEASE(lock); - return -ENOMEM; + return PTR_ERR(req); } + req_passed_in = 0; if (reqp) *reqp = req; @@ -757,24 +766,6 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, body->lock_flags = ldlm_flags_to_wire(*flags); body->lock_handle[0] = *lockh; - /* Continue as normal. */ - if (!req_passed_in) { - if (lvb_len > 0) - req_capsule_extend(&req->rq_pill, - &RQF_LDLM_ENQUEUE_LVB); - req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, - lvb_len); - ptlrpc_request_set_replen(req); - } - - /* - * Liblustre client doesn't get extent locks, except for O_APPEND case - * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where - * [i_size, OBD_OBJECT_EOF] lock is taken. - */ - LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT || - policy->l_extent.end == OBD_OBJECT_EOF)); - if (async) { LASSERT(reqp); return 0; @@ -1022,7 +1013,6 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req) return 0; } -EXPORT_SYMBOL(ldlm_cli_update_pool); /** * Client side lock cancel. @@ -1067,7 +1057,7 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh, ns = ldlm_lock_to_ns(lock); flags = ns_connect_lru_resize(ns) ? - LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED; + LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED; count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1, LCF_BL_AST, flags); } @@ -1125,7 +1115,6 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, return count; } -EXPORT_SYMBOL(ldlm_cli_cancel_list_local); /** * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back @@ -1184,6 +1173,14 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, if (count && added >= count) return LDLM_POLICY_KEEP_LOCK; + /* + * Despite of the LV, It doesn't make sense to keep the lock which + * is unused for ns_max_age time. 
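+ * Such a lock is cancelled here regardless of the lock volume
+ * computed from the pool SLV and LVF further below.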
+ */ + if (cfs_time_after(cfs_time_current(), + cfs_time_add(lock->l_last_used, ns->ns_max_age))) + return LDLM_POLICY_CANCEL_LOCK; + slv = ldlm_pool_get_slv(pl); lvf = ldlm_pool_get_lvf(pl); la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used)); @@ -1287,21 +1284,21 @@ typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)( static ldlm_cancel_lru_policy_t ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags) { - if (flags & LDLM_CANCEL_NO_WAIT) + if (flags & LDLM_LRU_FLAG_NO_WAIT) return ldlm_cancel_no_wait_policy; if (ns_connect_lru_resize(ns)) { - if (flags & LDLM_CANCEL_SHRINK) + if (flags & LDLM_LRU_FLAG_SHRINK) /* We kill passed number of old locks. */ return ldlm_cancel_passed_policy; - else if (flags & LDLM_CANCEL_LRUR) + else if (flags & LDLM_LRU_FLAG_LRUR) return ldlm_cancel_lrur_policy; - else if (flags & LDLM_CANCEL_PASSED) + else if (flags & LDLM_LRU_FLAG_PASSED) return ldlm_cancel_passed_policy; - else if (flags & LDLM_CANCEL_LRUR_NO_WAIT) + else if (flags & LDLM_LRU_FLAG_LRUR_NO_WAIT) return ldlm_cancel_lrur_no_wait_policy; } else { - if (flags & LDLM_CANCEL_AGED) + if (flags & LDLM_LRU_FLAG_AGED) return ldlm_cancel_aged_policy; } @@ -1325,21 +1322,21 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags) * * Calling policies for enabled LRU resize: * ---------------------------------------- - * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to - * cancel not more than \a count locks; + * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to + * cancel not more than \a count locks; * - * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at - * the beginning of LRU list); + * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located at + * the beginning of LRU list); * - * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to - * memory pressure policy function; + * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according to + * memory pressure policy function; * - * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy". + * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to "aged policy". * - * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible - * (typically before replaying locks) w/o - * sending any RPCs or waiting for any - * outstanding RPC to complete. + * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible + * (typically before replaying locks) w/o + * sending any RPCs or waiting for any + * outstanding RPC to complete. 
*/ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *cancels, int count, int max, @@ -1348,7 +1345,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, ldlm_cancel_lru_policy_t pf; struct ldlm_lock *lock, *next; int added = 0, unused, remained; - int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT); + int no_wait = flags & (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT); spin_lock(&ns->ns_lock); unused = ns->ns_nr_unused; @@ -1531,7 +1528,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, */ int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, __u64 lock_flags, enum ldlm_cancel_flags cancel_flags, void *opaque) @@ -1648,7 +1645,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list); */ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, enum ldlm_cancel_flags flags, void *opaque) @@ -1723,7 +1720,7 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, opaque); } else { cfs_hash_for_each_nolock(ns->ns_rs_hash, - ldlm_cli_hash_cancel_unused, &arg); + ldlm_cli_hash_cancel_unused, &arg, 0); return ELDLM_OK; } } @@ -1796,7 +1793,7 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns, }; cfs_hash_for_each_nolock(ns->ns_rs_hash, - ldlm_res_iter_helper, &helper); + ldlm_res_iter_helper, &helper, 0); } /* non-blocking function to manipulate a lock whose cb_data is being put away. @@ -1840,7 +1837,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure) * bug 17614: locks being actively cancelled. Get a reference * on a lock so that it does not disappear under us (e.g. due to cancel) */ - if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) { + if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_BL_DONE))) { list_add(&lock->l_pending_chain, list); LDLM_LOCK_GET(lock); } @@ -1909,7 +1906,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) int flags; /* Bug 11974: Do not replay a lock which is actively being canceled */ - if (ldlm_is_canceling(lock)) { + if (ldlm_is_bl_done(lock)) { LDLM_DEBUG(lock, "Not replaying canceled lock:"); return 0; } @@ -2003,11 +2000,11 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns) ldlm_ns_name(ns), ns->ns_nr_unused); /* We don't need to care whether or not LRU resize is enabled - * because the LDLM_CANCEL_NO_WAIT policy doesn't use the + * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the * count parameter */ canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0, - LCF_LOCAL, LDLM_CANCEL_NO_WAIT); + LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT); CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n", canceled, ldlm_ns_name(ns)); @@ -2048,4 +2045,3 @@ int ldlm_replay_locks(struct obd_import *imp) return rc; } -EXPORT_SYMBOL(ldlm_replay_locks); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c index a09c25aea69856ccb8b18e2fc2af66434399e5cd..b22f5bae72019bd6a759a471242d7d41d70049bc 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c @@ -226,7 +226,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, /* Try to cancel all @ns_nr_unused locks. 
*/ canceled = ldlm_cancel_lru(ns, unused, 0, - LDLM_CANCEL_PASSED); + LDLM_LRU_FLAG_PASSED); if (canceled < unused) { CDEBUG(D_DLMTRACE, "not all requested locks are canceled, requested: %d, canceled: %d\n", @@ -237,7 +237,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, } else { tmp = ns->ns_max_unused; ns->ns_max_unused = 0; - ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED); + ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED); ns->ns_max_unused = tmp; } return count; @@ -262,7 +262,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, "changing namespace %s unused locks from %u to %u\n", ldlm_ns_name(ns), ns->ns_nr_unused, (unsigned int)tmp); - ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED); + ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED); if (!lru_resize) { CDEBUG(D_DLMTRACE, @@ -276,7 +276,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, ldlm_ns_name(ns), ns->ns_max_unused, (unsigned int)tmp); ns->ns_max_unused = (unsigned int)tmp; - ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED); + ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED); /* Make sure that LRU resize was originally supported before * turning it on here. @@ -445,8 +445,8 @@ static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res) return res; } -static unsigned ldlm_res_hop_hash(struct cfs_hash *hs, - const void *key, unsigned mask) +static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs, + const void *key, unsigned int mask) { const struct ldlm_res_id *id = key; unsigned int val = 0; @@ -457,8 +457,8 @@ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs, return val & mask; } -static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs, - const void *key, unsigned mask) +static unsigned int ldlm_res_hop_fid_hash(struct cfs_hash *hs, + const void *key, unsigned int mask) { const struct ldlm_res_id *id = key; struct lu_fid fid; @@ -612,7 +612,7 @@ static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = { /** Register \a ns in the list of namespaces */ static void ldlm_namespace_register(struct ldlm_namespace *ns, - ldlm_side_t client) + enum ldlm_side client) { mutex_lock(ldlm_namespace_lock(client)); LASSERT(list_empty(&ns->ns_list_chain)); @@ -625,7 +625,7 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns, * Create and initialize new empty namespace. */ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, - ldlm_side_t client, + enum ldlm_side client, enum ldlm_appetite apt, enum ldlm_ns_type ns_type) { @@ -855,8 +855,10 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags) return ELDLM_OK; } - cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags); - cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL); + cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, + &flags, 0); + cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, + NULL, 0); return ELDLM_OK; } EXPORT_SYMBOL(ldlm_namespace_cleanup); @@ -952,7 +954,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns, /** Unregister \a ns from the list of namespaces. 
*/ static void ldlm_namespace_unregister(struct ldlm_namespace *ns, - ldlm_side_t client) + enum ldlm_side client) { mutex_lock(ldlm_namespace_lock(client)); LASSERT(!list_empty(&ns->ns_list_chain)); @@ -999,7 +1001,6 @@ void ldlm_namespace_get(struct ldlm_namespace *ns) { atomic_inc(&ns->ns_bref); } -EXPORT_SYMBOL(ldlm_namespace_get); /* This is only for callers that care about refcount */ static int ldlm_namespace_get_return(struct ldlm_namespace *ns) @@ -1014,11 +1015,10 @@ void ldlm_namespace_put(struct ldlm_namespace *ns) spin_unlock(&ns->ns_lock); } } -EXPORT_SYMBOL(ldlm_namespace_put); /** Should be called with ldlm_namespace_lock(client) taken. */ void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns, - ldlm_side_t client) + enum ldlm_side client) { LASSERT(!list_empty(&ns->ns_list_chain)); LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); @@ -1027,7 +1027,7 @@ void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns, /** Should be called with ldlm_namespace_lock(client) taken. */ void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns, - ldlm_side_t client) + enum ldlm_side client) { LASSERT(!list_empty(&ns->ns_list_chain)); LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); @@ -1035,7 +1035,7 @@ void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns, } /** Should be called with ldlm_namespace_lock(client) taken. */ -struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client) +struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client) { LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); LASSERT(!list_empty(ldlm_namespace_list(client))); @@ -1305,7 +1305,7 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc) * Print information about all locks in all namespaces on this node to debug * log. */ -void ldlm_dump_all_namespaces(ldlm_side_t client, int level) +void ldlm_dump_all_namespaces(enum ldlm_side client, int level) { struct list_head *tmp; @@ -1323,7 +1323,6 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level) mutex_unlock(ldlm_namespace_lock(client)); } -EXPORT_SYMBOL(ldlm_dump_all_namespaces); static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd, struct hlist_node *hnode, void *arg) @@ -1355,12 +1354,11 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns) cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_res_hash_dump, - (void *)(unsigned long)level); + (void *)(unsigned long)level, 0); spin_lock(&ns->ns_lock); ns->ns_next_dump = cfs_time_shift(10); spin_unlock(&ns->ns_lock); } -EXPORT_SYMBOL(ldlm_namespace_dump); /** * Print information about all locks in this resource to debug log. 
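The ldlm_request.c and ldlm_resource.c hunks above rename the LRU cancellation flags from LDLM_CANCEL_* to LDLM_LRU_FLAG_* and add an age cutoff to the LRU-resize policy: a lock left unused for longer than ns_max_age is cancelled regardless of its lock value. Below is a minimal standalone sketch of that cutoff, not part of the patch; it reuses the cfs_time helpers and the l_last_used / ns_max_age fields from the hunk, and the helper name ldlm_lock_past_max_age is hypothetical.

/*
 * Illustrative sketch only (not part of the patch): the age check added to
 * ldlm_cancel_lrur_policy(). Returns true when the lock's last-used time is
 * more than ns_max_age in the past, in which case the policy cancels it.
 */
static bool ldlm_lock_past_max_age(struct ldlm_lock *lock,
				   struct ldlm_namespace *ns)
{
	return cfs_time_after(cfs_time_current(),
			      cfs_time_add(lock->l_last_used, ns->ns_max_age));
}

The same comparison appears inline in ldlm_cancel_lrur_policy() in the hunk above, where a true result maps to LDLM_POLICY_CANCEL_LOCK.
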
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile index 1ac0940bd8dfc69e8756fb2dfd9985902be13f51..322d4fa63f5d7bf0bda312ebfcd813a9920c47a4 100644 --- a/drivers/staging/lustre/lustre/llite/Makefile +++ b/drivers/staging/lustre/lustre/llite/Makefile @@ -1,7 +1,7 @@ obj-$(CONFIG_LUSTRE_FS) += lustre.o -lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \ - rw.o namei.o symlink.o llite_mmap.o range_lock.o \ - xattr.o xattr_cache.o rw26.o super25.o statahead.o \ - glimpse.o lcommon_cl.o lcommon_misc.o \ - vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \ +lustre-y := dcache.o dir.o file.o llite_lib.o llite_nfs.o \ + rw.o rw26.o namei.o symlink.o llite_mmap.o range_lock.o \ + xattr.o xattr_cache.o xattr_security.o \ + super25.o statahead.o glimpse.o lcommon_cl.o lcommon_misc.o \ + vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o \ lproc_llite.o diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c index 0e45d8fc4d7c84f14772893e9408b0a10db85bb0..65bf0c401b445852fe289f249200c4286053d6fe 100644 --- a/drivers/staging/lustre/lustre/llite/dcache.c +++ b/drivers/staging/lustre/lustre/llite/dcache.c @@ -57,9 +57,6 @@ static void ll_release(struct dentry *de) LASSERT(de); lld = ll_d2d(de); - if (!lld) /* NFS copies the de->d_op methods (bug 4655) */ - return; - if (lld->lld_it) { ll_intent_release(lld->lld_it); kfree(lld->lld_it); @@ -126,30 +123,13 @@ static int ll_ddelete(const struct dentry *de) return 0; } -int ll_d_init(struct dentry *de) +static int ll_d_init(struct dentry *de) { - CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n", - de, de, de->d_parent, d_inode(de), d_count(de)); - - if (!de->d_fsdata) { - struct ll_dentry_data *lld; - - lld = kzalloc(sizeof(*lld), GFP_NOFS); - if (likely(lld)) { - spin_lock(&de->d_lock); - if (likely(!de->d_fsdata)) { - de->d_fsdata = lld; - __d_lustre_invalidate(de); - } else { - kfree(lld); - } - spin_unlock(&de->d_lock); - } else { - return -ENOMEM; - } - } - LASSERT(de->d_op == &ll_d_ops); - + struct ll_dentry_data *lld = kzalloc(sizeof(*lld), GFP_KERNEL); + if (unlikely(!lld)) + return -ENOMEM; + lld->lld_invalid = 1; + de->d_fsdata = lld; return 0; } @@ -300,6 +280,7 @@ static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags) } const struct dentry_operations ll_d_ops = { + .d_init = ll_d_init, .d_revalidate = ll_revalidate_nd, .d_release = ll_release, .d_delete = ll_ddelete, diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 7f32a539d260da5499a44b0eb2307722b8f7b455..ea5d247a3f70bc8817549687c4fcd54d09cf1d92 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -51,6 +51,8 @@ #include "../include/lustre_dlm.h" #include "../include/lustre_fid.h" #include "../include/lustre_kernelcomm.h" +#include "../include/lustre_swab.h" + #include "llite_internal.h" /* @@ -410,6 +412,8 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump, struct ptlrpc_request *request = NULL; struct md_op_data *op_data; struct ll_sb_info *sbi = ll_i2sbi(parent); + struct inode *inode = NULL; + struct dentry dentry; int err; if (unlikely(lump->lum_magic != LMV_USER_MAGIC)) @@ -419,6 +423,10 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump, PFID(ll_inode2fid(parent)), parent, dirname, (int)lump->lum_stripe_offset, lump->lum_stripe_count); + if 
(lump->lum_stripe_count > 1 && + !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE)) + return -EINVAL; + if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC)) lustre_swab_lmv_user_md(lump); @@ -439,8 +447,17 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump, from_kgid(&init_user_ns, current_fsgid()), cfs_curproc_cap_pack(), 0, &request); ll_finish_md_op_data(op_data); + + err = ll_prep_inode(&inode, request, parent->i_sb, NULL); if (err) goto err_exit; + + memset(&dentry, 0, sizeof(dentry)); + dentry.d_inode = inode; + + err = ll_init_security(&dentry, inode, parent); + iput(inode); + err_exit: ptlrpc_req_finished(request); return err; @@ -501,8 +518,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, return PTR_ERR(op_data); /* swabbing is done in lov_setstripe() on server side */ - rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, - NULL, 0, &req, NULL); + rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req); ll_finish_md_op_data(op_data); ptlrpc_req_finished(req); if (rc) { @@ -682,7 +698,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy) { struct ll_sb_info *sbi = ll_s2sbi(sb); struct hsm_progress_kernel hpk; - int rc; + int rc2, rc = 0; /* Forge a hsm_progress based on data from copy. */ hpk.hpk_fid = copy->hc_hai.hai_fid; @@ -732,10 +748,10 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy) /* On error, the request should be considered as completed */ if (hpk.hpk_errval > 0) hpk.hpk_flags |= HP_FLAG_COMPLETED; - rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), - &hpk, NULL); + rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), + &hpk, NULL); - return rc; + return rc ? rc : rc2; } /** @@ -757,7 +773,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) { struct ll_sb_info *sbi = ll_s2sbi(sb); struct hsm_progress_kernel hpk; - int rc; + int rc2, rc = 0; /* If you modify the logic here, also check llapi_hsm_copy_end(). */ /* Take care: copy->hc_hai.hai_action, len, gid and data are not @@ -823,18 +839,18 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) * when the file will not be modified for some tunable * time */ - /* we do not notify caller */ hpk.hpk_flags &= ~HP_FLAG_RETRY; + rc = -EBUSY; /* hpk_errval must be >= 0 */ - hpk.hpk_errval = EBUSY; + hpk.hpk_errval = -rc; } } progress: - rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), - &hpk, NULL); + rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), + &hpk, NULL); - return rc; + return rc ? 
rc : rc2; } static int copy_and_ioctl(int cmd, struct obd_export *exp, @@ -862,10 +878,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) int rc = 0; switch (cmd) { - case LUSTRE_Q_INVALIDATE: - case LUSTRE_Q_FINVALIDATE: - case Q_QUOTAON: - case Q_QUOTAOFF: case Q_SETQUOTA: case Q_SETINFO: if (!capable(CFS_CAP_SYS_ADMIN)) @@ -930,10 +942,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) QCTL_COPY(oqctl, qctl); rc = obd_quotactl(sbi->ll_md_exp, oqctl); if (rc) { - if (rc != -EALREADY && cmd == Q_QUOTAON) { - oqctl->qc_cmd = Q_QUOTAOFF; - obd_quotactl(sbi->ll_md_exp, oqctl); - } kfree(oqctl); return rc; } @@ -1077,7 +1085,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) goto out_free; } - rc = ll_get_fid_by_name(inode, filename, namelen, NULL); + rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL); if (rc < 0) { CERROR("%s: lookup %.*s failed: rc = %d\n", ll_get_fsname(inode->i_sb, NULL, 0), namelen, @@ -1189,6 +1197,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct lmv_user_md *tmp = NULL; union lmv_mds_md *lmm = NULL; u64 valid = 0; + int max_stripe_count; int stripe_count; int mdt_index; int lum_size; @@ -1200,6 +1209,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (copy_from_user(&lum, ulmv, sizeof(*ulmv))) return -EFAULT; + max_stripe_count = lum.lum_stripe_count; /* * lum_magic will indicate which stripe the ioctl will like * to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC @@ -1219,9 +1229,6 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) /* Get default LMV EA */ if (lum.lum_magic == LMV_USER_MAGIC) { - if (rc) - goto finish_req; - if (lmmsize > sizeof(*ulmv)) { rc = -EINVAL; goto finish_req; @@ -1234,6 +1241,16 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } stripe_count = lmv_mds_md_stripe_count_get(lmm); + if (max_stripe_count < stripe_count) { + lum.lum_stripe_count = stripe_count; + if (copy_to_user(ulmv, &lum, sizeof(lum))) { + rc = -EFAULT; + goto finish_req; + } + rc = -E2BIG; + goto finish_req; + } + lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1); tmp = kzalloc(lum_size, GFP_NOFS); if (!tmp) { @@ -1370,134 +1387,6 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ll_putname(filename); return rc; } - case IOC_LOV_GETINFO: { - struct lov_user_mds_data __user *lumd; - struct lov_stripe_md *lsm; - struct lov_user_md __user *lum; - struct lov_mds_md *lmm; - int lmmsize; - lstat_t st; - - lumd = (struct lov_user_mds_data __user *)arg; - lum = &lumd->lmd_lmm; - - rc = ll_get_max_mdsize(sbi, &lmmsize); - if (rc) - return rc; - - lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS); - if (!lmm) - return -ENOMEM; - if (copy_from_user(lmm, lum, lmmsize)) { - rc = -EFAULT; - goto free_lmm; - } - - switch (lmm->lmm_magic) { - case LOV_USER_MAGIC_V1: - if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1) - break; - /* swab objects first so that stripes num will be sane */ - lustre_swab_lov_user_md_objects( - ((struct lov_user_md_v1 *)lmm)->lmm_objects, - ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count); - lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm); - break; - case LOV_USER_MAGIC_V3: - if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3) - break; - /* swab objects first so that stripes num will be sane */ - lustre_swab_lov_user_md_objects( - ((struct 
lov_user_md_v3 *)lmm)->lmm_objects, - ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count); - lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm); - break; - default: - rc = -EINVAL; - goto free_lmm; - } - - rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize); - if (rc < 0) { - rc = -ENOMEM; - goto free_lmm; - } - - /* Perform glimpse_size operation. */ - memset(&st, 0, sizeof(st)); - - rc = ll_glimpse_ioctl(sbi, lsm, &st); - if (rc) - goto free_lsm; - - if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) { - rc = -EFAULT; - goto free_lsm; - } - -free_lsm: - obd_free_memmd(sbi->ll_dt_exp, &lsm); -free_lmm: - kvfree(lmm); - return rc; - } - case OBD_IOC_QUOTACHECK: { - struct obd_quotactl *oqctl; - int error = 0; - - if (!capable(CFS_CAP_SYS_ADMIN)) - return -EPERM; - - oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS); - if (!oqctl) - return -ENOMEM; - oqctl->qc_type = arg; - rc = obd_quotacheck(sbi->ll_md_exp, oqctl); - if (rc < 0) { - CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc); - error = rc; - } - - rc = obd_quotacheck(sbi->ll_dt_exp, oqctl); - if (rc < 0) - CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc); - - kfree(oqctl); - return error ?: rc; - } - case OBD_IOC_POLL_QUOTACHECK: { - struct if_quotacheck *check; - - if (!capable(CFS_CAP_SYS_ADMIN)) - return -EPERM; - - check = kzalloc(sizeof(*check), GFP_NOFS); - if (!check) - return -ENOMEM; - - rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check, - NULL); - if (rc) { - CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc); - if (copy_to_user((void __user *)arg, check, - sizeof(*check))) - CDEBUG(D_QUOTA, "copy_to_user failed\n"); - goto out_poll; - } - - rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check, - NULL); - if (rc) { - CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc); - if (copy_to_user((void __user *)arg, check, - sizeof(*check))) - CDEBUG(D_QUOTA, "copy_to_user failed\n"); - goto out_poll; - } -out_poll: - kfree(check); - return rc; - } case OBD_IOC_QUOTACTL: { struct if_quotactl *qctl; @@ -1536,7 +1425,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp; vallen = sizeof(count); rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT), - KEY_TGT_COUNT, &vallen, &count, NULL); + KEY_TGT_COUNT, &vallen, &count); if (rc) { CERROR("get target count failed: %d\n", rc); return rc; diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index e1d784bae06494e0ae477cbd4818d1fea585b698..f634c11216e641311191333064bd2c35fed33643 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -44,6 +44,7 @@ #include #include "../include/lustre/ll_fiemap.h" #include "../include/lustre/lustre_ioctl.h" +#include "../include/lustre_swab.h" #include "../include/cl_object.h" #include "llite_internal.h" @@ -75,60 +76,56 @@ static void ll_file_data_put(struct ll_file_data *fd) kmem_cache_free(ll_file_data_slab, fd); } -void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data, - struct lustre_handle *fh) +/** + * Packs all the attributes into @op_data for the CLOSE rpc. 
+ */ +static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data, + struct obd_client_handle *och) { - op_data->op_fid1 = ll_i2info(inode)->lli_fid; + struct ll_inode_info *lli = ll_i2info(inode); + + ll_prep_md_op_data(op_data, inode, NULL, NULL, + 0, 0, LUSTRE_OPC_ANY, NULL); + op_data->op_attr.ia_mode = inode->i_mode; op_data->op_attr.ia_atime = inode->i_atime; op_data->op_attr.ia_mtime = inode->i_mtime; op_data->op_attr.ia_ctime = inode->i_ctime; op_data->op_attr.ia_size = i_size_read(inode); + op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET | + ATTR_MTIME | ATTR_MTIME_SET | + ATTR_CTIME | ATTR_CTIME_SET; op_data->op_attr_blocks = inode->i_blocks; op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags); - op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch; - if (fh) - op_data->op_handle = *fh; + op_data->op_handle = och->och_fh; - if (ll_i2info(inode)->lli_flags & LLIF_DATA_MODIFIED) + /* + * For HSM: if inode data has been modified, pack it so that + * MDT can set data dirty flag in the archive. + */ + if (och->och_flags & FMODE_WRITE && + test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) op_data->op_bias |= MDS_DATA_MODIFIED; } /** - * Closes the IO epoch and packs all the attributes into @op_data for - * the CLOSE rpc. + * Perform a close, possibly with a bias. + * The meaning of "data" depends on the value of "bias". + * + * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version. + * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to + * swap layouts with. */ -static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data, - struct obd_client_handle *och) -{ - op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET | - ATTR_MTIME | ATTR_MTIME_SET | - ATTR_CTIME | ATTR_CTIME_SET; - - if (!(och->och_flags & FMODE_WRITE)) - goto out; - - if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode)) - op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS; - else - ll_ioepoch_close(inode, op_data, &och, 0); - -out: - ll_pack_inode2opdata(inode, op_data, &och->och_fh); - ll_prep_md_op_data(op_data, inode, NULL, NULL, - 0, 0, LUSTRE_OPC_ANY, NULL); -} - static int ll_close_inode_openhandle(struct obd_export *md_exp, - struct inode *inode, struct obd_client_handle *och, - const __u64 *data_version) + struct inode *inode, + enum mds_op_bias bias, + void *data) { struct obd_export *exp = ll_i2mdexp(inode); struct md_op_data *op_data; struct ptlrpc_request *req = NULL; struct obd_device *obd = class_exp2obd(exp); - int epoch_close = 1; int rc; if (!obd) { @@ -150,65 +147,51 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, } ll_prepare_close(inode, op_data, och); - if (data_version) { - /* Pass in data_version implies release. 
*/ + switch (bias) { + case MDS_CLOSE_LAYOUT_SWAP: + LASSERT(data); + op_data->op_bias |= MDS_CLOSE_LAYOUT_SWAP; + op_data->op_data_version = 0; + op_data->op_lease_handle = och->och_lease_handle; + op_data->op_fid2 = *ll_inode2fid(data); + break; + + case MDS_HSM_RELEASE: + LASSERT(data); op_data->op_bias |= MDS_HSM_RELEASE; - op_data->op_data_version = *data_version; + op_data->op_data_version = *(__u64 *)data; op_data->op_lease_handle = och->och_lease_handle; op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS; + break; + + default: + LASSERT(!data); + break; } - epoch_close = op_data->op_flags & MF_EPOCH_CLOSE; + rc = md_close(md_exp, op_data, och->och_mod, &req); - if (rc == -EAGAIN) { - /* This close must have the epoch closed. */ - LASSERT(epoch_close); - /* MDS has instructed us to obtain Size-on-MDS attribute from - * OSTs and send setattr to back to MDS. - */ - rc = ll_som_update(inode, op_data); - if (rc) { - CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n", - ll_i2mdexp(inode)->exp_obd->obd_name, - PFID(ll_inode2fid(inode)), rc); - rc = 0; - } - } else if (rc) { + if (rc) { CERROR("%s: inode "DFID" mdc close failed: rc = %d\n", ll_i2mdexp(inode)->exp_obd->obd_name, PFID(ll_inode2fid(inode)), rc); } - /* DATA_MODIFIED flag was successfully sent on close, cancel data - * modification flag. - */ - if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) { - struct ll_inode_info *lli = ll_i2info(inode); - - spin_lock(&lli->lli_lock); - lli->lli_flags &= ~LLIF_DATA_MODIFIED; - spin_unlock(&lli->lli_lock); - } - - if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) { + if (op_data->op_bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP) && + !rc) { struct mdt_body *body; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (!(body->mbo_valid & OBD_MD_FLRELEASED)) + if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED)) rc = -EBUSY; } ll_finish_md_op_data(op_data); out: - if (exp_connect_som(exp) && !epoch_close && - S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) { - ll_queue_done_writing(inode, LLIF_DONE_WRITING); - } else { - md_clear_open_replay_data(md_exp, och); - /* Free @och if it is not waiting for DONE_WRITING. */ - och->och_fh.cookie = DEAD_HANDLE_MAGIC; - kfree(och); - } + md_clear_open_replay_data(md_exp, och); + och->och_fh.cookie = DEAD_HANDLE_MAGIC; + kfree(och); + if (req) /* This is close request */ ptlrpc_req_finished(req); return rc; @@ -252,7 +235,7 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode) * be closed. */ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, - inode, och, NULL); + och, inode, 0, NULL); } return rc; @@ -266,7 +249,9 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode, int lockmode; __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK; struct lustre_handle lockh; - ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN} }; + union ldlm_policy_data policy = { + .l_inodebits = { MDS_INODELOCK_OPEN } + }; int rc = 0; /* clear group lock, if present */ @@ -288,7 +273,8 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode, } if (fd->fd_och) { - rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL); + rc = ll_close_inode_openhandle(md_exp, fd->fd_och, inode, 0, + NULL); fd->fd_och = NULL; goto out; } @@ -437,20 +423,6 @@ static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize, return rc; } -/** - * Assign an obtained @ioepoch to client's inode. 
No lock is needed, MDS does - * not believe attributes if a few ioepoch holders exist. Attributes for - * previous ioepoch if new one is opened are also skipped by MDS. - */ -void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch) -{ - if (ioepoch && lli->lli_ioepoch != ioepoch) { - lli->lli_ioepoch = ioepoch; - CDEBUG(D_INODE, "Epoch %llu opened on "DFID"\n", - ioepoch, PFID(&lli->lli_fid)); - } -} - static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it, struct obd_client_handle *och) { @@ -470,23 +442,17 @@ static int ll_local_open(struct file *file, struct lookup_intent *it, struct ll_file_data *fd, struct obd_client_handle *och) { struct inode *inode = file_inode(file); - struct ll_inode_info *lli = ll_i2info(inode); LASSERT(!LUSTRE_FPRIVATE(file)); LASSERT(fd); if (och) { - struct mdt_body *body; int rc; rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och); if (rc != 0) return rc; - - body = req_capsule_server_get(&it->it_request->rq_pill, - &RMF_MDT_BODY); - ll_ioepoch_open(lli, body->mbo_ioepoch); } LUSTRE_FPRIVATE(file) = fd; @@ -677,12 +643,6 @@ int ll_file_open(struct inode *inode, struct file *file) if (!S_ISREG(inode->i_mode)) goto out_och_free; - if (!lli->lli_has_smd && - (cl_is_lov_delay_create(file->f_flags) || - (file->f_mode & FMODE_WRITE) == 0)) { - CDEBUG(D_INODE, "object creation was delayed\n"); - goto out_och_free; - } cl_lov_delay_create_clear(&file->f_flags); goto out_och_free; @@ -867,7 +827,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, it.it_lock_mode = 0; och->och_lease_handle.cookie = 0ULL; } - rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL); + rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, och, inode, 0, NULL); if (rc2 < 0) CERROR("%s: error closing file "DFID": %d\n", ll_get_fsname(inode->i_sb, NULL, 0), @@ -880,6 +840,69 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, return ERR_PTR(rc); } +/** + * Check whether a layout swap can be done between two inodes. + * + * \param[in] inode1 First inode to check + * \param[in] inode2 Second inode to check + * + * \retval 0 on success, layout swap can be performed between both inodes + * \retval negative error code if requirements are not met + */ +static int ll_check_swap_layouts_validity(struct inode *inode1, + struct inode *inode2) +{ + if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode)) + return -EINVAL; + + if (inode_permission(inode1, MAY_WRITE) || + inode_permission(inode2, MAY_WRITE)) + return -EPERM; + + if (inode1->i_sb != inode2->i_sb) + return -EXDEV; + + return 0; +} + +static int ll_swap_layouts_close(struct obd_client_handle *och, + struct inode *inode, struct inode *inode2) +{ + const struct lu_fid *fid1 = ll_inode2fid(inode); + const struct lu_fid *fid2; + int rc; + + CDEBUG(D_INODE, "%s: biased close of file " DFID "\n", + ll_get_fsname(inode->i_sb, NULL, 0), PFID(fid1)); + + rc = ll_check_swap_layouts_validity(inode, inode2); + if (rc < 0) + goto out_free_och; + + /* We now know that inode2 is a lustre inode */ + fid2 = ll_inode2fid(inode2); + + rc = lu_fid_cmp(fid1, fid2); + if (!rc) { + rc = -EINVAL; + goto out_free_och; + } + + /* + * Close the file and swap layouts between inode & inode2. + * NB: lease lock handle is released in mdc_close_layout_swap_pack() + * because we still need it to pack l_remote_handle to MDT. 
+ */ + rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode, + MDS_CLOSE_LAYOUT_SWAP, inode2); + + och = NULL; /* freed in ll_close_inode_openhandle() */ + +out_free_och: + kfree(och); + return rc; +} + /** * Release lease and close the file. * It will check if the lease has ever broken. @@ -907,84 +930,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode, *lease_broken = cancelled; return ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, - inode, och, NULL); -} - -/* Fills the obdo with the attributes for the lsm */ -static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, - struct obdo *obdo, __u64 ioepoch, int dv_flags) -{ - struct ptlrpc_request_set *set; - struct obd_info oinfo = { }; - int rc; - - LASSERT(lsm); - - oinfo.oi_md = lsm; - oinfo.oi_oa = obdo; - oinfo.oi_oa->o_oi = lsm->lsm_oi; - oinfo.oi_oa->o_mode = S_IFREG; - oinfo.oi_oa->o_ioepoch = ioepoch; - oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | - OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | - OBD_MD_FLBLKSZ | OBD_MD_FLATIME | - OBD_MD_FLMTIME | OBD_MD_FLCTIME | - OBD_MD_FLGROUP | OBD_MD_FLEPOCH | - OBD_MD_FLDATAVERSION; - if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) { - oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS; - oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK; - if (dv_flags & LL_DV_WR_FLUSH) - oinfo.oi_oa->o_flags |= OBD_FL_FLUSH; - } - - set = ptlrpc_prep_set(); - if (!set) { - CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); - rc = -ENOMEM; - } else { - rc = obd_getattr_async(exp, &oinfo, set); - if (rc == 0) - rc = ptlrpc_set_wait(set); - ptlrpc_set_destroy(set); - } - if (rc == 0) { - oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | - OBD_MD_FLATIME | OBD_MD_FLMTIME | - OBD_MD_FLCTIME | OBD_MD_FLSIZE | - OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS); - if (dv_flags & LL_DV_WR_FLUSH && - !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS && - oinfo.oi_oa->o_flags & OBD_FL_FLUSH)) - return -ENOTSUPP; - } - return rc; -} - -/** - * Performs the getattr on the inode and updates its fields. - * If @sync != 0, perform the getattr under the server-side lock. - */ -int ll_inode_getattr(struct inode *inode, struct obdo *obdo, - __u64 ioepoch, int sync) -{ - struct lov_stripe_md *lsm; - int rc; - - lsm = ccc_inode_lsm_get(inode); - rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode), - obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0); - if (rc == 0) { - struct ost_id *oi = lsm ? 
&lsm->lsm_oi : &obdo->o_oi; - - obdo_refresh_inode(inode, obdo, obdo->o_valid); - CDEBUG(D_INODE, "objid " DOSTID " size %llu, blocks %llu, blksize %lu\n", - POSTID(oi), i_size_read(inode), - (unsigned long long)inode->i_blocks, - 1UL << inode->i_blkbits); - } - ccc_inode_lsm_put(inode, lsm); - return rc; + och, inode, 0, NULL); } int ll_merge_attr(const struct lu_env *env, struct inode *inode) @@ -1043,23 +989,6 @@ int ll_merge_attr(const struct lu_env *env, struct inode *inode) return rc; } -int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm, - lstat_t *st) -{ - struct obdo obdo = { 0 }; - int rc; - - rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, &obdo, 0, 0); - if (rc == 0) { - st->st_size = obdo.o_size; - st->st_blocks = obdo.o_blocks; - st->st_mtime = obdo.o_mtime; - st->st_atime = obdo.o_atime; - st->st_ctime = obdo.o_ctime; - } - return rc; -} - static bool file_is_noatime(const struct file *file) { const struct vfsmount *mnt = file->f_path.mnt; @@ -1117,9 +1046,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, { struct ll_inode_info *lli = ll_i2info(file_inode(file)); struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct vvp_io *vio = vvp_env_io(env); struct range_lock range; struct cl_io *io; - ssize_t result; + ssize_t result = 0; + int rc = 0; CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n", file, iot, *ppos, count); @@ -1151,18 +1082,15 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n", range.rl_node.in_extent.start, range.rl_node.in_extent.end); - result = range_lock(&lli->lli_write_tree, - &range); - if (result < 0) + rc = range_lock(&lli->lli_write_tree, &range); + if (rc < 0) goto out; range_locked = true; } - down_read(&lli->lli_trunc_sem); ll_cl_add(file, env, io); - result = cl_io_loop(env, io); + rc = cl_io_loop(env, io); ll_cl_remove(file, env); - up_read(&lli->lli_trunc_sem); if (range_locked) { CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n", range.rl_node.in_extent.start, @@ -1171,24 +1099,26 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, } } else { /* cl_io_rw_init() handled IO */ - result = io->ci_result; + rc = io->ci_result; } if (io->ci_nob > 0) { result = io->ci_nob; + count -= io->ci_nob; *ppos = io->u.ci_wr.wr.crw_pos; + + /* prepare IO restart */ + if (count > 0) + args->u.normal.via_iter = vio->vui_iter; } - goto out; out: cl_io_fini(env, io); - /* If any bit been read/written (result != 0), we just return - * short read/write instead of restart io. - */ - if ((result == 0 || result == -ENODATA) && io->ci_need_restart) { - CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zu\n", + + if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) { + CDEBUG(D_VFSTRACE, "%s: restart %s from %lld, count:%zu, result: %zd\n", + file_dentry(file)->d_name.name, iot == CIT_READ ? 
"read" : "write", - file, *ppos, count); - LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob); + *ppos, count, result); goto restart; } @@ -1201,13 +1131,19 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE_BYTES, result); fd->fd_write_failed = false; - } else if (result != -ERESTARTSYS) { + } else if (!result && !rc) { + rc = io->ci_result; + if (rc < 0) + fd->fd_write_failed = true; + else + fd->fd_write_failed = false; + } else if (rc != -ERESTARTSYS) { fd->fd_write_failed = true; } } CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result); - return result; + return result > 0 ? result : rc; } static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to) @@ -1259,27 +1195,14 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, __u64 flags, struct lov_user_md *lum, int lum_size) { - struct lov_stripe_md *lsm = NULL; struct lookup_intent oit = { .it_op = IT_OPEN, .it_flags = flags | MDS_OPEN_BY_FID, }; int rc = 0; - lsm = ccc_inode_lsm_get(inode); - if (lsm) { - ccc_inode_lsm_put(inode, lsm); - CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n", - PFID(ll_inode2fid(inode))); - rc = -EEXIST; - goto out; - } - ll_inode_size_lock(inode); rc = ll_intent_file_open(dentry, lum, lum_size, &oit); - if (rc < 0) - goto out_unlock; - rc = oit.it_status; if (rc < 0) goto out_unlock; @@ -1288,8 +1211,6 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, out_unlock: ll_inode_size_unlock(inode); ll_intent_release(&oit); - ccc_inode_lsm_put(inode, lsm); -out: return rc; } @@ -1566,7 +1487,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it) ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och); rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, - inode, och, NULL); + och, inode, 0, NULL); out: /* this one is in place of ll_file_open */ if (it_disposition(it, DISP_ENQ_OPEN_REF)) { @@ -1579,15 +1500,17 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it) /** * Get size for inode for which FIEMAP mapping is requested. * Make the FIEMAP get_info call and returns the result. + * + * \param fiemap kernel buffer to hold extens + * \param num_bytes kernel buffer size */ -static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, +static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap, size_t num_bytes) { - struct obd_export *exp = ll_i2dtexp(inode); - struct lov_stripe_md *lsm = NULL; - struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, }; - __u32 vallen = num_bytes; - int rc; + struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, }; + struct lu_env *env; + int refcheck; + int rc = 0; /* Checks for fiemap flags */ if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) { @@ -1602,21 +1525,9 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, return rc; } - lsm = ccc_inode_lsm_get(inode); - if (!lsm) - return -ENOENT; - - /* If the stripe_count > 1 and the application does not understand - * DEVICE_ORDER flag, then it cannot interpret the extents correctly. 
- */ - if (lsm->lsm_stripe_count > 1 && - !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) { - rc = -EOPNOTSUPP; - goto out; - } - - fm_key.oa.o_oi = lsm->lsm_oi; - fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP; + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + return PTR_ERR(env); if (i_size_read(inode) == 0) { rc = ll_glimpse_size(inode); @@ -1624,24 +1535,23 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, goto out; } - obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLSIZE); - obdo_set_parent_fid(&fm_key.oa, &ll_i2info(inode)->lli_fid); + fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP; + obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE); + obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid); + /* If filesize is 0, then there would be no objects for mapping */ - if (fm_key.oa.o_size == 0) { + if (fmkey.lfik_oa.o_size == 0) { fiemap->fm_mapped_extents = 0; rc = 0; goto out; } - memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap)); - - rc = obd_get_info(NULL, exp, sizeof(fm_key), &fm_key, &vallen, - fiemap, lsm); - if (rc) - CERROR("obd_get_info failed: rc = %d\n", rc); + memcpy(&fmkey.lfik_fiemap, fiemap, sizeof(*fiemap)); + rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob, + &fmkey, fiemap, &num_bytes); out: - ccc_inode_lsm_put(inode, lsm); + cl_env_put(env, &refcheck); return rc; } @@ -1689,113 +1599,56 @@ int ll_fid2path(struct inode *inode, void __user *arg) return rc; } -static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) -{ - struct ll_user_fiemap *fiemap_s; - size_t num_bytes, ret_bytes; - unsigned int extent_count; - int rc = 0; - - /* Get the extent count so we can calculate the size of - * required fiemap buffer - */ - if (get_user(extent_count, - &((struct ll_user_fiemap __user *)arg)->fm_extent_count)) - return -EFAULT; - - if (extent_count >= - (SIZE_MAX - sizeof(*fiemap_s)) / sizeof(struct ll_fiemap_extent)) - return -EINVAL; - num_bytes = sizeof(*fiemap_s) + (extent_count * - sizeof(struct ll_fiemap_extent)); - - fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS); - if (!fiemap_s) - return -ENOMEM; - - /* get the fiemap value */ - if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg, - sizeof(*fiemap_s))) { - rc = -EFAULT; - goto error; - } - - /* If fm_extent_count is non-zero, read the first extent since - * it is used to calculate end_offset and device from previous - * fiemap call. - */ - if (extent_count) { - if (copy_from_user(&fiemap_s->fm_extents[0], - (char __user *)arg + sizeof(*fiemap_s), - sizeof(struct ll_fiemap_extent))) { - rc = -EFAULT; - goto error; - } - } - - rc = ll_do_fiemap(inode, fiemap_s, num_bytes); - if (rc) - goto error; - - ret_bytes = sizeof(struct ll_user_fiemap); - - if (extent_count != 0) - ret_bytes += (fiemap_s->fm_mapped_extents * - sizeof(struct ll_fiemap_extent)); - - if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes)) - rc = -EFAULT; - -error: - kvfree(fiemap_s); - return rc; -} - /* * Read the data_version for inode. * * This value is computed using stripe object version on OST. * Version is computed using server side locking. 
* - * @param sync if do sync on the OST side; + * @param flags if do sync on the OST side; * 0: no sync * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs */ int ll_data_version(struct inode *inode, __u64 *data_version, int flags) { - struct lov_stripe_md *lsm = NULL; - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct obdo *obdo = NULL; - int rc; + struct cl_object *obj = ll_i2info(inode)->lli_clob; + struct lu_env *env; + struct cl_io *io; + int refcheck; + int result; - /* If no stripe, we consider version is 0. */ - lsm = ccc_inode_lsm_get(inode); - if (!lsm_has_objects(lsm)) { + /* If no file object initialized, we consider its version is 0. */ + if (!obj) { *data_version = 0; - CDEBUG(D_INODE, "No object for inode\n"); - rc = 0; - goto out; + return 0; } - obdo = kzalloc(sizeof(*obdo), GFP_NOFS); - if (!obdo) { - rc = -ENOMEM; - goto out; - } + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + return PTR_ERR(env); - rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags); - if (rc == 0) { - if (!(obdo->o_valid & OBD_MD_FLDATAVERSION)) - rc = -EOPNOTSUPP; - else - *data_version = obdo->o_data_version; - } + io = vvp_env_thread_io(env); + io->ci_obj = obj; + io->u.ci_data_version.dv_data_version = 0; + io->u.ci_data_version.dv_flags = flags; - kfree(obdo); -out: - ccc_inode_lsm_put(inode, lsm); - return rc; +restart: + if (!cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj)) + result = cl_io_loop(env, io); + else + result = io->ci_result; + + *data_version = io->u.ci_data_version.dv_data_version; + + cl_io_fini(env, io); + + if (unlikely(io->ci_need_restart)) + goto restart; + + cl_env_put(env, &refcheck); + + return result; } /* @@ -1803,11 +1656,11 @@ int ll_data_version(struct inode *inode, __u64 *data_version, int flags) */ int ll_hsm_release(struct inode *inode) { - struct cl_env_nest nest; struct lu_env *env; struct obd_client_handle *och = NULL; __u64 data_version = 0; int rc; + int refcheck; CDEBUG(D_INODE, "%s: Releasing file "DFID".\n", ll_get_fsname(inode->i_sb, NULL, 0), @@ -1824,21 +1677,21 @@ int ll_hsm_release(struct inode *inode) if (rc != 0) goto out; - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) { rc = PTR_ERR(env); goto out; } ll_merge_attr(env, inode); - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); /* Release the file. * NB: lease lock handle is released in mdc_hsm_release_pack() because * we still need it to pack l_remote_handle to MDT. 
*/ - rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, - &data_version); + rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode, + MDS_HSM_RELEASE, &data_version); och = NULL; out: @@ -1849,10 +1702,12 @@ int ll_hsm_release(struct inode *inode) } struct ll_swap_stack { - struct iattr ia1, ia2; - __u64 dv1, dv2; - struct inode *inode1, *inode2; - bool check_dv1, check_dv2; + u64 dv1; + u64 dv2; + struct inode *inode1; + struct inode *inode2; + bool check_dv1; + bool check_dv2; }; static int ll_swap_layouts(struct file *file1, struct file *file2, @@ -1872,21 +1727,9 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, llss->inode1 = file_inode(file1); llss->inode2 = file_inode(file2); - if (!S_ISREG(llss->inode2->i_mode)) { - rc = -EINVAL; - goto free; - } - - if (inode_permission(llss->inode1, MAY_WRITE) || - inode_permission(llss->inode2, MAY_WRITE)) { - rc = -EPERM; + rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2); + if (rc < 0) goto free; - } - - if (llss->inode2->i_sb != llss->inode1->i_sb) { - rc = -EXDEV; - goto free; - } /* we use 2 bool because it is easier to swap than 2 bits */ if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1) @@ -1900,10 +1743,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, llss->dv2 = lsl->sl_dv2; rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2)); - if (rc == 0) /* same file, done! */ { - rc = 0; + if (!rc) /* same file, done! */ goto free; - } if (rc < 0) { /* sequentialize it */ swap(llss->inode1, llss->inode2); @@ -1925,19 +1766,6 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, } } - /* to be able to restore mtime and atime after swap - * we need to first save them - */ - if (lsl->sl_flags & - (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) { - llss->ia1.ia_mtime = llss->inode1->i_mtime; - llss->ia1.ia_atime = llss->inode1->i_atime; - llss->ia1.ia_valid = ATTR_MTIME | ATTR_ATIME; - llss->ia2.ia_mtime = llss->inode2->i_mtime; - llss->ia2.ia_atime = llss->inode2->i_atime; - llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME; - } - /* ultimate check, before swapping the layouts we check if * dataversion has changed (if requested) */ @@ -1987,39 +1815,6 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, ll_put_grouplock(llss->inode1, file1, gid); } - /* rc can be set from obd_iocontrol() or from a GOTO(putgl, ...) 
*/ - if (rc != 0) - goto free; - - /* clear useless flags */ - if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_MTIME)) { - llss->ia1.ia_valid &= ~ATTR_MTIME; - llss->ia2.ia_valid &= ~ATTR_MTIME; - } - - if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_ATIME)) { - llss->ia1.ia_valid &= ~ATTR_ATIME; - llss->ia2.ia_valid &= ~ATTR_ATIME; - } - - /* update time if requested */ - rc = 0; - if (llss->ia2.ia_valid != 0) { - inode_lock(llss->inode1); - rc = ll_setattr(file1->f_path.dentry, &llss->ia2); - inode_unlock(llss->inode1); - } - - if (llss->ia1.ia_valid != 0) { - int rc1; - - inode_lock(llss->inode2); - rc1 = ll_setattr(file2->f_path.dentry, &llss->ia1); - inode_unlock(llss->inode2); - if (rc == 0) - rc = rc1; - } - free: kfree(llss); @@ -2176,24 +1971,52 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) sizeof(struct lustre_swap_layouts))) return -EFAULT; - if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */ + if ((file->f_flags & O_ACCMODE) == O_RDONLY) return -EPERM; file2 = fget(lsl.sl_fd); if (!file2) return -EBADF; - rc = -EPERM; - if ((file2->f_flags & O_ACCMODE) != 0) /* O_WRONLY or O_RDWR */ + /* O_WRONLY or O_RDWR */ + if ((file2->f_flags & O_ACCMODE) == O_RDONLY) { + rc = -EPERM; + goto out; + } + + if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) { + struct obd_client_handle *och = NULL; + struct ll_inode_info *lli; + struct inode *inode2; + + if (lsl.sl_flags != SWAP_LAYOUTS_CLOSE) { + rc = -EINVAL; + goto out; + } + + lli = ll_i2info(inode); + mutex_lock(&lli->lli_och_mutex); + if (fd->fd_lease_och) { + och = fd->fd_lease_och; + fd->fd_lease_och = NULL; + } + mutex_unlock(&lli->lli_och_mutex); + if (!och) { + rc = -ENOLCK; + goto out; + } + inode2 = file_inode(file2); + rc = ll_swap_layouts_close(och, inode, inode2); + } else { rc = ll_swap_layouts(file, file2, &lsl); + } +out: fput(file2); return rc; } case LL_IOC_LOV_GETSTRIPE: return ll_file_getstripe(inode, (struct lov_user_md __user *)arg); - case FSFILT_IOC_FIEMAP: - return ll_ioctl_fiemap(inode, arg); case FSFILT_IOC_GETFLAGS: case FSFILT_IOC_SETFLAGS: return ll_iocontrol(inode, file, cmd, arg); @@ -2489,17 +2312,17 @@ static int ll_flush(struct file *file, fl_owner_t id) int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, enum cl_fsync_mode mode, int ignore_layout) { - struct cl_env_nest nest; struct lu_env *env; struct cl_io *io; struct cl_fsync_io *fio; int result; + int refcheck; if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL && mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL) return -EINVAL; - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); @@ -2522,7 +2345,7 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, if (result == 0) result = fio->fi_nr_written; cl_io_fini(env, io); - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); return result; } @@ -2549,9 +2372,11 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) lli->lli_async_rc = 0; if (rc == 0) rc = err; - err = lov_read_and_clear_async_rc(lli->lli_clob); - if (rc == 0) - rc = err; + if (lli->lli_clob) { + err = lov_read_and_clear_async_rc(lli->lli_clob); + if (rc == 0) + rc = err; + } } err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req); @@ -2588,7 +2413,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) }; struct md_op_data *op_data; struct lustre_handle lockh = {0}; - ldlm_policy_data_t flock = { {0} }; + union ldlm_policy_data flock = { { 0 } }; int fl_type = file_lock->fl_type; __u64 
flags = 0; int rc; @@ -2707,7 +2532,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) } int ll_get_fid_by_name(struct inode *parent, const char *name, - int namelen, struct lu_fid *fid) + int namelen, struct lu_fid *fid, + struct inode **inode) { struct md_op_data *op_data = NULL; struct ptlrpc_request *req; @@ -2719,7 +2545,7 @@ int ll_get_fid_by_name(struct inode *parent, const char *name, if (IS_ERR(op_data)) return PTR_ERR(op_data); - op_data->op_valid = OBD_MD_FLID; + op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE; rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req); ll_finish_md_op_data(op_data); if (rc < 0) @@ -2732,6 +2558,9 @@ int ll_get_fid_by_name(struct inode *parent, const char *name, } if (fid) *fid = body->mbo_fid1; + + if (inode) + rc = ll_prep_inode(inode, req, parent->i_sb, NULL); out_req: ptlrpc_req_finished(req); return rc; @@ -2741,9 +2570,12 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx, const char *name, int namelen) { struct ptlrpc_request *request = NULL; + struct obd_client_handle *och = NULL; struct inode *child_inode = NULL; struct dentry *dchild = NULL; struct md_op_data *op_data; + struct mdt_body *body; + u64 data_version = 0; struct qstr qstr; int rc; @@ -2762,22 +2594,25 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx, dchild = d_lookup(file_dentry(file), &qstr); if (dchild) { op_data->op_fid3 = *ll_inode2fid(dchild->d_inode); - if (dchild->d_inode) { + if (dchild->d_inode) child_inode = igrab(dchild->d_inode); - if (child_inode) { - inode_lock(child_inode); - op_data->op_fid3 = *ll_inode2fid(child_inode); - ll_invalidate_aliases(child_inode); - } - } dput(dchild); - } else { + } + + if (!child_inode) { rc = ll_get_fid_by_name(parent, name, namelen, - &op_data->op_fid3); + &op_data->op_fid3, &child_inode); if (rc) goto out_free; } + if (!child_inode) { + rc = -EINVAL; + goto out_free; + } + + inode_lock(child_inode); + op_data->op_fid3 = *ll_inode2fid(child_inode); if (!fid_is_sane(&op_data->op_fid3)) { CERROR("%s: migrate %s, but fid "DFID" is insane\n", ll_get_fsname(parent->i_sb, NULL, 0), name, @@ -2796,6 +2631,26 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx, rc = 0; goto out_free; } +again: + if (S_ISREG(child_inode->i_mode)) { + och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0); + if (IS_ERR(och)) { + rc = PTR_ERR(och); + och = NULL; + goto out_free; + } + + rc = ll_data_version(child_inode, &data_version, + LL_DV_WR_FLUSH); + if (rc) + goto out_free; + + op_data->op_handle = och->och_fh; + op_data->op_data = och->och_mod; + op_data->op_data_version = data_version; + op_data->op_lease_handle = och->och_lease_handle; + op_data->op_bias |= MDS_RENAME_MIGRATE; + } op_data->op_mds = mdtidx; op_data->op_cli_flags = CLI_MIGRATE; @@ -2804,10 +2659,32 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx, if (!rc) ll_update_times(request, parent); - ptlrpc_req_finished(request); + body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); + if (!body) { + rc = -EPROTO; + goto out_free; + } + + /* + * If the server does release layout lock, then we cleanup + * the client och here, otherwise release it in out_free: + */ + if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) { + obd_mod_put(och->och_mod); + md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp, och); + och->och_fh.cookie = DEAD_HANDLE_MAGIC; + kfree(och); + och = NULL; + } + ptlrpc_req_finished(request); + /* Try again if the file layout has changed. 
*/ + if (rc == -EAGAIN && S_ISREG(child_inode->i_mode)) + goto again; out_free: if (child_inode) { + if (och) /* close the file */ + ll_lease_close(och, child_inode, NULL); clear_nlink(child_inode); inode_unlock(child_inode); iput(child_inode); @@ -2837,7 +2714,7 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, enum ldlm_mode l_req_mode) { struct lustre_handle lockh; - ldlm_policy_data_t policy; + union ldlm_policy_data policy; enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ? (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode; struct lu_fid *fid; @@ -2878,7 +2755,7 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, struct lustre_handle *lockh, __u64 flags, enum ldlm_mode mode) { - ldlm_policy_data_t policy = { .l_inodebits = {bits} }; + union ldlm_policy_data policy = { .l_inodebits = { bits } }; struct lu_fid *fid; fid = &ll_i2info(inode)->lli_fid; @@ -2893,6 +2770,13 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc) /* Already unlinked. Just update nlink and return success */ if (rc == -ENOENT) { clear_nlink(inode); + /* If it is striped directory, and there is bad stripe + * Let's revalidate the dentry again, instead of returning + * error + */ + if (S_ISDIR(inode->i_mode) && ll_i2info(inode)->lli_lsm_md) + return 0; + /* This path cannot be hit for regular files unless in * case of obscure races, so no need to validate size. */ @@ -3040,6 +2924,8 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits) LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime; LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime; } else { + struct ll_inode_info *lli = ll_i2info(inode); + /* In case of restore, the MDT has the right size and has * already send it back without granting the layout lock, * inode is up-to-date so glimpse is useless. 
@@ -3047,7 +2933,7 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits) * restore the MDT holds the layout lock so the glimpse will * block up to the end of restore (getattr will block) */ - if (!(ll_i2info(inode)->lli_flags & LLIF_FILE_RESTORING)) + if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) rc = ll_glimpse_size(inode); } return rc; @@ -3095,13 +2981,12 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, { int rc; size_t num_bytes; - struct ll_user_fiemap *fiemap; + struct fiemap *fiemap; unsigned int extent_count = fieinfo->fi_extents_max; num_bytes = sizeof(*fiemap) + (extent_count * - sizeof(struct ll_fiemap_extent)); + sizeof(struct fiemap_extent)); fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS); - if (!fiemap) return -ENOMEM; @@ -3109,9 +2994,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, fiemap->fm_extent_count = fieinfo->fi_extents_max; fiemap->fm_start = start; fiemap->fm_length = len; + if (extent_count > 0 && copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start, - sizeof(struct ll_fiemap_extent)) != 0) { + sizeof(struct fiemap_extent))) { rc = -EFAULT; goto out; } @@ -3123,11 +3009,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (extent_count > 0 && copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0], fiemap->fm_mapped_extents * - sizeof(struct ll_fiemap_extent)) != 0) { + sizeof(struct fiemap_extent))) { rc = -EFAULT; goto out; } - out: kvfree(fiemap); return rc; @@ -3370,35 +3255,50 @@ ll_iocontrol_call(struct inode *inode, struct file *file, int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) { struct ll_inode_info *lli = ll_i2info(inode); - struct cl_env_nest nest; + struct cl_object *obj = lli->lli_clob; struct lu_env *env; - int result; + int rc; + int refcheck; - if (!lli->lli_clob) + if (!obj) return 0; - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); - result = cl_conf_set(env, lli->lli_clob, conf); - cl_env_nested_put(&nest, env); + rc = cl_conf_set(env, obj, conf); + if (rc < 0) + goto out; if (conf->coc_opc == OBJECT_CONF_SET) { struct ldlm_lock *lock = conf->coc_lock; + struct cl_layout cl = { + .cl_layout_gen = 0, + }; LASSERT(lock); LASSERT(ldlm_has_layout(lock)); - if (result == 0) { - /* it can only be allowed to match after layout is - * applied to inode otherwise false layout would be - * seen. Applying layout should happen before dropping - * the intent lock. - */ - ldlm_lock_allow_match(lock); - } + + /* it can only be allowed to match after layout is + * applied to inode otherwise false layout would be + * seen. Applying layout should happen before dropping + * the intent lock. + */ + ldlm_lock_allow_match(lock); + + rc = cl_object_layout_get(env, obj, &cl); + if (rc < 0) + goto out; + + CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n", + PFID(&lli->lli_fid), ll_layout_version_get(lli), + cl.cl_layout_gen); + ll_layout_version_set(lli, cl.cl_layout_gen); } - return result; +out: + cl_env_put(env, &refcheck); + return rc; } /* Fetch layout from MDT with getxattr request, if it's not ready yet */ @@ -3477,12 +3377,11 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) * in this function. 
*/ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, - struct inode *inode, __u32 *gen, bool reconf) + struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); struct ll_sb_info *sbi = ll_i2sbi(inode); struct ldlm_lock *lock; - struct lustre_md md = { NULL }; struct cl_object_conf conf; int rc = 0; bool lvb_ready; @@ -3494,8 +3393,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, LASSERT(lock); LASSERT(ldlm_has_layout(lock)); - LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d", - PFID(&lli->lli_fid), inode, reconf); + LDLM_DEBUG(lock, "File " DFID "(%p) being reconfigured", + PFID(&lli->lli_fid), inode); /* in case this is a caching lock and reinstate with new inode */ md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL); @@ -3506,15 +3405,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, /* checking lvb_ready is racy but this is okay. The worst case is * that multi processes may configure the file on the same time. */ - if (lvb_ready || !reconf) { - rc = -ENODATA; - if (lvb_ready) { - /* layout_gen must be valid if layout lock is not - * cancelled and stripe has already set - */ - *gen = ll_layout_version_get(lli); - rc = 0; - } + if (lvb_ready) { + rc = 0; goto out; } @@ -3524,39 +3416,19 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, /* for layout lock, lmm is returned in lock's lvb. * lvb_data is immutable if the lock is held so it's safe to access it - * without res lock. See the description in ldlm_lock_decref_internal() - * for the condition to free lvb_data of layout lock - */ - if (lock->l_lvb_data) { - rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm, - lock->l_lvb_data, lock->l_lvb_len); - if (rc >= 0) { - *gen = LL_LAYOUT_GEN_EMPTY; - if (md.lsm) - *gen = md.lsm->lsm_layout_gen; - rc = 0; - } else { - CERROR("%s: file " DFID " unpackmd error: %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&lli->lli_fid), rc); - } - } - if (rc < 0) - goto out; - - /* set layout to file. Unlikely this will fail as old layout was + * without res lock. + * + * set layout to file. Unlikely this will fail as old layout was * surely eliminated */ memset(&conf, 0, sizeof(conf)); conf.coc_opc = OBJECT_CONF_SET; conf.coc_inode = inode; conf.coc_lock = lock; - conf.u.coc_md = &md; + conf.u.coc_layout.lb_buf = lock->l_lvb_data; + conf.u.coc_layout.lb_len = lock->l_lvb_len; rc = ll_layout_conf(inode, &conf); - if (md.lsm) - obd_free_memmd(sbi->ll_dt_exp, &md.lsm); - /* refresh layout failed, need to wait */ wait_layout = rc == -EBUSY; @@ -3584,20 +3456,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, return rc; } -/** - * This function checks if there exists a LAYOUT lock on the client side, - * or enqueues it if it doesn't have one in cache. - * - * This function will not hold layout lock so it may be revoked any time after - * this function returns. Any operations depend on layout should be redone - * in that case. - * - * This function should be called before lov_io_init() to get an uptodate - * layout version, the caller should save the version number and after IO - * is finished, this function should be called again to verify that layout - * is not changed during IO time. 
- */ -int ll_layout_refresh(struct inode *inode, __u32 *gen) +static int ll_layout_refresh_locked(struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); struct ll_sb_info *sbi = ll_i2sbi(inode); @@ -3613,17 +3472,6 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) }; int rc; - *gen = ll_layout_version_get(lli); - if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != LL_LAYOUT_GEN_NONE) - return 0; - - /* sanity checks */ - LASSERT(fid_is_sane(ll_inode2fid(inode))); - LASSERT(S_ISREG(inode->i_mode)); - - /* take layout lock mutex to enqueue layout lock exclusively. */ - mutex_lock(&lli->lli_layout_mutex); - again: /* mostly layout lock is caching on the local side, so try to match * it before grabbing layout lock mutex. @@ -3631,20 +3479,16 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0, LCK_CR | LCK_CW | LCK_PR | LCK_PW); if (mode != 0) { /* hit cached lock */ - rc = ll_layout_lock_set(&lockh, mode, inode, gen, true); + rc = ll_layout_lock_set(&lockh, mode, inode); if (rc == -EAGAIN) goto again; - - mutex_unlock(&lli->lli_layout_mutex); return rc; } op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0, LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) { - mutex_unlock(&lli->lli_layout_mutex); + if (IS_ERR(op_data)) return PTR_ERR(op_data); - } /* have to enqueue one */ memset(&it, 0, sizeof(it)); @@ -3668,10 +3512,50 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) if (rc == 0) { /* set lock data in case this is a new lock */ ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL); - rc = ll_layout_lock_set(&lockh, mode, inode, gen, true); + rc = ll_layout_lock_set(&lockh, mode, inode); if (rc == -EAGAIN) goto again; } + + return rc; +} + +/** + * This function checks if there exists a LAYOUT lock on the client side, + * or enqueues it if it doesn't have one in cache. + * + * This function will not hold layout lock so it may be revoked any time after + * this function returns. Any operations depend on layout should be redone + * in that case. + * + * This function should be called before lov_io_init() to get an uptodate + * layout version, the caller should save the version number and after IO + * is finished, this function should be called again to verify that layout + * is not changed during IO time. + */ +int ll_layout_refresh(struct inode *inode, __u32 *gen) +{ + struct ll_inode_info *lli = ll_i2info(inode); + struct ll_sb_info *sbi = ll_i2sbi(inode); + int rc; + + *gen = ll_layout_version_get(lli); + if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE) + return 0; + + /* sanity checks */ + LASSERT(fid_is_sane(ll_inode2fid(inode))); + LASSERT(S_ISREG(inode->i_mode)); + + /* take layout lock mutex to enqueue layout lock exclusively. 
*/ + mutex_lock(&lli->lli_layout_mutex); + + rc = ll_layout_refresh_locked(inode); + if (rc < 0) + goto out; + + *gen = ll_layout_version_get(lli); +out: mutex_unlock(&lli->lli_layout_mutex); return rc; diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c index 22507b9c6d6937dd4d30d7c7945adeb16ddfba11..504498de536e1955e32a3eeda61bc6cae12b56ca 100644 --- a/drivers/staging/lustre/lustre/llite/glimpse.c +++ b/drivers/staging/lustre/lustre/llite/glimpse.c @@ -80,69 +80,60 @@ blkcnt_t dirty_cnt(struct inode *inode) int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, struct inode *inode, struct cl_object *clob, int agl) { - struct ll_inode_info *lli = ll_i2info(inode); const struct lu_fid *fid = lu_object_fid(&clob->co_lu); - int result; + struct cl_lock *lock = vvp_env_lock(env); + struct cl_lock_descr *descr = &lock->cll_descr; + int result = 0; + + CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid)); + + /* NOTE: this looks like DLM lock request, but it may + * not be one. Due to CEF_ASYNC flag (translated + * to LDLM_FL_HAS_INTENT by osc), this is + * glimpse request, that won't revoke any + * conflicting DLM locks held. Instead, + * ll_glimpse_callback() will be called on each + * client holding a DLM lock against this file, + * and resulting size will be returned for each + * stripe. DLM lock on [0, EOF] is acquired only + * if there were no conflicting locks. If there + * were conflicting locks, enqueuing or waiting + * fails with -ENAVAIL, but valid inode + * attributes are returned anyway. + */ + *descr = whole_file; + descr->cld_obj = clob; + descr->cld_mode = CLM_READ; + descr->cld_enq_flags = CEF_ASYNC | CEF_MUST; + if (agl) + descr->cld_enq_flags |= CEF_AGL; + /* + * CEF_ASYNC is used because glimpse sub-locks cannot + * deadlock (because they never conflict with other + * locks) and, hence, can be enqueued out-of-order. + * + * CEF_MUST protects glimpse lock from conversion into + * a lockless mode. + */ + result = cl_lock_request(env, io, lock); + if (result < 0) + return result; - result = 0; - if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) { - CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid)); - if (lli->lli_has_smd) { - struct cl_lock *lock = vvp_env_lock(env); - struct cl_lock_descr *descr = &lock->cll_descr; - - /* NOTE: this looks like DLM lock request, but it may - * not be one. Due to CEF_ASYNC flag (translated - * to LDLM_FL_HAS_INTENT by osc), this is - * glimpse request, that won't revoke any - * conflicting DLM locks held. Instead, - * ll_glimpse_callback() will be called on each - * client holding a DLM lock against this file, - * and resulting size will be returned for each - * stripe. DLM lock on [0, EOF] is acquired only - * if there were no conflicting locks. If there - * were conflicting locks, enqueuing or waiting - * fails with -ENAVAIL, but valid inode - * attributes are returned anyway. - */ - *descr = whole_file; - descr->cld_obj = clob; - descr->cld_mode = CLM_READ; - descr->cld_enq_flags = CEF_ASYNC | CEF_MUST; - if (agl) - descr->cld_enq_flags |= CEF_AGL; + if (!agl) { + ll_merge_attr(env, inode); + if (i_size_read(inode) > 0 && !inode->i_blocks) { /* - * CEF_ASYNC is used because glimpse sub-locks cannot - * deadlock (because they never conflict with other - * locks) and, hence, can be enqueued out-of-order. - * - * CEF_MUST protects glimpse lock from conversion into - * a lockless mode. 
+ * LU-417: Add dirty pages block count + * lest i_blocks reports 0, some "cp" or + * "tar" may think it's a completely + * sparse file and skip it. */ - result = cl_lock_request(env, io, lock); - if (result < 0) - return result; - - if (!agl) { - ll_merge_attr(env, inode); - if (i_size_read(inode) > 0 && - inode->i_blocks == 0) { - /* - * LU-417: Add dirty pages block count - * lest i_blocks reports 0, some "cp" or - * "tar" may think it's a completely - * sparse file and skip it. - */ - inode->i_blocks = dirty_cnt(inode); - } - } - cl_lock_release(env, lock); - } else { - CDEBUG(D_DLMTRACE, "No objects for inode\n"); - ll_merge_attr(env, inode); + inode->i_blocks = dirty_cnt(inode); } } + cl_lock_release(env, lock); + return result; } @@ -212,39 +203,3 @@ int cl_glimpse_size0(struct inode *inode, int agl) } return result; } - -int cl_local_size(struct inode *inode) -{ - struct lu_env *env = NULL; - struct cl_io *io = NULL; - struct cl_object *clob; - int result; - int refcheck; - - if (!ll_i2info(inode)->lli_has_smd) - return 0; - - result = cl_io_get(inode, &env, &io, &refcheck); - if (result <= 0) - return result; - - clob = io->ci_obj; - result = cl_io_init(env, io, CIT_MISC, clob); - if (result > 0) { - result = io->ci_result; - } else if (result == 0) { - struct cl_lock *lock = vvp_env_lock(env); - - lock->cll_descr = whole_file; - lock->cll_descr.cld_enq_flags = CEF_PEEK; - lock->cll_descr.cld_obj = clob; - result = cl_lock_request(env, io, lock); - if (result == 0) { - ll_merge_attr(env, inode); - cl_lock_release(env, lock); - } - } - cl_io_fini(env, io); - cl_env_put(env, &refcheck); - return result; -} diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c index 084330d08f7a9ec4ce8eec160e3f69947226eb0b..dd1cfd8f5213b6ffcd132250b8f5cead476254c4 100644 --- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c +++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c @@ -80,7 +80,8 @@ int cl_inode_fini_refcheck; */ static DEFINE_MUTEX(cl_inode_fini_guard); -int cl_setattr_ost(struct inode *inode, const struct iattr *attr) +int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr, + unsigned int attr_flags) { struct lu_env *env; struct cl_io *io; @@ -92,14 +93,15 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr) return PTR_ERR(env); io = vvp_env_thread_io(env); - io->ci_obj = ll_i2info(inode)->lli_clob; + io->ci_obj = obj; io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime); io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime); io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime); io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size; + io->u.ci_setattr.sa_attr_flags = attr_flags; io->u.ci_setattr.sa_valid = attr->ia_valid; - io->u.ci_setattr.sa_parent_fid = ll_inode2fid(inode); + io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu); again: if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) { @@ -148,7 +150,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md) struct cl_object_conf conf = { .coc_inode = inode, .u = { - .coc_md = md + .coc_layout = md->layout, } }; int result = 0; @@ -182,7 +184,6 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md) * locked by I_NEW bit. 
*/ lli->lli_clob = clob; - lli->lli_has_smd = lsm_has_objects(md->lsm); lu_object_ref_add(&clob->co_lu, "inode", inode); } else { result = PTR_ERR(clob); @@ -245,15 +246,11 @@ void cl_inode_fini(struct inode *inode) int emergency; if (clob) { - void *cookie; - - cookie = cl_env_reenter(); env = cl_env_get(&refcheck); emergency = IS_ERR(env); if (emergency) { mutex_lock(&cl_inode_fini_guard); LASSERT(cl_inode_fini_env); - cl_env_implant(cl_inode_fini_env, &refcheck); env = cl_inode_fini_env; } /* @@ -265,13 +262,10 @@ void cl_inode_fini(struct inode *inode) lu_object_ref_del(&clob->co_lu, "inode", inode); cl_object_put_last(env, clob); lli->lli_clob = NULL; - if (emergency) { - cl_env_unplant(cl_inode_fini_env, &refcheck); + if (emergency) mutex_unlock(&cl_inode_fini_guard); - } else { + else cl_env_put(env, &refcheck); - } - cl_env_reexit(cookie); } } @@ -302,22 +296,3 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid) gen = fid_flatten(fid) >> 32; return gen; } - -/* lsm is unreliable after hsm implementation as layout can be changed at - * any time. This is only to support old, non-clio-ized interfaces. It will - * cause deadlock if clio operations are called with this extra layout refcount - * because in case the layout changed during the IO, ll_layout_refresh() will - * have to wait for the refcount to become zero to destroy the older layout. - * - * Notice that the lsm returned by this function may not be valid unless called - * inside layout lock - MDS_INODELOCK_LAYOUT. - */ -struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode) -{ - return lov_lsm_get(ll_i2info(inode)->lli_clob); -} - -inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm) -{ - lov_lsm_put(ll_i2info(inode)->lli_clob, lsm); -} diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c index fb346c12dad2f4a6c07b4ad3a93c75b4b9adbcb9..f48660ed350ff469298e31edbea50a6eb7464a7d 100644 --- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c +++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c @@ -47,36 +47,29 @@ */ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) { - struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 }; - __u32 valsize = sizeof(struct lov_desc); - int rc, easize, def_easize, cookiesize; - struct lov_desc desc; - __u16 stripes, def_stripes; - - rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC, - &valsize, &desc, NULL); + u32 val_size, max_easize, def_easize; + int rc; + + val_size = sizeof(max_easize); + rc = obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE), KEY_MAX_EASIZE, + &val_size, &max_easize); if (rc) return rc; - stripes = min_t(__u32, desc.ld_tgt_count, LOV_MAX_STRIPE_COUNT); - lsm.lsm_stripe_count = stripes; - easize = obd_size_diskmd(dt_exp, &lsm); - - def_stripes = min_t(__u32, desc.ld_default_stripe_count, - LOV_MAX_STRIPE_COUNT); - lsm.lsm_stripe_count = def_stripes; - def_easize = obd_size_diskmd(dt_exp, &lsm); - - cookiesize = stripes * sizeof(struct llog_cookie); + val_size = sizeof(def_easize); + rc = obd_get_info(NULL, dt_exp, sizeof(KEY_DEFAULT_EASIZE), + KEY_DEFAULT_EASIZE, &val_size, &def_easize); + if (rc) + return rc; - /* default cookiesize is 0 because from 2.4 server doesn't send + /* + * default cookiesize is 0 because from 2.4 server doesn't send * llog cookies to client. 
*/ - CDEBUG(D_HA, - "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n", - def_easize, easize, cookiesize); + CDEBUG(D_HA, "updating def/max_easize: %d/%d\n", + def_easize, max_easize); - rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize, 0); + rc = md_init_ea_size(md_exp, max_easize, def_easize); return rc; } @@ -169,13 +162,11 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock, return rc; } - cg->lg_env = cl_env_get(&refcheck); + cg->lg_env = env; cg->lg_io = io; cg->lg_lock = lock; cg->lg_gid = gid; - LASSERT(cg->lg_env == env); - cl_env_unplant(env, &refcheck); return 0; } @@ -184,14 +175,10 @@ void cl_put_grouplock(struct ll_grouplock *cg) struct lu_env *env = cg->lg_env; struct cl_io *io = cg->lg_io; struct cl_lock *lock = cg->lg_lock; - int refcheck; LASSERT(cg->lg_env); LASSERT(cg->lg_gid); - cl_env_implant(env, &refcheck); - cl_env_put(env, &refcheck); - cl_lock_release(env, lock); cl_io_fini(env, io); cl_env_put(env, NULL); diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c deleted file mode 100644 index 8644631bf2ba82acc815cf6c814efb79e0b13268..0000000000000000000000000000000000000000 --- a/drivers/staging/lustre/lustre/llite/llite_close.c +++ /dev/null @@ -1,395 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/llite/llite_close.c - * - * Lustre Lite routines to issue a secondary close after writeback - */ - -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include "llite_internal.h" - -/** records that a write is in flight */ -void vvp_write_pending(struct vvp_object *club, struct vvp_page *page) -{ - struct ll_inode_info *lli = ll_i2info(club->vob_inode); - - spin_lock(&lli->lli_lock); - lli->lli_flags |= LLIF_SOM_DIRTY; - if (page && list_empty(&page->vpg_pending_linkage)) - list_add(&page->vpg_pending_linkage, &club->vob_pending_list); - spin_unlock(&lli->lli_lock); -} - -/** records that a write has completed */ -void vvp_write_complete(struct vvp_object *club, struct vvp_page *page) -{ - struct ll_inode_info *lli = ll_i2info(club->vob_inode); - int rc = 0; - - spin_lock(&lli->lli_lock); - if (page && !list_empty(&page->vpg_pending_linkage)) { - list_del_init(&page->vpg_pending_linkage); - rc = 1; - } - spin_unlock(&lli->lli_lock); - if (rc) - ll_queue_done_writing(club->vob_inode, 0); -} - -/** Queues DONE_WRITING if - * - done writing is allowed; - * - inode has no no dirty pages; - */ -void ll_queue_done_writing(struct inode *inode, unsigned long flags) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob); - - spin_lock(&lli->lli_lock); - lli->lli_flags |= flags; - - if ((lli->lli_flags & LLIF_DONE_WRITING) && - list_empty(&club->vob_pending_list)) { - struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq; - - if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) - CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no diry pages\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), lli->lli_flags); - /* DONE_WRITING is allowed and inode has no dirty page. */ - spin_lock(&lcq->lcq_lock); - - LASSERT(list_empty(&lli->lli_close_list)); - CDEBUG(D_INODE, "adding inode "DFID" to close list\n", - PFID(ll_inode2fid(inode))); - list_add_tail(&lli->lli_close_list, &lcq->lcq_head); - - /* Avoid a concurrent insertion into the close thread queue: - * an inode is already in the close thread, open(), write(), - * close() happen, epoch is closed as the inode is marked as - * LLIF_EPOCH_PENDING. When pages are written inode should not - * be inserted into the queue again, clear this flag to avoid - * it. - */ - lli->lli_flags &= ~LLIF_DONE_WRITING; - - wake_up(&lcq->lcq_waitq); - spin_unlock(&lcq->lcq_lock); - } - spin_unlock(&lli->lli_lock); -} - -/** Pack SOM attributes info @opdata for CLOSE, DONE_WRITING rpc. */ -void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data) -{ - struct ll_inode_info *lli = ll_i2info(inode); - - op_data->op_flags |= MF_SOM_CHANGE; - /* Check if Size-on-MDS attributes are valid. */ - if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) - CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), lli->lli_flags); - - if (!cl_local_size(inode)) { - /* Send Size-on-MDS Attributes if valid. */ - op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET | - ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS; - } -} - -/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. 
*/ -void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, - struct obd_client_handle **och, unsigned long flags) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob); - - spin_lock(&lli->lli_lock); - if (!(list_empty(&club->vob_pending_list))) { - if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) { - LASSERT(*och); - LASSERT(!lli->lli_pending_och); - /* Inode is dirty and there is no pending write done - * request yet, DONE_WRITE is to be sent later. - */ - lli->lli_flags |= LLIF_EPOCH_PENDING; - lli->lli_pending_och = *och; - spin_unlock(&lli->lli_lock); - - inode = igrab(inode); - LASSERT(inode); - goto out; - } - if (flags & LLIF_DONE_WRITING) { - /* Some pages are still dirty, it is early to send - * DONE_WRITE. Wait until all pages will be flushed - * and try DONE_WRITE again later. - */ - LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING)); - lli->lli_flags |= LLIF_DONE_WRITING; - spin_unlock(&lli->lli_lock); - - inode = igrab(inode); - LASSERT(inode); - goto out; - } - } - CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n", - ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid)); - op_data->op_flags |= MF_EPOCH_CLOSE; - - if (flags & LLIF_DONE_WRITING) { - LASSERT(lli->lli_flags & LLIF_SOM_DIRTY); - LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING)); - *och = lli->lli_pending_och; - lli->lli_pending_och = NULL; - lli->lli_flags &= ~LLIF_EPOCH_PENDING; - } else { - /* Pack Size-on-MDS inode attributes only if they has changed */ - if (!(lli->lli_flags & LLIF_SOM_DIRTY)) { - spin_unlock(&lli->lli_lock); - goto out; - } - - /* There is a pending DONE_WRITE -- close epoch with no - * attribute change. - */ - if (lli->lli_flags & LLIF_EPOCH_PENDING) { - spin_unlock(&lli->lli_lock); - goto out; - } - } - - LASSERT(list_empty(&club->vob_pending_list)); - lli->lli_flags &= ~LLIF_SOM_DIRTY; - spin_unlock(&lli->lli_lock); - ll_done_writing_attr(inode, op_data); - -out: - return; -} - -/** - * Cliens updates SOM attributes on MDS (including llog cookies): - * obd_getattr with no lock and md_setattr. - */ -int ll_som_update(struct inode *inode, struct md_op_data *op_data) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ptlrpc_request *request = NULL; - __u32 old_flags; - struct obdo *oa; - int rc; - - LASSERT(op_data); - if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) - CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), lli->lli_flags); - - oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); - if (!oa) { - CERROR("can't allocate memory for Size-on-MDS update.\n"); - return -ENOMEM; - } - - old_flags = op_data->op_flags; - op_data->op_flags = MF_SOM_CHANGE; - - /* If inode is already in another epoch, skip getattr from OSTs. */ - if (lli->lli_ioepoch == op_data->op_ioepoch) { - rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch, - old_flags & MF_GETATTR_LOCK); - if (rc) { - oa->o_valid = 0; - if (rc != -ENOENT) - CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), rc); - } else { - CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n", - PFID(&lli->lli_fid)); - } - /* Install attributes into op_data. 
*/ - md_from_obdo(op_data, oa, oa->o_valid); - } - - rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, - NULL, 0, NULL, 0, &request, NULL); - ptlrpc_req_finished(request); - - kmem_cache_free(obdo_cachep, oa); - return rc; -} - -/** - * Closes the ioepoch and packs all the attributes into @op_data for - * DONE_WRITING rpc. - */ -static void ll_prepare_done_writing(struct inode *inode, - struct md_op_data *op_data, - struct obd_client_handle **och) -{ - ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING); - /* If there is no @och, we do not do D_W yet. */ - if (!*och) - return; - - ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh); - ll_prep_md_op_data(op_data, inode, NULL, NULL, - 0, 0, LUSTRE_OPC_ANY, NULL); -} - -/** Send a DONE_WRITING rpc. */ -static void ll_done_writing(struct inode *inode) -{ - struct obd_client_handle *och = NULL; - struct md_op_data *op_data; - int rc; - - LASSERT(exp_connect_som(ll_i2mdexp(inode))); - - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) - return; - - ll_prepare_done_writing(inode, op_data, &och); - /* If there is no @och, we do not do D_W yet. */ - if (!och) - goto out; - - rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL); - if (rc == -EAGAIN) - /* MDS has instructed us to obtain Size-on-MDS attribute from - * OSTs and send setattr to back to MDS. - */ - rc = ll_som_update(inode, op_data); - else if (rc) { - CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), rc); - } -out: - ll_finish_md_op_data(op_data); - if (och) { - md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och); - kfree(och); - } -} - -static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq) -{ - struct ll_inode_info *lli = NULL; - - spin_lock(&lcq->lcq_lock); - - if (!list_empty(&lcq->lcq_head)) { - lli = list_entry(lcq->lcq_head.next, struct ll_inode_info, - lli_close_list); - list_del_init(&lli->lli_close_list); - } else if (atomic_read(&lcq->lcq_stop)) { - lli = ERR_PTR(-EALREADY); - } - - spin_unlock(&lcq->lcq_lock); - return lli; -} - -static int ll_close_thread(void *arg) -{ - struct ll_close_queue *lcq = arg; - - complete(&lcq->lcq_comp); - - while (1) { - struct l_wait_info lwi = { 0 }; - struct ll_inode_info *lli; - struct inode *inode; - - l_wait_event_exclusive(lcq->lcq_waitq, - (lli = ll_close_next_lli(lcq)) != NULL, - &lwi); - if (IS_ERR(lli)) - break; - - inode = ll_info2i(lli); - CDEBUG(D_INFO, "done_writing for inode "DFID"\n", - PFID(ll_inode2fid(inode))); - ll_done_writing(inode); - iput(inode); - } - - CDEBUG(D_INFO, "ll_close exiting\n"); - complete(&lcq->lcq_comp); - return 0; -} - -int ll_close_thread_start(struct ll_close_queue **lcq_ret) -{ - struct ll_close_queue *lcq; - struct task_struct *task; - - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD)) - return -EINTR; - - lcq = kzalloc(sizeof(*lcq), GFP_NOFS); - if (!lcq) - return -ENOMEM; - - spin_lock_init(&lcq->lcq_lock); - INIT_LIST_HEAD(&lcq->lcq_head); - init_waitqueue_head(&lcq->lcq_waitq); - init_completion(&lcq->lcq_comp); - - task = kthread_run(ll_close_thread, lcq, "ll_close"); - if (IS_ERR(task)) { - kfree(lcq); - return PTR_ERR(task); - } - - wait_for_completion(&lcq->lcq_comp); - *lcq_ret = lcq; - return 0; -} - -void ll_close_thread_shutdown(struct ll_close_queue *lcq) -{ - init_completion(&lcq->lcq_comp); - atomic_inc(&lcq->lcq_stop); - wake_up(&lcq->lcq_waitq); - wait_for_completion(&lcq->lcq_comp); - kfree(lcq); -} diff --git 
a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 4bc551279aa4558a960732ebf84f2596e0ddda1c..065a9a7e120aed967afb666ae496eb13462598cd 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -97,31 +97,20 @@ struct ll_grouplock { unsigned long lg_gid; }; -enum lli_flags { - /* MDS has an authority for the Size-on-MDS attributes. */ - LLIF_MDS_SIZE_LOCK = (1 << 0), - /* Epoch close is postponed. */ - LLIF_EPOCH_PENDING = (1 << 1), - /* DONE WRITING is allowed. */ - LLIF_DONE_WRITING = (1 << 2), - /* Sizeon-on-MDS attributes are changed. An attribute update needs to - * be sent to MDS. - */ - LLIF_SOM_DIRTY = (1 << 3), +enum ll_file_flags { /* File data is modified. */ - LLIF_DATA_MODIFIED = (1 << 4), + LLIF_DATA_MODIFIED = 0, /* File is being restored */ - LLIF_FILE_RESTORING = (1 << 5), + LLIF_FILE_RESTORING = 1, /* Xattr cache is attached to the file */ - LLIF_XATTR_CACHE = (1 << 6), + LLIF_XATTR_CACHE = 2, }; struct ll_inode_info { __u32 lli_inode_magic; - __u32 lli_flags; - __u64 lli_ioepoch; spinlock_t lli_lock; + unsigned long lli_flags; struct posix_acl *lli_posix_acl; /* identifying fields for both metadata and data stacks. */ @@ -129,14 +118,6 @@ struct ll_inode_info { /* master inode fid for stripe directory */ struct lu_fid lli_pfid; - struct list_head lli_close_list; - - /* handle is to be sent to MDS later on done_writing and setattr. - * Open handle data are needed for the recovery to reconstruct - * the inode state on the MDS. XXX: recovery is not ready yet. - */ - struct obd_client_handle *lli_pending_och; - /* We need all three because every inode may be opened in different * modes */ @@ -204,7 +185,6 @@ struct ll_inode_info { struct { struct mutex lli_size_mutex; char *lli_symlink_name; - __u64 lli_maxbytes; /* * struct rw_semaphore { * signed long count; // align d.d_def_acl @@ -245,7 +225,6 @@ struct ll_inode_info { * In the future, if more members are added only for directory, * some of the following members can be moved into u.f. */ - bool lli_has_smd; struct cl_object *lli_clob; /* mutex to request for layout lock exclusively. */ @@ -282,6 +261,9 @@ int ll_xattr_cache_destroy(struct inode *inode); int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer, size_t size, __u64 valid); +int ll_init_security(struct dentry *dentry, struct inode *inode, + struct inode *dir); + /* * Locking to guarantee consistency of non-atomic updates to long long i_size, * consistency between file size and KMS. @@ -400,7 +382,7 @@ enum stats_track_type { #define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */ #define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */ #define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */ -#define LL_SBI_SOM_PREVIEW 0x1000 /* SOM preview mount option */ +/* LL_SBI_SOM_PREVIEW 0x1000 SOM preview mount option, obsolete */ #define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. 
*/ #define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */ #define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */ @@ -409,6 +391,8 @@ enum stats_track_type { #define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */ #define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */ #define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */ +#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server + * suppress_pings */ #define LL_SBI_FLAGS { \ "nolck", \ @@ -432,6 +416,7 @@ enum stats_track_type { "user_fid2path",\ "xattr_cache", \ "norootsquash", \ + "always_ping", \ } /* @@ -466,10 +451,10 @@ struct ll_sb_info { int ll_flags; unsigned int ll_umounting:1, - ll_xattr_cache_enabled:1; - struct lustre_client_ocd ll_lco; + ll_xattr_cache_enabled:1, + ll_client_common_fill_super_succeeded:1; - struct ll_close_queue *ll_lcq; + struct lustre_client_ocd ll_lco; struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ @@ -630,8 +615,6 @@ struct ll_file_data { struct list_head fd_lccs; /* list of ll_cl_context */ }; -struct lov_stripe_md; - extern struct dentry *llite_root; extern struct kset *llite_kset; @@ -682,8 +665,6 @@ enum { LPROC_LL_WRITE_BYTES, LPROC_LL_BRW_READ, LPROC_LL_BRW_WRITE, - LPROC_LL_OSC_READ, - LPROC_LL_OSC_WRITE, LPROC_LL_IOCTL, LPROC_LL_OPEN, LPROC_LL_RELEASE, @@ -741,9 +722,7 @@ int ll_writepage(struct page *page, struct writeback_control *wbc); int ll_writepages(struct address_space *, struct writeback_control *wbc); int ll_readpage(struct file *file, struct page *page); void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras); -int ll_readahead(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, struct ll_readahead_state *ras, - bool hit); +int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io); struct ll_cl_context *ll_cl_find(struct file *file); void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io); void ll_cl_remove(struct file *file, const struct lu_env *env); @@ -762,25 +741,14 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, enum ldlm_mode mode); int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); -int ll_glimpse_ioctl(struct ll_sb_info *sbi, - struct lov_stripe_md *lsm, lstat_t *st); -void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch); int ll_release_openhandle(struct inode *, struct lookup_intent *); int ll_md_real_close(struct inode *inode, fmode_t fmode); -void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, - struct obd_client_handle **och, unsigned long flags); -void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data); -int ll_som_update(struct inode *inode, struct md_op_data *op_data); -int ll_inode_getattr(struct inode *inode, struct obdo *obdo, - __u64 ioepoch, int sync); -void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data, - struct lustre_handle *fh); int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat); struct posix_acl *ll_get_acl(struct inode *inode, int type); int ll_migrate(struct inode *parent, struct file *file, int mdtidx, const char *name, int namelen); int ll_get_fid_by_name(struct inode *parent, const char *name, - int namelen, struct lu_fid *fid); + int namelen, struct lu_fid *fid, struct inode **inode); int ll_inode_permission(struct inode *inode, int mask); int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, @@ -801,7 
+769,6 @@ int ll_hsm_release(struct inode *inode); /* llite/dcache.c */ -int ll_d_init(struct dentry *de); extern const struct dentry_operations ll_d_ops; void ll_intent_drop_lock(struct lookup_intent *); void ll_intent_release(struct lookup_intent *); @@ -818,6 +785,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt); void ll_put_super(struct super_block *sb); void ll_kill_super(struct super_block *sb); struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock); +void ll_dir_clear_lsm_md(struct inode *inode); void ll_clear_inode(struct inode *inode); int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import); int ll_setattr(struct dentry *de, struct iattr *attr); @@ -891,18 +859,6 @@ int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid); /* llite/symlink.c */ extern const struct inode_operations ll_fast_symlink_inode_operations; -/* llite/llite_close.c */ -struct ll_close_queue { - spinlock_t lcq_lock; - struct list_head lcq_head; - wait_queue_head_t lcq_waitq; - struct completion lcq_comp; - atomic_t lcq_stop; -}; - -void vvp_write_pending(struct vvp_object *club, struct vvp_page *page); -void vvp_write_complete(struct vvp_object *club, struct vvp_page *page); - /** * IO arguments for various VFS I/O interfaces. */ @@ -945,15 +901,11 @@ static inline struct vvp_io_args *ll_env_args(const struct lu_env *env) return &ll_env_info(env)->lti_args; } -void ll_queue_done_writing(struct inode *inode, unsigned long flags); -void ll_close_thread_shutdown(struct ll_close_queue *lcq); -int ll_close_thread_start(struct ll_close_queue **lcq_ret); - /* llite/llite_mmap.c */ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last); int ll_file_mmap(struct file *file, struct vm_area_struct *vma); -void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, +void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma, unsigned long addr, size_t count); struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, size_t count); @@ -1024,9 +976,14 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode) return fid; } -static inline __u64 ll_file_maxbytes(struct inode *inode) +static inline loff_t ll_file_maxbytes(struct inode *inode) { - return ll_i2info(inode)->lli_maxbytes; + struct cl_object *obj = ll_i2info(inode)->lli_clob; + + if (!obj) + return MAX_LFS_FILESIZE; + + return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE); } /* llite/xattr.c */ @@ -1043,17 +1000,18 @@ extern const struct xattr_handler *ll_xattr_handlers[]; ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); int ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer, size_t size, __u64 valid); +const struct xattr_handler *get_xattr_type(const char *name); /** * Common IO arguments for various VFS I/O interfaces. */ int cl_sb_init(struct super_block *sb); int cl_sb_fini(struct super_block *sb); -void ll_io_init(struct cl_io *io, const struct file *file, int write); -void ras_update(struct ll_sb_info *sbi, struct inode *inode, - struct ll_readahead_state *ras, unsigned long index, - unsigned hit); +enum ras_update_flags { + LL_RAS_HIT = 0x1, + LL_RAS_MMAP = 0x2 +}; void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len); void ll_ra_stats_inc(struct inode *inode, enum ra_stat which); @@ -1189,7 +1147,7 @@ dentry_may_statahead(struct inode *dir, struct dentry *dentry) * 'lld_sa_generation == lli->lli_sa_generation'. 
*/ ldd = ll_d2d(dentry); - if (ldd && ldd->lld_sa_generation == lli->lli_sa_generation) + if (ldd->lld_sa_generation == lli->lli_sa_generation) return false; return true; @@ -1258,15 +1216,6 @@ struct ll_dio_pages { int ldp_nr; }; -static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt, - int rc) -{ - int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ : - LPROC_LL_OSC_WRITE; - - ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc); -} - ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, int rw, struct inode *inode, struct ll_dio_pages *pv); @@ -1317,17 +1266,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, static inline int d_lustre_invalid(const struct dentry *dentry) { - struct ll_dentry_data *lld = ll_d2d(dentry); - - return !lld || lld->lld_invalid; -} - -static inline void __d_lustre_invalidate(struct dentry *dentry) -{ - struct ll_dentry_data *lld = ll_d2d(dentry); - - if (lld) - lld->lld_invalid = 1; + return ll_d2d(dentry)->lld_invalid; } /* @@ -1343,7 +1282,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested) spin_lock_nested(&dentry->d_lock, nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL); - __d_lustre_invalidate(dentry); + ll_d2d(dentry)->lld_invalid = 1; /* * We should be careful about dentries created by d_obtain_alias(). * These dentries are not put in the dentry tree, instead they are @@ -1365,11 +1304,6 @@ static inline void d_lustre_revalidate(struct dentry *dentry) spin_unlock(&dentry->d_lock); } -enum { - LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */ - LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */ -}; - int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf); int ll_layout_refresh(struct inode *inode, __u32 *gen); int ll_layout_restore(struct inode *inode, loff_t start, __u64 length); @@ -1383,14 +1317,14 @@ int ll_page_sync_io(const struct lu_env *env, struct cl_io *io, int ll_getparent(struct file *file, struct getparent __user *arg); /* lcommon_cl.c */ -int cl_setattr_ost(struct inode *inode, const struct iattr *attr); +int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr, + unsigned int attr_flags); extern struct lu_env *cl_inode_fini_env; extern int cl_inode_fini_refcheck; int cl_file_inode_init(struct inode *inode, struct lustre_md *md); void cl_inode_fini(struct inode *inode); -int cl_local_size(struct inode *inode); __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32); __u32 cl_fid_build_gen(const struct lu_fid *fid); diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index e5c62f4ce3d8b30ceea3caddaedc0daccb1334cc..25f5aed97f63ca98619f27109e8a55b1a580ed23 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -191,10 +191,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, OBD_CONNECT_FLOCK_DEAD | OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | OBD_CONNECT_OPEN_BY_FID | - OBD_CONNECT_DIR_STRIPE; - - if (sbi->ll_flags & LL_SBI_SOM_PREVIEW) - data->ocd_connect_flags |= OBD_CONNECT_SOM; + OBD_CONNECT_DIR_STRIPE | + OBD_CONNECT_BULK_MBITS; if (sbi->ll_flags & LL_SBI_LRU_RESIZE) data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE; @@ -226,6 +224,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* real client */ data->ocd_connect_flags |= OBD_CONNECT_REAL; + /* always ping even if server 
suppress_pings */ + if (sbi->ll_flags & LL_SBI_ALWAYS_PING) + data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS; + data->ocd_brw_size = MD_MAX_BRW_SIZE; err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, @@ -288,7 +290,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, size = sizeof(*data); err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA), - KEY_CONN_DATA, &size, data, NULL); + KEY_CONN_DATA, &size, data); if (err) { CERROR("%s: Get connect data failed: rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err); @@ -355,10 +357,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | OBD_CONNECT_EINPROGRESS | OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE | - OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS; - - if (sbi->ll_flags & LL_SBI_SOM_PREVIEW) - data->ocd_connect_flags |= OBD_CONNECT_SOM; + OBD_CONNECT_LAYOUTLOCK | + OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK | + OBD_CONNECT_BULK_MBITS; if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) { /* OBD_CONNECT_CKSUM should always be set, even if checksums are @@ -376,6 +377,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE; + /* always ping even if server suppress_pings */ + if (sbi->ll_flags & LL_SBI_ALWAYS_PING) + data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS; + CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n", data->ocd_connect_flags, data->ocd_version, data->ocd_grant); @@ -475,8 +480,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, ptlrpc_req_finished(request); if (IS_ERR(root)) { - if (lmd.lsm) - obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm); #ifdef CONFIG_FS_POSIX_ACL if (lmd.posix_acl) { posix_acl_release(lmd.posix_acl); @@ -488,12 +491,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, goto out_root; } - err = ll_close_thread_start(&sbi->ll_lcq); - if (err) { - CERROR("cannot start close thread: rc %d\n", err); - goto out_root; - } - checksum = sbi->ll_flags & LL_SBI_CHECKSUM; err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM), KEY_CHECKSUM, sizeof(checksum), &checksum, @@ -572,10 +569,18 @@ int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize) { int size, rc; - *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL); + size = sizeof(*lmmsize); + rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE), + KEY_MAX_EASIZE, &size, lmmsize); + if (rc) { + CERROR("%s: cannot get max LOV EA size: rc = %d\n", + sbi->ll_dt_exp->exp_obd->obd_name, rc); + return rc; + } + size = sizeof(int); rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE), - KEY_MAX_EASIZE, &size, lmmsize, NULL); + KEY_MAX_EASIZE, &size, lmmsize); if (rc) CERROR("Get max mdsize error rc %d\n", rc); @@ -599,7 +604,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize) size = sizeof(int); rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE), - KEY_DEFAULT_EASIZE, &size, lmmsize, NULL); + KEY_DEFAULT_EASIZE, &size, lmmsize); if (rc) CERROR("Get default mdsize error rc %d\n", rc); @@ -633,8 +638,6 @@ static void client_common_put_super(struct super_block *sb) { struct ll_sb_info *sbi = ll_s2sbi(sb); - ll_close_thread_shutdown(sbi->ll_lcq); - cl_sb_fini(sb); obd_fid_fini(sbi->ll_dt_exp->exp_obd); @@ -725,6 +728,18 @@ static int ll_options(char *options, int *flags) *flags &= ~tmp; goto next; } + tmp = 
ll_set_opt("context", s1, 1); + if (tmp) + goto next; + tmp = ll_set_opt("fscontext", s1, 1); + if (tmp) + goto next; + tmp = ll_set_opt("defcontext", s1, 1); + if (tmp) + goto next; + tmp = ll_set_opt("rootcontext", s1, 1); + if (tmp) + goto next; tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH); if (tmp) { *flags |= tmp; @@ -766,11 +781,6 @@ static int ll_options(char *options, int *flags) *flags &= ~tmp; goto next; } - tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW); - if (tmp) { - *flags |= tmp; - goto next; - } tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API); if (tmp) { *flags |= tmp; @@ -786,6 +796,11 @@ static int ll_options(char *options, int *flags) *flags &= ~tmp; goto next; } + tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING); + if (tmp) { + *flags |= tmp; + goto next; + } LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n", s1); return -EINVAL; @@ -804,14 +819,10 @@ void ll_lli_init(struct ll_inode_info *lli) { lli->lli_inode_magic = LLI_INODE_MAGIC; lli->lli_flags = 0; - lli->lli_ioepoch = 0; - lli->lli_maxbytes = MAX_LFS_FILESIZE; spin_lock_init(&lli->lli_lock); lli->lli_posix_acl = NULL; /* Do not set lli_fid, it has been initialized already. */ fid_zero(&lli->lli_pfid); - INIT_LIST_HEAD(&lli->lli_close_list); - lli->lli_pending_och = NULL; lli->lli_mds_read_och = NULL; lli->lli_mds_write_och = NULL; lli->lli_mds_exec_och = NULL; @@ -820,9 +831,8 @@ void ll_lli_init(struct ll_inode_info *lli) lli->lli_open_fd_exec_count = 0; mutex_init(&lli->lli_och_mutex); spin_lock_init(&lli->lli_agl_lock); - lli->lli_has_smd = false; spin_lock_init(&lli->lli_layout_lock); - ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE); + ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE); lli->lli_clob = NULL; init_rwsem(&lli->lli_xattrs_list_rwsem); @@ -941,10 +951,14 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) /* connections, registrations, sb setup */ err = client_common_fill_super(sb, md, dt, mnt); + if (!err) + sbi->ll_client_common_fill_super_succeeded = 1; out_free: kfree(md); kfree(dt); + if (lprof) + class_put_profile(lprof); if (err) ll_put_super(sb); else if (sbi->ll_flags & LL_SBI_VERBOSE) @@ -1002,7 +1016,7 @@ void ll_put_super(struct super_block *sb) } } - if (sbi->ll_lcq) { + if (sbi->ll_client_common_fill_super_succeeded) { /* Only if client_common_fill_super succeeded */ client_common_put_super(sb); } @@ -1057,7 +1071,7 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock) return inode; } -static void ll_dir_clear_lsm_md(struct inode *inode) +void ll_dir_clear_lsm_md(struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); @@ -1205,16 +1219,44 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md) /* set the directory layout */ if (!lli->lli_lsm_md) { + struct cl_attr *attr; + rc = ll_init_lsm_md(inode, md); if (rc) return rc; - lli->lli_lsm_md = lsm; /* * set lsm_md to NULL, so the following free lustre_md * will not free this lsm */ md->lmv = NULL; + lli->lli_lsm_md = lsm; + + attr = kzalloc(sizeof(*attr), GFP_NOFS); + if (!attr) + return -ENOMEM; + + /* validate the lsm */ + rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr, + ll_md_blocking_ast); + if (rc) { + kfree(attr); + return rc; + } + + if (md->body->mbo_valid & OBD_MD_FLNLINK) + md->body->mbo_nlink = attr->cat_nlink; + if (md->body->mbo_valid & OBD_MD_FLSIZE) + md->body->mbo_size = attr->cat_size; + if (md->body->mbo_valid & OBD_MD_FLATIME) + md->body->mbo_atime = attr->cat_atime; + if (md->body->mbo_valid & 
OBD_MD_FLCTIME) + md->body->mbo_ctime = attr->cat_ctime; + if (md->body->mbo_valid & OBD_MD_FLMTIME) + md->body->mbo_mtime = attr->cat_mtime; + + kfree(attr); + CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm, lsm->lsm_md_magic, PFID(ll_inode2fid(inode))); return 0; @@ -1272,9 +1314,6 @@ void ll_clear_inode(struct inode *inode) LASSERT(lli->lli_opendir_pid == 0); } - spin_lock(&lli->lli_lock); - ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK; - spin_unlock(&lli->lli_lock); md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode)); LASSERT(!lli->lli_open_fd_write_count); @@ -1313,13 +1352,11 @@ void ll_clear_inode(struct inode *inode) * cl_object still uses inode lsm. */ cl_inode_fini(inode); - lli->lli_has_smd = false; } #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) -static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, - struct md_open_data **mod) +static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data) { struct lustre_md md; struct inode *inode = d_inode(dentry); @@ -1332,8 +1369,7 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, if (IS_ERR(op_data)) return PTR_ERR(op_data); - rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0, - &request, mod); + rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request); if (rc) { ptlrpc_req_finished(request); if (rc == -ENOENT) { @@ -1369,48 +1405,12 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, rc = simple_setattr(dentry, &op_data->op_attr); op_data->op_attr.ia_valid = ia_valid; - /* Extract epoch data if obtained. */ - op_data->op_handle = md.body->mbo_handle; - op_data->op_ioepoch = md.body->mbo_ioepoch; - rc = ll_update_inode(inode, &md); ptlrpc_req_finished(request); return rc; } -/* Close IO epoch and send Size-on-MDS attribute update. */ -static int ll_setattr_done_writing(struct inode *inode, - struct md_op_data *op_data, - struct md_open_data *mod) -{ - struct ll_inode_info *lli = ll_i2info(inode); - int rc = 0; - - if (!S_ISREG(inode->i_mode)) - return 0; - - CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n", - op_data->op_ioepoch, PFID(&lli->lli_fid)); - - op_data->op_flags = MF_EPOCH_CLOSE; - ll_done_writing_attr(inode, op_data); - ll_pack_inode2opdata(inode, op_data, NULL); - - rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod); - if (rc == -EAGAIN) - /* MDS has instructed us to obtain Size-on-MDS attribute - * from OSTs and send setattr to back to MDS. - */ - rc = ll_som_update(inode, op_data); - else if (rc) { - CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n", - ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name, - PFID(ll_inode2fid(inode)), rc); - } - return rc; -} - /* If this inode has objects allocated to it (lsm != NULL), then the OST * object(s) determine the file size and mtime. Otherwise, the MDS will * keep these values until such a time that objects are allocated for it. 
@@ -1431,9 +1431,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) struct inode *inode = d_inode(dentry); struct ll_inode_info *lli = ll_i2info(inode); struct md_op_data *op_data = NULL; - struct md_open_data *mod = NULL; bool file_is_released = false; - int rc = 0, rc1 = 0; + int rc = 0; CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n", ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode, @@ -1503,14 +1502,33 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) * but other attributes must be set */ if (S_ISREG(inode->i_mode)) { - struct lov_stripe_md *lsm; + struct cl_layout cl = { + .cl_is_released = false, + }; + struct lu_env *env; + int refcheck; __u32 gen; - ll_layout_refresh(inode, &gen); - lsm = ccc_inode_lsm_get(inode); - if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED) - file_is_released = true; - ccc_inode_lsm_put(inode, lsm); + rc = ll_layout_refresh(inode, &gen); + if (rc < 0) + goto out; + + /* + * XXX: the only place we need to know the layout type, + * this will be removed by a later patch. -Jinshan + */ + env = cl_env_get(&refcheck); + if (IS_ERR(env)) { + rc = PTR_ERR(env); + goto out; + } + + rc = cl_object_layout_get(env, lli->lli_clob, &cl); + cl_env_put(env, &refcheck); + if (rc < 0) + goto out; + + file_is_released = cl.cl_is_released; if (!hsm_import && attr->ia_valid & ATTR_SIZE) { if (file_is_released) { @@ -1527,32 +1545,16 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) * modified, flag it. */ attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE; - spin_lock(&lli->lli_lock); - lli->lli_flags |= LLIF_DATA_MODIFIED; - spin_unlock(&lli->lli_lock); op_data->op_bias |= MDS_DATA_MODIFIED; } } memcpy(&op_data->op_attr, attr, sizeof(*attr)); - /* Open epoch for truncate. 
*/ - if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import && - (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET))) - op_data->op_flags = MF_EPOCH_OPEN; - - rc = ll_md_setattr(dentry, op_data, &mod); + rc = ll_md_setattr(dentry, op_data); if (rc) goto out; - /* RPC to MDT is sent, cancel data modification flag */ - if (op_data->op_bias & MDS_DATA_MODIFIED) { - spin_lock(&lli->lli_lock); - lli->lli_flags &= ~LLIF_DATA_MODIFIED; - spin_unlock(&lli->lli_lock); - } - - ll_ioepoch_open(lli, op_data->op_ioepoch); if (!S_ISREG(inode->i_mode) || file_is_released) { rc = 0; goto out; @@ -1568,19 +1570,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) * setting times to past, but it is necessary due to possible * time de-synchronization between MDT inode and OST objects */ - if (attr->ia_valid & ATTR_SIZE) - down_write(&lli->lli_trunc_sem); - rc = cl_setattr_ost(inode, attr); - if (attr->ia_valid & ATTR_SIZE) - up_write(&lli->lli_trunc_sem); + rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0); } out: - if (op_data->op_ioepoch) { - rc1 = ll_setattr_done_writing(inode, op_data, mod); - if (!rc) - rc = rc1; - } - ll_finish_md_op_data(op_data); + if (op_data) + ll_finish_md_op_data(op_data); if (!S_ISDIR(inode->i_mode)) { inode_lock(inode); @@ -1736,19 +1730,10 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) { struct ll_inode_info *lli = ll_i2info(inode); struct mdt_body *body = md->body; - struct lov_stripe_md *lsm = md->lsm; struct ll_sb_info *sbi = ll_i2sbi(inode); - LASSERT((lsm != NULL) == ((body->mbo_valid & OBD_MD_FLEASIZE) != 0)); - if (lsm) { - if (!lli->lli_has_smd && - !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK)) - cl_file_inode_init(inode, md); - - lli->lli_maxbytes = lsm->lsm_maxbytes; - if (lli->lli_maxbytes > MAX_LFS_FILESIZE) - lli->lli_maxbytes = MAX_LFS_FILESIZE; - } + if (body->mbo_valid & OBD_MD_FLEASIZE) + cl_file_inode_init(inode, md); if (S_ISDIR(inode->i_mode)) { int rc; @@ -1828,48 +1813,11 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) LASSERT(fid_seq(&lli->lli_fid) != 0); if (body->mbo_valid & OBD_MD_FLSIZE) { - if (exp_connect_som(ll_i2mdexp(inode)) && - S_ISREG(inode->i_mode)) { - struct lustre_handle lockh; - enum ldlm_mode mode; - - /* As it is possible a blocking ast has been processed - * by this time, we need to check there is an UPDATE - * lock on the client and set LLIF_MDS_SIZE_LOCK holding - * it. - */ - mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE, - &lockh, LDLM_FL_CBPENDING, - LCK_CR | LCK_CW | - LCK_PR | LCK_PW); - if (mode) { - if (lli->lli_flags & (LLIF_DONE_WRITING | - LLIF_EPOCH_PENDING | - LLIF_SOM_DIRTY)) { - CERROR("%s: inode "DFID" flags %u still has size authority! 
do not trust the size got from MDS\n", - sbi->ll_md_exp->exp_obd->obd_name, - PFID(ll_inode2fid(inode)), - lli->lli_flags); - } else { - /* Use old size assignment to avoid - * deadlock bz14138 & bz14326 - */ - i_size_write(inode, body->mbo_size); - spin_lock(&lli->lli_lock); - lli->lli_flags |= LLIF_MDS_SIZE_LOCK; - spin_unlock(&lli->lli_lock); - } - ldlm_lock_decref(&lockh, mode); - } - } else { - /* Use old size assignment to avoid - * deadlock bz14138 & bz14326 - */ - i_size_write(inode, body->mbo_size); + i_size_write(inode, body->mbo_size); - CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n", - inode->i_ino, (unsigned long long)body->mbo_size); - } + CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n", + PFID(ll_inode2fid(inode)), + (unsigned long long)body->mbo_size); if (body->mbo_valid & OBD_MD_FLBLOCKS) inode->i_blocks = body->mbo_blocks; @@ -1877,7 +1825,7 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) if (body->mbo_valid & OBD_MD_TSTATE) { if (body->mbo_t_state & MS_RESTORE) - lli->lli_flags |= LLIF_FILE_RESTORING; + set_bit(LLIF_FILE_RESTORING, &lli->lli_flags); } return 0; @@ -1892,8 +1840,6 @@ int ll_read_inode2(struct inode *inode, void *opaque) CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n", PFID(&lli->lli_fid), inode); - LASSERT(!lli->lli_has_smd); - /* Core attributes from the MDS first. This is a new inode, and * the VFS doesn't zero times in the core inode so we have to do * it ourselves. They will be overwritten by either MDS or OST @@ -1988,9 +1934,9 @@ int ll_iocontrol(struct inode *inode, struct file *file, return put_user(flags, (int __user *)arg); } case FSFILT_IOC_SETFLAGS: { - struct lov_stripe_md *lsm; - struct obd_info oinfo = { }; struct md_op_data *op_data; + struct cl_object *obj; + struct iattr *attr; if (get_user(flags, (int __user *)arg)) return -EFAULT; @@ -2002,8 +1948,7 @@ int ll_iocontrol(struct inode *inode, struct file *file, op_data->op_attr_flags = flags; op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG; - rc = md_setattr(sbi->ll_md_exp, op_data, - NULL, 0, NULL, 0, &req, NULL); + rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req); ll_finish_md_op_data(op_data); ptlrpc_req_finished(req); if (rc) @@ -2011,30 +1956,17 @@ int ll_iocontrol(struct inode *inode, struct file *file, inode->i_flags = ll_ext_to_inode_flags(flags); - lsm = ccc_inode_lsm_get(inode); - if (!lsm_has_objects(lsm)) { - ccc_inode_lsm_put(inode, lsm); + obj = ll_i2info(inode)->lli_clob; + if (!obj) return 0; - } - oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); - if (!oinfo.oi_oa) { - ccc_inode_lsm_put(inode, lsm); + attr = kzalloc(sizeof(*attr), GFP_NOFS); + if (!attr) return -ENOMEM; - } - oinfo.oi_md = lsm; - oinfo.oi_oa->o_oi = lsm->lsm_oi; - oinfo.oi_oa->o_flags = flags; - oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | - OBD_MD_FLGROUP; - obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid); - rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL); - kmem_cache_free(obdo_cachep, oinfo.oi_oa); - ccc_inode_lsm_put(inode, lsm); - - if (rc && rc != -EPERM && rc != -EACCES) - CERROR("osc_setattr_async fails: rc = %d\n", rc); + attr->ia_valid = ATTR_ATTR_FLAG; + rc = cl_setattr_ost(obj, attr, flags); + kfree(attr); return rc; } default: @@ -2164,7 +2096,6 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req) return; op_data->op_fid1 = body->mbo_fid1; - op_data->op_ioepoch = body->mbo_ioepoch; op_data->op_handle = body->mbo_handle; op_data->op_mod_time = get_seconds(); md_close(exp, op_data, NULL, 
&close_req); @@ -2244,17 +2175,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, conf.coc_opc = OBJECT_CONF_SET; conf.coc_inode = *inode; conf.coc_lock = lock; - conf.u.coc_md = &md; + conf.u.coc_layout = md.layout; (void)ll_layout_conf(*inode, &conf); } LDLM_LOCK_PUT(lock); } out: - if (md.lsm) - obd_free_memmd(sbi->ll_dt_exp, &md.lsm); md_free_lustre_md(sbi->ll_md_exp, &md); - cleanup: if (rc != 0 && it && it->it_op & IT_OPEN) ll_open_cleanup(sb ? sb : (*inode)->i_sb, req); @@ -2380,8 +2308,9 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, op_data->op_default_stripe_offset = -1; if (S_ISDIR(i1->i_mode)) { op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md; - op_data->op_default_stripe_offset = - ll_i2info(i1)->lli_def_stripe_offset; + if (opc == LUSTRE_OPC_MKDIR) + op_data->op_default_stripe_offset = + ll_i2info(i1)->lli_def_stripe_offset; } if (i2) { @@ -2405,8 +2334,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); op_data->op_cap = cfs_curproc_cap_pack(); - op_data->op_bias = 0; - op_data->op_cli_flags = 0; if ((opc == LUSTRE_OPC_CREATE) && name && filename_is_volatile(name, namelen, &op_data->op_mds)) op_data->op_bias |= MDS_CREATE_VOLATILE; @@ -2414,10 +2341,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, op_data->op_mds = 0; op_data->op_data = data; - /* When called by ll_setattr_raw, file is i1. */ - if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED) - op_data->op_bias |= MDS_DATA_MODIFIED; - return op_data; } @@ -2451,6 +2374,9 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry) if (sbi->ll_flags & LL_SBI_USER_FID2PATH) seq_puts(seq, ",user_fid2path"); + if (sbi->ll_flags & LL_SBI_ALWAYS_PING) + seq_puts(seq, ",always_ping"); + return 0; } diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index 436691814a5e31455e216795fc9d9bb7219c2a77..ee01f20d8b1110bec42a3aebf82ef69b79e63d80 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c @@ -47,7 +47,7 @@ static const struct vm_operations_struct ll_file_vm_ops; -void policy_from_vma(ldlm_policy_data_t *policy, +void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma, unsigned long addr, size_t count) { @@ -80,43 +80,24 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, * API independent part for page fault initialization. * \param vma - virtual memory area addressed to page fault * \param env - corespondent lu_env to processing - * \param nest - nested level * \param index - page index corespondent to fault. * \parm ra_flags - vma readahead flags. * - * \return allocated and initialized env for fault operation. - * \retval EINVAL if env can't allocated - * \return other error codes from cl_io_init. + * \return error codes from cl_io_init. 
*/ static struct cl_io * -ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, - struct cl_env_nest *nest, pgoff_t index, - unsigned long *ra_flags) +ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma, + pgoff_t index, unsigned long *ra_flags) { struct file *file = vma->vm_file; struct inode *inode = file_inode(file); struct cl_io *io; struct cl_fault_io *fio; - struct lu_env *env; int rc; - *env_ret = NULL; if (ll_file_nolock(file)) return ERR_PTR(-EOPNOTSUPP); - /* - * page fault can be called when lustre IO is - * already active for the current thread, e.g., when doing read/write - * against user level buffer mapped from Lustre buffer. To avoid - * stomping on existing context, optionally force an allocation of a new - * one. - */ - env = cl_env_nested_get(nest); - if (IS_ERR(env)) - return ERR_PTR(-EINVAL); - - *env_ret = env; - restart: io = vvp_env_thread_io(env); io->ci_obj = ll_i2info(inode)->lli_clob; @@ -155,7 +136,6 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, if (io->ci_need_restart) goto restart; - cl_env_nested_put(nest, env); io = ERR_PTR(rc); } @@ -169,13 +149,17 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, struct lu_env *env; struct cl_io *io; struct vvp_io *vio; - struct cl_env_nest nest; int result; + int refcheck; sigset_t set; struct inode *inode; struct ll_inode_info *lli; - io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + return PTR_ERR(env); + + io = ll_fault_io_init(env, vma, vmpage->index, NULL); if (IS_ERR(io)) { result = PTR_ERR(io); goto out; @@ -231,17 +215,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, result = -EAGAIN; } - if (result == 0) { - spin_lock(&lli->lli_lock); - lli->lli_flags |= LLIF_DATA_MODIFIED; - spin_unlock(&lli->lli_lock); - } + if (!result) + set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags); } out_io: cl_io_fini(env, io); - cl_env_nested_put(&nest, env); out: + cl_env_put(env, &refcheck); CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result); LASSERT(ergo(result == 0, PageLocked(vmpage))); @@ -285,13 +266,19 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) struct vvp_io *vio = NULL; struct page *vmpage; unsigned long ra_flags; - struct cl_env_nest nest; - int result; + int result = 0; int fault_ret = 0; + int refcheck; + + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + return PTR_ERR(env); - io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags); - if (IS_ERR(io)) - return to_fault_error(PTR_ERR(io)); + io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags); + if (IS_ERR(io)) { + result = to_fault_error(PTR_ERR(io)); + goto out; + } result = io->ci_result; if (result == 0) { @@ -322,14 +309,15 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) } } cl_io_fini(env, io); - cl_env_nested_put(&nest, env); vma->vm_flags |= ra_flags; + +out: + cl_env_put(env, &refcheck); if (result != 0 && !(fault_ret & VM_FAULT_RETRY)) fault_ret |= to_fault_error(result); - CDEBUG(D_MMAP, "%s fault %d/%d\n", - current->comm, fault_ret, result); + CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result); return fault_ret; } @@ -381,6 +369,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) bool retry; int result; + file_update_time(vma->vm_file); do { retry = false; result = ll_page_mkwrite0(vma, vmf->page, &retry); diff --git 
a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c index 709230571b4b6fa1c524aadf8ba03062f0648654..49a930f0fc5d775164366b8a51b599b6638950fb 100644 --- a/drivers/staging/lustre/lustre/llite/llite_nfs.c +++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c @@ -169,22 +169,12 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren /* N.B. d_obtain_alias() drops inode ref on error */ result = d_obtain_alias(inode); if (!IS_ERR(result)) { - int rc; - - rc = ll_d_init(result); - if (rc < 0) { - dput(result); - result = ERR_PTR(rc); - } else { - struct ll_dentry_data *ldd = ll_d2d(result); - - /* - * Need to signal to the ll_intent_file_open that - * we came from NFS and so opencache needs to be - * enabled for this one - */ - ldd->lld_nfs_dentry = 1; - } + /* + * Need to signal to the ll_intent_file_open that + * we came from NFS and so opencache needs to be + * enabled for this one + */ + ll_d2d(result)->lld_nfs_dentry = 1; } return result; @@ -226,7 +216,7 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen, static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, int namelen, loff_t hash, u64 ino, - unsigned type) + unsigned int type) { /* It is hack to access lde_fid for comparison with lgd_fid. * So the input 'name' must be part of the 'lu_dirent'. diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c index 23fda9d98bffd571fd0b7d3de323b6b457237e54..03682c10fc9e578d08a3eb3bf6850fda65bd3134 100644 --- a/drivers/staging/lustre/lustre/llite/lproc_llite.c +++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c @@ -1060,10 +1060,6 @@ static const struct llite_file_opcode { "brw_read" }, { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES, "brw_write" }, - { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES, - "osc_read" }, - { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES, - "osc_write" }, { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" }, { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" }, { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" }, diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index 180f35e3afd9524026a755d22b56faa860942a49..a8f4e7fb0a461afd64b23cfaf81090400470b075 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -113,13 +113,18 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash, if (inode->i_state & I_NEW) { rc = ll_read_inode2(inode, md); if (!rc && S_ISREG(inode->i_mode) && - !ll_i2info(inode)->lli_clob) { - CDEBUG(D_INODE, "%s: apply lsm %p to inode "DFID"\n", - ll_get_fsname(sb, NULL, 0), md->lsm, - PFID(ll_inode2fid(inode))); + !ll_i2info(inode)->lli_clob) rc = cl_file_inode_init(inode, md); - } + if (rc) { + /* + * Let's clear directory lsm here, otherwise + * make_bad_inode() will reset the inode mode + * to regular, then ll_clear_inode will not + * be able to clear lsm_md + */ + if (S_ISDIR(inode->i_mode)) + ll_dir_clear_lsm_md(inode); make_bad_inode(inode); unlock_new_inode(inode); iput(inode); @@ -132,6 +137,8 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash, CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n", PFID(&md->body->mbo_fid1), inode, rc); if (rc) { + if (S_ISDIR(inode->i_mode)) + ll_dir_clear_lsm_md(inode); iput(inode); inode = ERR_PTR(rc); } @@ -258,7 +265,9 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct 
ldlm_lock_desc *desc, struct ll_inode_info *lli = ll_i2info(inode); spin_lock(&lli->lli_lock); - lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK; + LTIME_S(inode->i_mtime) = 0; + LTIME_S(inode->i_atime) = 0; + LTIME_S(inode->i_ctime) = 0; spin_unlock(&lli->lli_lock); } @@ -287,11 +296,39 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, hash = cl_fid_build_ino(&lli->lli_pfid, ll_need_32bit_api(ll_i2sbi(inode))); - - master_inode = ilookup5(inode->i_sb, hash, - ll_test_inode_by_fid, - (void *)&lli->lli_pfid); - if (master_inode && !IS_ERR(master_inode)) { + /* + * Do not lookup the inode with ilookup5, + * otherwise it will cause dead lock, + * + * 1. Client1 send chmod req to the MDT0, then + * on MDT0, it enqueues master and all of its + * slaves lock, (mdt_attr_set() -> + * mdt_lock_slaves()), after gets master and + * stripe0 lock, it will send the enqueue req + * (for stripe1) to MDT1, then MDT1 finds the + * lock has been granted to client2. Then MDT1 + * sends blocking ast to client2. + * + * 2. At the same time, client2 tries to unlink + * the striped dir (rm -rf striped_dir), and + * during lookup, it will hold the master inode + * of the striped directory, whose inode state + * is NEW, then tries to revalidate all of its + * slaves, (ll_prep_inode()->ll_iget()-> + * ll_read_inode2()-> ll_update_inode().). And + * it will be blocked on the server side because + * of 1. + * + * 3. Then the client get the blocking_ast req, + * cancel the lock, but being blocked if using + * ->ilookup5()), because master inode state is + * NEW. + */ + master_inode = ilookup5_nowait(inode->i_sb, + hash, + ll_test_inode_by_fid, + (void *)&lli->lli_pfid); + if (master_inode) { ll_invalidate_negative_children(master_inode); iput(master_inode); } @@ -395,17 +432,9 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry) */ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de) { - struct dentry *new; - int rc; - if (inode) { - new = ll_find_alias(inode, de); + struct dentry *new = ll_find_alias(inode, de); if (new) { - rc = ll_d_init(new); - if (rc < 0) { - dput(new); - return ERR_PTR(rc); - } d_move(new, de); iput(inode); CDEBUG(D_DENTRY, @@ -414,9 +443,6 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de) return new; } } - rc = ll_d_init(de); - if (rc < 0) - return ERR_PTR(rc); d_add(de, inode); CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n", de, d_inode(de), d_count(de), de->d_flags); @@ -535,6 +561,10 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, } } + if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && + dentry->d_sb->s_flags & MS_RDONLY) + return ERR_PTR(-EROFS); + if (it->it_op & IT_CREAT) opc = LUSTRE_OPC_CREATE; else @@ -801,7 +831,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, return PTR_ERR(inode); d_instantiate(dentry, inode); - return 0; + + return ll_init_security(dentry, inode, dir); } void ll_update_times(struct ptlrpc_request *request, struct inode *inode) @@ -896,6 +927,8 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry, goto err_exit; d_instantiate(dentry, inode); + + err = ll_init_security(dentry, inode, dir); err_exit: if (request) ptlrpc_req_finished(request); diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c index 50c0152ba0224d5d8be2e9f0f4696b11054bb0d1..f10e092979fe8476be47ca779bfcadaa250f05b9 100644 --- a/drivers/staging/lustre/lustre/llite/rw.c +++ 
b/drivers/staging/lustre/lustre/llite/rw.c @@ -47,6 +47,7 @@ #include /* current_is_kswapd() */ #include +#include #define DEBUG_SUBSYSTEM S_LLITE @@ -180,90 +181,73 @@ void ll_ras_enter(struct file *f) spin_unlock(&ras->ras_lock); } -static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, struct cl_page *page, - struct cl_object *clob, pgoff_t *max_index) +/** + * Initiates read-ahead of a page with given index. + * + * \retval +ve: page was already uptodate so it will be skipped + * from being added; + * \retval -ve: page wasn't added to \a queue for error; + * \retval 0: page was added into \a queue for read ahead. + */ +static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, + struct cl_page_list *queue, pgoff_t index) { - struct page *vmpage = page->cp_vmpage; + enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */ + struct cl_object *clob = io->ci_obj; + struct inode *inode = vvp_object_inode(clob); + const char *msg = NULL; + struct cl_page *page; struct vvp_page *vpg; - int rc; + struct page *vmpage; + int rc = 0; + + vmpage = grab_cache_page_nowait(inode->i_mapping, index); + if (!vmpage) { + which = RA_STAT_FAILED_GRAB_PAGE; + msg = "g_c_p_n failed"; + rc = -EBUSY; + goto out; + } + + /* Check if vmpage was truncated or reclaimed */ + if (vmpage->mapping != inode->i_mapping) { + which = RA_STAT_WRONG_GRAB_PAGE; + msg = "g_c_p_n returned invalid page"; + rc = -EBUSY; + goto out; + } + + page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE); + if (IS_ERR(page)) { + which = RA_STAT_FAILED_GRAB_PAGE; + msg = "cl_page_find failed"; + rc = PTR_ERR(page); + goto out; + } - rc = 0; - cl_page_assume(env, io, page); lu_ref_add(&page->cp_reference, "ra", current); + cl_page_assume(env, io, page); vpg = cl2vvp_page(cl_object_page_slice(clob, page)); if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) { - CDEBUG(D_READA, "page index %lu, max_index: %lu\n", - vvp_index(vpg), *max_index); - if (*max_index == 0 || vvp_index(vpg) > *max_index) - rc = cl_page_is_under_lock(env, io, page, max_index); - if (rc == 0) { - vpg->vpg_defer_uptodate = 1; - vpg->vpg_ra_used = 0; - cl_page_list_add(queue, page); - rc = 1; - } else { - cl_page_discard(env, io, page); - rc = -ENOLCK; - } + vpg->vpg_defer_uptodate = 1; + vpg->vpg_ra_used = 0; + cl_page_list_add(queue, page); } else { /* skip completed pages */ cl_page_unassume(env, io, page); + /* This page is already uptodate, returning a positive number + * to tell the callers about this + */ + rc = 1; } + lu_ref_del(&page->cp_reference, "ra", current); cl_page_put(env, page); - return rc; -} - -/** - * Initiates read-ahead of a page with given index. - * - * \retval +ve: page was added to \a queue. - * - * \retval -ENOLCK: there is no extent lock for this part of a file, stop - * read-ahead. - * - * \retval -ve, 0: page wasn't added to \a queue for other reason. 
- */ -static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, - pgoff_t index, pgoff_t *max_index) -{ - struct cl_object *clob = io->ci_obj; - struct inode *inode = vvp_object_inode(clob); - struct page *vmpage; - struct cl_page *page; - enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */ - int rc = 0; - const char *msg = NULL; - - vmpage = grab_cache_page_nowait(inode->i_mapping, index); +out: if (vmpage) { - /* Check if vmpage was truncated or reclaimed */ - if (vmpage->mapping == inode->i_mapping) { - page = cl_page_find(env, clob, vmpage->index, - vmpage, CPT_CACHEABLE); - if (!IS_ERR(page)) { - rc = cl_read_ahead_page(env, io, queue, - page, clob, max_index); - if (rc == -ENOLCK) { - which = RA_STAT_FAILED_MATCH; - msg = "lock match failed"; - } - } else { - which = RA_STAT_FAILED_GRAB_PAGE; - msg = "cl_page_find failed"; - } - } else { - which = RA_STAT_WRONG_GRAB_PAGE; - msg = "g_c_p_n returned invalid page"; - } - if (rc != 1) + if (rc) unlock_page(vmpage); put_page(vmpage); - } else { - which = RA_STAT_FAILED_GRAB_PAGE; - msg = "g_c_p_n failed"; } if (msg) { ll_ra_stats_inc(inode, which); @@ -378,12 +362,12 @@ static int ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue, struct ra_io_arg *ria, unsigned long *reserved_pages, - unsigned long *ra_end) + pgoff_t *ra_end) { + struct cl_read_ahead ra = { 0 }; int rc, count = 0; bool stride_ria; pgoff_t page_idx; - pgoff_t max_index = 0; LASSERT(ria); RIA_DEBUG(ria); @@ -392,14 +376,23 @@ static int ll_read_ahead_pages(const struct lu_env *env, for (page_idx = ria->ria_start; page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) { if (ras_inside_ra_window(page_idx, ria)) { + if (!ra.cra_end || ra.cra_end < page_idx) { + cl_read_ahead_release(env, &ra); + + rc = cl_io_read_ahead(env, io, page_idx, &ra); + if (rc < 0) + break; + + LASSERTF(ra.cra_end >= page_idx, + "object: %p, indcies %lu / %lu\n", + io->ci_obj, ra.cra_end, page_idx); + } + /* If the page is inside the read-ahead window*/ - rc = ll_read_ahead_page(env, io, queue, - page_idx, &max_index); - if (rc == 1) { + rc = ll_read_ahead_page(env, io, queue, page_idx); + if (!rc) { (*reserved_pages)--; count++; - } else if (rc == -ENOLCK) { - break; } } else if (stride_ria) { /* If it is not in the read-ahead window, and it is @@ -425,19 +418,21 @@ static int ll_read_ahead_pages(const struct lu_env *env, } } } + cl_read_ahead_release(env, &ra); + *ra_end = page_idx; return count; } -int ll_readahead(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, struct ll_readahead_state *ras, - bool hit) +static int ll_readahead(const struct lu_env *env, struct cl_io *io, + struct cl_page_list *queue, + struct ll_readahead_state *ras, bool hit) { struct vvp_io *vio = vvp_env_io(env); struct ll_thread_info *lti = ll_env_info(env); struct cl_attr *attr = vvp_env_thread_attr(env); - unsigned long start = 0, end = 0, reserved; - unsigned long ra_end, len, mlen = 0; + unsigned long len, mlen = 0, reserved; + pgoff_t ra_end, start = 0, end = 0; struct inode *inode; struct ra_io_arg *ria = <i->lti_ria; struct cl_object *clob; @@ -463,30 +458,25 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, spin_lock(&ras->ras_lock); - /* Enlarge the RA window to encompass the full read */ - if (vio->vui_ra_valid && - ras->ras_window_start + ras->ras_window_len < - vio->vui_ra_start + vio->vui_ra_count) { - ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count - - 
ras->ras_window_start; - } + /** + * Note: other thread might rollback the ras_next_readahead, + * if it can not get the full size of prepared pages, see the + * end of this function. For stride read ahead, it needs to + * make sure the offset is no less than ras_stride_offset, + * so that stride read ahead can work correctly. + */ + if (stride_io_mode(ras)) + start = max(ras->ras_next_readahead, ras->ras_stride_offset); + else + start = ras->ras_next_readahead; - /* Reserve a part of the read-ahead window that we'll be issuing */ - if (ras->ras_window_len > 0) { - /* - * Note: other thread might rollback the ras_next_readahead, - * if it can not get the full size of prepared pages, see the - * end of this function. For stride read ahead, it needs to - * make sure the offset is no less than ras_stride_offset, - * so that stride read ahead can work correctly. - */ - if (stride_io_mode(ras)) - start = max(ras->ras_next_readahead, - ras->ras_stride_offset); - else - start = ras->ras_next_readahead; + if (ras->ras_window_len > 0) end = ras->ras_window_start + ras->ras_window_len - 1; - } + + /* Enlarge the RA window to encompass the full read */ + if (vio->vui_ra_valid && + end < vio->vui_ra_start + vio->vui_ra_count - 1) + end = vio->vui_ra_start + vio->vui_ra_count - 1; if (end != 0) { unsigned long rpc_boundary; @@ -575,8 +565,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, * if the region we failed to issue read-ahead on is still ahead * of the app and behind the next index to start read-ahead from */ - CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n", - ra_end, end, ria->ria_end); + CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n", + ra_end, end, ria->ria_end, ret); if (ra_end != end + 1) { ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END); @@ -608,7 +598,7 @@ static void ras_reset(struct inode *inode, struct ll_readahead_state *ras, ras->ras_consecutive_pages = 0; ras->ras_window_len = 0; ras_set_start(inode, ras, index); - ras->ras_next_readahead = max(ras->ras_window_start, index); + ras->ras_next_readahead = max(ras->ras_window_start, index + 1); RAS_CDEBUG(ras); } @@ -737,12 +727,13 @@ static void ras_increase_window(struct inode *inode, ra->ra_max_pages_per_file); } -void ras_update(struct ll_sb_info *sbi, struct inode *inode, - struct ll_readahead_state *ras, unsigned long index, - unsigned hit) +static void ras_update(struct ll_sb_info *sbi, struct inode *inode, + struct ll_readahead_state *ras, unsigned long index, + enum ras_update_flags flags) { struct ll_ra_info *ra = &sbi->ll_ra_info; int zero = 0, stride_detect = 0, ra_miss = 0; + bool hit = flags & LL_RAS_HIT; spin_lock(&ras->ras_lock); @@ -772,7 +763,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, * to for subsequent IO. The mmap case does not increment * ras_requests and thus can never trigger this behavior. 
*/ - if (ras->ras_requests == 2 && !ras->ras_request_index) { + if (ras->ras_requests >= 2 && !ras->ras_request_index) { __u64 kms_pages; kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >> @@ -784,8 +775,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, if (kms_pages && kms_pages <= ra->ra_max_read_ahead_whole_pages) { ras->ras_window_start = 0; - ras->ras_last_readpage = 0; - ras->ras_next_readahead = 0; + ras->ras_next_readahead = index + 1; ras->ras_window_len = min(ra->ra_max_pages_per_file, ra->ra_max_read_ahead_whole_pages); goto out_unlock; @@ -815,13 +805,20 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, if (ra_miss) { if (index_in_stride_window(ras, index) && stride_io_mode(ras)) { - /*If stride-RA hit cache miss, the stride dector - *will not be reset to avoid the overhead of - *redetecting read-ahead mode - */ if (index != ras->ras_last_readpage + 1) ras->ras_consecutive_pages = 0; ras_reset(inode, ras, index); + + /* If stride-RA hit cache miss, the stride + * detector will not be reset to avoid the + * overhead of redetecting read-ahead mode, + * but on the condition that the stride window + * is still intersect with normal sequential + * read-ahead window. + */ + if (ras->ras_window_start < + ras->ras_stride_offset) + ras_stride_reset(ras); RAS_CDEBUG(ras); } else { /* Reset both stride window and normal RA @@ -866,8 +863,13 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, /* Trigger RA in the mmap case where ras_consecutive_requests * is not incremented and thus can't be used to trigger RA */ - if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) { - ras->ras_window_len = RAS_INCREASE_STEP(inode); + if (ras->ras_consecutive_pages >= 4 && flags & LL_RAS_MMAP) { + ras_increase_window(inode, ras, ra); + /* + * reset consecutive pages so that the readahead window can + * grow gradually. 
+ */ + ras->ras_consecutive_pages = 0; goto out_unlock; } @@ -902,17 +904,17 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) struct cl_io *io; struct cl_page *page; struct cl_object *clob; - struct cl_env_nest nest; bool redirtied = false; bool unlocked = false; int result; + int refcheck; LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); LASSERT(ll_i2dtexp(inode)); - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) { result = PTR_ERR(env); goto out; @@ -977,7 +979,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) } } - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); goto out; out: @@ -1087,6 +1089,63 @@ void ll_cl_remove(struct file *file, const struct lu_env *env) write_unlock(&fd->fd_lock); } +static int ll_io_read_page(const struct lu_env *env, struct cl_io *io, + struct cl_page *page) +{ + struct inode *inode = vvp_object_inode(page->cp_obj); + struct ll_file_data *fd = vvp_env_io(env)->vui_fd; + struct ll_readahead_state *ras = &fd->fd_ras; + struct cl_2queue *queue = &io->ci_queue; + struct ll_sb_info *sbi = ll_i2sbi(inode); + struct vvp_page *vpg; + int rc = 0; + + vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page)); + if (sbi->ll_ra_info.ra_max_pages_per_file > 0 && + sbi->ll_ra_info.ra_max_pages > 0) { + struct vvp_io *vio = vvp_env_io(env); + enum ras_update_flags flags = 0; + + if (vpg->vpg_defer_uptodate) + flags |= LL_RAS_HIT; + if (!vio->vui_ra_valid) + flags |= LL_RAS_MMAP; + ras_update(sbi, inode, ras, vvp_index(vpg), flags); + } + + if (vpg->vpg_defer_uptodate) { + vpg->vpg_ra_used = 1; + cl_page_export(env, page, 1); + } + + cl_2queue_init(queue); + /* + * Add page into the queue even when it is marked uptodate above. + * this will unlock it automatically as part of cl_page_list_disown(). + */ + cl_page_list_add(&queue->c2_qin, page); + if (sbi->ll_ra_info.ra_max_pages_per_file > 0 && + sbi->ll_ra_info.ra_max_pages > 0) { + int rc2; + + rc2 = ll_readahead(env, io, &queue->c2_qin, ras, + vpg->vpg_defer_uptodate); + CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n", + PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg)); + } + + if (queue->c2_qin.pl_nr > 0) + rc = cl_io_submit_rw(env, io, CRT_READ, queue); + + /* + * Unlock unsent pages in case of error. + */ + cl_page_list_disown(env, io, &queue->c2_qin); + cl_2queue_fini(env, queue); + + return rc; +} + int ll_readpage(struct file *file, struct page *vmpage) { struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob; @@ -1110,7 +1169,7 @@ int ll_readpage(struct file *file, struct page *vmpage) LASSERT(page->cp_type == CPT_CACHEABLE); if (likely(!PageUptodate(vmpage))) { cl_page_assume(env, io, page); - result = cl_io_read_page(env, io, page); + result = ll_io_read_page(env, io, page); } else { /* Page from a non-object file. 
*/ unlock_page(vmpage); diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c index 26f3a37873a75ceae43a80220f8938606224786d..21e06e5b514e851b0ab2cdee97578ddf43808729 100644 --- a/drivers/staging/lustre/lustre/llite/rw26.c +++ b/drivers/staging/lustre/lustre/llite/rw26.c @@ -71,8 +71,6 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset, struct cl_page *page; struct cl_object *obj; - int refcheck; - LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); @@ -82,28 +80,27 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset, * happening with locked page too */ if (offset == 0 && length == PAGE_SIZE) { - env = cl_env_get(&refcheck); - if (!IS_ERR(env)) { - inode = vmpage->mapping->host; - obj = ll_i2info(inode)->lli_clob; - if (obj) { - page = cl_vmpage_page(vmpage, obj); - if (page) { - cl_page_delete(env, page); - cl_page_put(env, page); - } - } else { - LASSERT(vmpage->private == 0); + /* See the comment in ll_releasepage() */ + env = cl_env_percpu_get(); + LASSERT(!IS_ERR(env)); + inode = vmpage->mapping->host; + obj = ll_i2info(inode)->lli_clob; + if (obj) { + page = cl_vmpage_page(vmpage, obj); + if (page) { + cl_page_delete(env, page); + cl_page_put(env, page); } - cl_env_put(env, &refcheck); + } else { + LASSERT(vmpage->private == 0); } + cl_env_percpu_put(env); } } static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) { struct lu_env *env; - void *cookie; struct cl_object *obj; struct cl_page *page; struct address_space *mapping; @@ -129,7 +126,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) if (!page) return 1; - cookie = cl_env_reenter(); env = cl_env_percpu_get(); LASSERT(!IS_ERR(env)); @@ -155,7 +151,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) cl_page_put(env, page); cl_env_percpu_put(env); - cl_env_reexit(cookie); return result; } @@ -340,19 +335,15 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter) { - struct lu_env *env; + struct ll_cl_context *lcc; + const struct lu_env *env; struct cl_io *io; struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; loff_t file_offset = iocb->ki_pos; ssize_t count = iov_iter_count(iter); ssize_t tot_bytes = 0, result = 0; - struct ll_inode_info *lli = ll_i2info(inode); long size = MAX_DIO_SIZE; - int refcheck; - - if (!lli->lli_has_smd) - return -EBADF; /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */ if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK)) @@ -367,9 +358,13 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter) if (iov_iter_alignment(iter) & ~PAGE_MASK) return -EINVAL; - env = cl_env_get(&refcheck); + lcc = ll_cl_find(file); + if (!lcc) + return -EIO; + + env = lcc->lcc_env; LASSERT(!IS_ERR(env)); - io = vvp_env_io(env)->vui_cl.cis_io; + io = lcc->lcc_io; LASSERT(io); while (iov_iter_count(iter)) { @@ -426,7 +421,6 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter) vio->u.write.vui_written += tot_bytes; } - cl_env_put(env, &refcheck); return tot_bytes ? 
tot_bytes : result; } @@ -466,13 +460,13 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io, } static int ll_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned int len, unsigned int flags, struct page **pagep, void **fsdata) { struct ll_cl_context *lcc; - const struct lu_env *env; + const struct lu_env *env = NULL; struct cl_io *io; - struct cl_page *page; + struct cl_page *page = NULL; struct cl_object *clob = ll_i2info(mapping->host)->lli_clob; pgoff_t index = pos >> PAGE_SHIFT; struct page *vmpage = NULL; @@ -484,6 +478,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, lcc = ll_cl_find(file); if (!lcc) { + io = NULL; result = -EIO; goto out; } @@ -560,6 +555,12 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, unlock_page(vmpage); put_page(vmpage); } + if (!IS_ERR_OR_NULL(page)) { + lu_ref_del(&page->cp_reference, "cl_io", io); + cl_page_put(env, page); + } + if (io) + io->ci_result = result; } else { *pagep = vmpage; *fsdata = lcc; @@ -576,7 +577,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping, struct cl_io *io; struct vvp_io *vio; struct cl_page *page; - unsigned from = pos & (PAGE_SIZE - 1); + unsigned int from = pos & (PAGE_SIZE - 1); bool unplug = false; int result = 0; @@ -629,6 +630,8 @@ static int ll_write_end(struct file *file, struct address_space *mapping, file->f_flags & O_SYNC || IS_SYNC(file_inode(file))) result = vvp_io_write_commit(env, io); + if (result < 0) + io->ci_result = result; return result >= 0 ? copied : result; } diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c index 0677513476ec1b975d674bdd1dc1b45f2307e583..f1ee17f9ec0da1ef311cce71a4c5ec97d66c87d5 100644 --- a/drivers/staging/lustre/lustre/llite/statahead.c +++ b/drivers/staging/lustre/lustre/llite/statahead.c @@ -659,8 +659,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, struct ll_inode_info *lli = ll_i2info(dir); struct ll_statahead_info *sai = lli->lli_sai; struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata; + wait_queue_head_t *waitq = NULL; __u64 handle = 0; - bool wakeup; if (it_disposition(it, DISP_LOOKUP_NEG)) rc = -ENOENT; @@ -693,7 +693,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, spin_lock(&lli->lli_sa_lock); if (rc) { - wakeup = __sa_make_ready(sai, entry, rc); + if (__sa_make_ready(sai, entry, rc)) + waitq = &sai->sai_waitq; } else { entry->se_minfo = minfo; entry->se_req = ptlrpc_request_addref(req); @@ -704,13 +705,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, * with parent's lock held, for example: unlink. 
*/ entry->se_handle = handle; - wakeup = !sa_has_callback(sai); + if (!sa_has_callback(sai)) + waitq = &sai->sai_thread.t_ctl_waitq; + list_add_tail(&entry->se_list, &sai->sai_interim_entries); } sai->sai_replied++; - if (wakeup) - wake_up(&sai->sai_thread.t_ctl_waitq); + if (waitq) + wake_up(waitq); spin_unlock(&lli->lli_sa_lock); return rc; @@ -1397,10 +1400,10 @@ static int revalidate_statahead_dentry(struct inode *dir, struct dentry **dentryp, bool unplug) { + struct ll_inode_info *lli = ll_i2info(dir); struct sa_entry *entry = NULL; struct l_wait_info lwi = { 0 }; struct ll_dentry_data *ldd; - struct ll_inode_info *lli; int rc = 0; if ((*dentryp)->d_name.name[0] == '.') { @@ -1446,7 +1449,9 @@ static int revalidate_statahead_dentry(struct inode *dir, sa_handle_callback(sai); if (!sa_ready(entry)) { + spin_lock(&lli->lli_sa_lock); sai->sai_index_wait = entry->se_index; + spin_unlock(&lli->lli_sa_lock); lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL, LWI_ON_SIGNAL_NOOP, NULL); rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi); @@ -1475,6 +1480,7 @@ static int revalidate_statahead_dentry(struct inode *dir, alias = ll_splice_alias(inode, *dentryp); if (IS_ERR(alias)) { + ll_intent_release(&it); rc = PTR_ERR(alias); goto out_unplug; } @@ -1493,6 +1499,7 @@ static int revalidate_statahead_dentry(struct inode *dir, *dentryp, PFID(ll_inode2fid((*dentryp)->d_inode)), PFID(ll_inode2fid(inode))); + ll_intent_release(&it); rc = -ESTALE; goto out_unplug; } @@ -1512,10 +1519,7 @@ static int revalidate_statahead_dentry(struct inode *dir, * dentry_may_statahead(). */ ldd = ll_d2d(*dentryp); - lli = ll_i2info(dir); - /* ldd can be NULL if llite lookup failed. */ - if (ldd) - ldd->lld_sa_generation = lli->lli_sa_generation; + ldd->lld_sa_generation = lli->lli_sa_generation; sa_put(sai, entry); return rc; } diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c index 82c7c48aa619c7090127025664f58eaed769cc5a..cd77b55d3895462e144071651f91ddce2185ef8e 100644 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ b/drivers/staging/lustre/lustre/llite/symlink.c @@ -149,7 +149,6 @@ static const char *ll_get_link(struct dentry *dentry, } const struct inode_operations ll_fast_symlink_inode_operations = { - .readlink = generic_readlink, .setattr = ll_setattr, .get_link = ll_get_link, .getattr = ll_getattr, diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c index 8aa8ecc09a48ea16b0f299002b5d9f9f362c84fa..12c129f7e4ad56a61447bed71d81856c0d242dda 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_dev.c +++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c @@ -55,7 +55,6 @@ static struct kmem_cache *ll_thread_kmem; struct kmem_cache *vvp_lock_kmem; struct kmem_cache *vvp_object_kmem; -struct kmem_cache *vvp_req_kmem; static struct kmem_cache *vvp_session_kmem; static struct kmem_cache *vvp_thread_kmem; @@ -75,11 +74,6 @@ static struct lu_kmem_descr vvp_caches[] = { .ckd_name = "vvp_object_kmem", .ckd_size = sizeof(struct vvp_object), }, - { - .ckd_cache = &vvp_req_kmem, - .ckd_name = "vvp_req_kmem", - .ckd_size = sizeof(struct vvp_req), - }, { .ckd_cache = &vvp_session_kmem, .ckd_name = "vvp_session_kmem", @@ -177,10 +171,6 @@ static const struct lu_device_operations vvp_lu_ops = { .ldo_object_alloc = vvp_object_alloc }; -static const struct cl_device_operations vvp_cl_ops = { - .cdo_req_init = vvp_req_init -}; - static struct lu_device *vvp_device_free(const struct lu_env *env, struct lu_device *d) 
{ @@ -213,7 +203,6 @@ static struct lu_device *vvp_device_alloc(const struct lu_env *env, lud = &vdv->vdv_cl.cd_lu_dev; cl_device_init(&vdv->vdv_cl, t); vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops; - vdv->vdv_cl.cd_ops = &vvp_cl_ops; site = kzalloc(sizeof(*site), GFP_NOFS); if (site) { @@ -332,7 +321,6 @@ int cl_sb_init(struct super_block *sb) cl = cl_type_setup(env, NULL, &vvp_device_type, sbi->ll_dt_exp->exp_obd->obd_lu_dev); if (!IS_ERR(cl)) { - cl2vvp_dev(cl)->vdv_sb = sb; sbi->ll_cl = cl; sbi->ll_site = cl2lu_dev(cl)->ld_site; } @@ -521,11 +509,10 @@ static void vvp_pgcache_page_show(const struct lu_env *env, vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type)); vmpage = vpg->vpg_page; - seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [", + seq_printf(seq, " %5i | %p %p %s %s %s | %p " DFID "(%p) %lu %u [", 0 /* gen */, vpg, page, "none", - vpg->vpg_write_queued ? "wq" : "- ", vpg->vpg_defer_uptodate ? "du" : "- ", PageWriteback(vmpage) ? "wb" : "-", vmpage, PFID(ll_inode2fid(vmpage->mapping->host)), diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h index 4464ad258387f482941f3c82169509e6e7e240bc..c60d0414ac2584ce829ca13adb2d98544c7c3a57 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_internal.h +++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h @@ -42,9 +42,7 @@ enum obd_notify_event; struct inode; -struct lov_stripe_md; struct lustre_md; -struct obd_capa; struct obd_device; struct obd_export; struct page; @@ -122,7 +120,6 @@ extern struct lu_context_key vvp_thread_key; extern struct kmem_cache *vvp_lock_kmem; extern struct kmem_cache *vvp_object_kmem; -extern struct kmem_cache *vvp_req_kmem; struct vvp_thread_info { struct cl_lock vti_lock; @@ -194,14 +191,6 @@ struct vvp_object { struct cl_object vob_cl; struct inode *vob_inode; - /** - * A list of dirty pages pending IO in the cache. Used by - * SOM. Protected by ll_inode_info::lli_lock. - * - * \see vvp_page::vpg_pending_linkage - */ - struct list_head vob_pending_list; - /** * Number of transient pages. This is no longer protected by i_sem, * and needs to be atomic. This is not actually used for anything, @@ -235,15 +224,7 @@ struct vvp_object { struct vvp_page { struct cl_page_slice vpg_cl; unsigned int vpg_defer_uptodate:1, - vpg_ra_used:1, - vpg_write_queued:1; - /** - * Non-empty iff this page is already counted in - * vvp_object::vob_pending_list. This list is only used as a flag, - * that is, never iterated through, only checked for list_empty(), but - * having a list is useful for debugging. - */ - struct list_head vpg_pending_linkage; + vpg_ra_used:1; /** VM page */ struct page *vpg_page; }; @@ -260,7 +241,6 @@ static inline pgoff_t vvp_index(struct vvp_page *vvp) struct vvp_device { struct cl_device vdv_cl; - struct super_block *vdv_sb; struct cl_device *vdv_next; }; @@ -268,10 +248,6 @@ struct vvp_lock { struct cl_lock_slice vlk_cl; }; -struct vvp_req { - struct cl_req_slice vrq_cl; -}; - void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key); void ccc_key_fini(const struct lu_context *ctx, @@ -325,21 +301,8 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice) # define CLOBINVRNT(env, clob, expr) \ ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr))) -/** - * New interfaces to get and put lov_stripe_md from lov layer. This violates - * layering because lov_stripe_md is supposed to be a private data in lov. 
- * - * NB: If you find you have to use these interfaces for your new code, please - * think about it again. These interfaces may be removed in the future for - * better layering. - */ -struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj); -void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm); int lov_read_and_clear_async_rc(struct cl_object *clob); -struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode); -void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm); - int vvp_io_init(const struct lu_env *env, struct cl_object *obj, struct cl_io *io); int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io); @@ -347,8 +310,6 @@ int vvp_lock_init(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); int vvp_page_init(const struct lu_env *env, struct cl_object *obj, struct cl_page *page, pgoff_t index); -int vvp_req_init(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req); struct lu_object *vvp_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 2b7f182a15e2c6aca64e0087ec8539654494e02c..697cbfbe937405810e7c56a5aa5c93071646b44f 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -72,9 +72,10 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, /* don't need lock here to check lli_layout_gen as we have held * extent lock and GROUP lock has to hold to swap layout */ - if (ll_layout_version_get(lli) != vio->vui_layout_gen) { + if (ll_layout_version_get(lli) != vio->vui_layout_gen || + OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) { io->ci_need_restart = 1; - /* this will return application a short read/write */ + /* this will cause a short read/write */ io->ci_continue = 0; rc = false; } @@ -328,8 +329,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) vio->vui_layout_gen, gen); /* today successful restore is the only possible case */ /* restore was done, clear restoring state */ - ll_i2info(vvp_object_inode(obj))->lli_flags &= - ~LLIF_FILE_RESTORING; + clear_bit(LLIF_FILE_RESTORING, + &ll_i2info(inode)->lli_flags); } } } @@ -369,7 +370,7 @@ static int vvp_mmap_locks(const struct lu_env *env, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct cl_lock_descr *descr = &cti->vti_descr; - ldlm_policy_data_t policy; + union ldlm_policy_data policy; unsigned long addr; ssize_t count; int result = 0; @@ -450,7 +451,8 @@ static void vvp_io_advance(const struct lu_env *env, struct vvp_io *vio = cl2vvp_io(env, ios); CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob); + vio->vui_tot_count -= nob; + iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count); } static void vvp_io_update_iov(const struct lu_env *env, @@ -551,9 +553,16 @@ static int vvp_io_setattr_lock(const struct lu_env *env, if (new_size == 0) enqflags = CEF_DISCARD_DATA; } else { - if ((io->u.ci_setattr.sa_attr.lvb_mtime >= - io->u.ci_setattr.sa_attr.lvb_ctime) || - (io->u.ci_setattr.sa_attr.lvb_atime >= + unsigned int valid = io->u.ci_setattr.sa_valid; + + if (!(valid & TIMES_SET_FLAGS)) + return 0; + + if ((!(valid & ATTR_MTIME) || + io->u.ci_setattr.sa_attr.lvb_mtime >= + io->u.ci_setattr.sa_attr.lvb_ctime) && + (!(valid & ATTR_ATIME) || + io->u.ci_setattr.sa_attr.lvb_atime >= 
io->u.ci_setattr.sa_attr.lvb_ctime)) return 0; new_size = 0; @@ -580,14 +589,6 @@ static int vvp_do_vmtruncate(struct inode *inode, size_t size) return result; } -static int vvp_io_setattr_trunc(const struct lu_env *env, - const struct cl_io_slice *ios, - struct inode *inode, loff_t size) -{ - inode_dio_wait(inode); - return 0; -} - static int vvp_io_setattr_time(const struct lu_env *env, const struct cl_io_slice *ios) { @@ -618,15 +619,20 @@ static int vvp_io_setattr_start(const struct lu_env *env, { struct cl_io *io = ios->cis_io; struct inode *inode = vvp_object_inode(io->ci_obj); - int result = 0; + struct ll_inode_info *lli = ll_i2info(inode); - inode_lock(inode); - if (cl_io_is_trunc(io)) - result = vvp_io_setattr_trunc(env, ios, inode, - io->u.ci_setattr.sa_attr.lvb_size); - if (result == 0) - result = vvp_io_setattr_time(env, ios); - return result; + if (cl_io_is_trunc(io)) { + down_write(&lli->lli_trunc_sem); + inode_lock(inode); + inode_dio_wait(inode); + } else { + inode_lock(inode); + } + + if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS) + return vvp_io_setattr_time(env, ios); + + return 0; } static void vvp_io_setattr_end(const struct lu_env *env, @@ -634,14 +640,18 @@ static void vvp_io_setattr_end(const struct lu_env *env, { struct cl_io *io = ios->cis_io; struct inode *inode = vvp_object_inode(io->ci_obj); + struct ll_inode_info *lli = ll_i2info(inode); - if (cl_io_is_trunc(io)) + if (cl_io_is_trunc(io)) { /* Truncate in memory pages - they must be clean pages * because osc has already notified to destroy osc_extents. */ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); - - inode_unlock(inode); + inode_unlock(inode); + up_write(&lli->lli_trunc_sem); + } else { + inode_unlock(inode); + } } static void vvp_io_setattr_fini(const struct lu_env *env, @@ -657,6 +667,7 @@ static int vvp_io_read_start(const struct lu_env *env, struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct inode *inode = vvp_object_inode(obj); + struct ll_inode_info *lli = ll_i2info(inode); struct file *file = vio->vui_fd->fd_file; int result; @@ -669,6 +680,8 @@ static int vvp_io_read_start(const struct lu_env *env, CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt); + down_read(&lli->lli_trunc_sem); + if (!can_populate_pages(env, io, inode)) return 0; @@ -770,16 +783,11 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io, static void write_commit_callback(const struct lu_env *env, struct cl_io *io, struct cl_page *page) { - struct vvp_page *vpg; struct page *vmpage = page->cp_vmpage; - struct cl_object *clob = cl_io_top(io)->ci_obj; SetPageUptodate(vmpage); set_page_dirty(vmpage); - vpg = cl2vvp_page(cl_object_page_slice(clob, page)); - vvp_write_pending(cl2vvp(clob), vpg); - cl_page_disown(env, io, page); /* held in ll_cl_init() */ @@ -899,10 +907,13 @@ static int vvp_io_write_start(const struct lu_env *env, struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct inode *inode = vvp_object_inode(obj); + struct ll_inode_info *lli = ll_i2info(inode); ssize_t result = 0; loff_t pos = io->u.ci_wr.wr.crw_pos; size_t cnt = io->u.ci_wr.wr.crw_count; + down_read(&lli->lli_trunc_sem); + if (!can_populate_pages(env, io, inode)) return 0; @@ -921,6 +932,20 @@ static int vvp_io_write_start(const struct lu_env *env, CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); + /* + * The maximum Lustre file size is variable, based on the OST maximum + * object size and number of stripes. 
This needs another check in + * addition to the VFS checks earlier. + */ + if (pos + cnt > ll_file_maxbytes(inode)) { + CDEBUG(D_INODE, + "%s: file " DFID " offset %llu > maxbytes %llu\n", + ll_get_fsname(inode->i_sb, NULL, 0), + PFID(ll_inode2fid(inode)), pos + cnt, + ll_file_maxbytes(inode)); + return -EFBIG; + } + if (!vio->vui_iter) { /* from a temp io in ll_cl_init(). */ result = 0; @@ -957,11 +982,7 @@ static int vvp_io_write_start(const struct lu_env *env, } } if (result > 0) { - struct ll_inode_info *lli = ll_i2info(inode); - - spin_lock(&lli->lli_lock); - lli->lli_flags |= LLIF_DATA_MODIFIED; - spin_unlock(&lli->lli_lock); + set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags); if (result < cnt) io->ci_continue = 0; @@ -972,6 +993,15 @@ static int vvp_io_write_start(const struct lu_env *env, return result; } +static void vvp_io_rw_end(const struct lu_env *env, + const struct cl_io_slice *ios) +{ + struct inode *inode = vvp_object_inode(ios->cis_obj); + struct ll_inode_info *lli = ll_i2info(inode); + + up_read(&lli->lli_trunc_sem); +} + static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) { struct vm_fault *vmf = cfio->ft_vmf; @@ -984,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n", vmf->page, vmf->page->mapping, vmf->page->index, (long)vmf->page->flags, page_count(vmf->page), - page_private(vmf->page), vmf->virtual_address); + page_private(vmf->page), (void *)vmf->address); if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) { lock_page(vmf->page); cfio->ft_flags |= VM_FAULT_LOCKED; @@ -995,12 +1025,12 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) } if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { - CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); + CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address); return -EFAULT; } if (cfio->ft_flags & VM_FAULT_OOM) { - CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address); + CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address); return -ENOMEM; } @@ -1014,13 +1044,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io, struct cl_page *page) { - struct vvp_page *vpg; - struct cl_object *clob = cl_io_top(io)->ci_obj; - set_page_dirty(page->cp_vmpage); - - vpg = cl2vvp_page(cl_object_page_slice(clob, page)); - vvp_write_pending(cl2vvp(clob), vpg); } static int vvp_io_fault_start(const struct lu_env *env, @@ -1030,6 +1054,7 @@ static int vvp_io_fault_start(const struct lu_env *env, struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct inode *inode = vvp_object_inode(obj); + struct ll_inode_info *lli = ll_i2info(inode); struct cl_fault_io *fio = &io->u.ci_fault; struct vvp_fault_io *cfio = &vio->u.fault; loff_t offset; @@ -1039,11 +1064,7 @@ static int vvp_io_fault_start(const struct lu_env *env, loff_t size; pgoff_t last_index; - if (fio->ft_executable && - inode->i_mtime.tv_sec != vio->u.fault.ft_mtime) - CWARN("binary "DFID - " changed while waiting for the page fault lock\n", - PFID(lu_object_fid(&obj->co_lu))); + down_read(&lli->lli_trunc_sem); /* offset of the last byte on the page */ offset = cl_offset(obj, fio->ft_index + 1) - 1; @@ -1192,6 +1213,17 @@ static int vvp_io_fault_start(const struct lu_env *env, return result; } +static void vvp_io_fault_end(const struct lu_env *env, + const struct cl_io_slice *ios) +{ + struct inode *inode = 
vvp_object_inode(ios->cis_obj); + struct ll_inode_info *lli = ll_i2info(inode); + + CLOBINVRNT(env, ios->cis_io->ci_obj, + vvp_object_invariant(ios->cis_io->ci_obj)); + up_read(&lli->lli_trunc_sem); +} + static int vvp_io_fsync_start(const struct lu_env *env, const struct cl_io_slice *ios) { @@ -1202,46 +1234,23 @@ static int vvp_io_fsync_start(const struct lu_env *env, return 0; } -static int vvp_io_read_page(const struct lu_env *env, - const struct cl_io_slice *ios, - const struct cl_page_slice *slice) +static int vvp_io_read_ahead(const struct lu_env *env, + const struct cl_io_slice *ios, + pgoff_t start, struct cl_read_ahead *ra) { - struct cl_io *io = ios->cis_io; - struct vvp_page *vpg = cl2vvp_page(slice); - struct cl_page *page = slice->cpl_page; - struct inode *inode = vvp_object_inode(slice->cpl_obj); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd; - struct ll_readahead_state *ras = &fd->fd_ras; - struct cl_2queue *queue = &io->ci_queue; - - if (sbi->ll_ra_info.ra_max_pages_per_file && - sbi->ll_ra_info.ra_max_pages) - ras_update(sbi, inode, ras, vvp_index(vpg), - vpg->vpg_defer_uptodate); - - if (vpg->vpg_defer_uptodate) { - vpg->vpg_ra_used = 1; - cl_page_export(env, page, 1); - } - /* - * Add page into the queue even when it is marked uptodate above. - * this will unlock it automatically as part of cl_page_list_disown(). - */ + int result = 0; - cl_page_list_add(&queue->c2_qin, page); - if (sbi->ll_ra_info.ra_max_pages_per_file && - sbi->ll_ra_info.ra_max_pages) - ll_readahead(env, io, &queue->c2_qin, ras, - vpg->vpg_defer_uptodate); + if (ios->cis_io->ci_type == CIT_READ || + ios->cis_io->ci_type == CIT_FAULT) { + struct vvp_io *vio = cl2vvp_io(env, ios); - return 0; -} + if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) { + ra->cra_end = CL_PAGE_EOF; + result = 1; /* no need to call down */ + } + } -static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios) -{ - CLOBINVRNT(env, ios->cis_io->ci_obj, - vvp_object_invariant(ios->cis_io->ci_obj)); + return result; } static const struct cl_io_operations vvp_io_ops = { @@ -1250,6 +1259,7 @@ static const struct cl_io_operations vvp_io_ops = { .cio_fini = vvp_io_fini, .cio_lock = vvp_io_read_lock, .cio_start = vvp_io_read_start, + .cio_end = vvp_io_rw_end, .cio_advance = vvp_io_advance, }, [CIT_WRITE] = { @@ -1258,6 +1268,7 @@ static const struct cl_io_operations vvp_io_ops = { .cio_iter_fini = vvp_io_write_iter_fini, .cio_lock = vvp_io_write_lock, .cio_start = vvp_io_write_start, + .cio_end = vvp_io_rw_end, .cio_advance = vvp_io_advance, }, [CIT_SETATTR] = { @@ -1272,7 +1283,7 @@ static const struct cl_io_operations vvp_io_ops = { .cio_iter_init = vvp_io_fault_iter_init, .cio_lock = vvp_io_fault_lock, .cio_start = vvp_io_fault_start, - .cio_end = vvp_io_end, + .cio_end = vvp_io_fault_end, }, [CIT_FSYNC] = { .cio_start = vvp_io_fsync_start, @@ -1282,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = { .cio_fini = vvp_io_fini } }, - .cio_read_page = vvp_io_read_page, + .cio_read_ahead = vvp_io_read_ahead, }; int vvp_io_init(const struct lu_env *env, struct cl_object *obj, diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c index b57195d156742921cc722284a0850b7ca18a1058..8e18cf86cefce39183869ca964d437d2f7c675a3 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_object.c +++ b/drivers/staging/lustre/lustre/llite/vvp_object.c @@ -65,8 +65,7 @@ static int vvp_object_print(const 
struct lu_env *env, void *cookie, struct inode *inode = obj->vob_inode; struct ll_inode_info *lli; - (*p)(env, cookie, "(%s %d %d) inode: %p ", - list_empty(&obj->vob_pending_list) ? "-" : "+", + (*p)(env, cookie, "(%d %d) inode: %p ", atomic_read(&obj->vob_transient_pages), atomic_read(&obj->vob_mmap_cnt), inode); if (inode) { @@ -133,7 +132,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n", PFID(&lli->lli_fid)); - ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE); + ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE); /* Clean up page mmap for this inode. * The reason for us to do this is that if the page has @@ -146,27 +145,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, */ unmap_mapping_range(conf->coc_inode->i_mapping, 0, OBD_OBJECT_EOF, 0); - - return 0; } - if (conf->coc_opc != OBJECT_CONF_SET) - return 0; - - if (conf->u.coc_md && conf->u.coc_md->lsm) { - CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n", - PFID(&lli->lli_fid), lli->lli_layout_gen, - conf->u.coc_md->lsm->lsm_layout_gen); - - lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm); - ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen); - } else { - CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n", - PFID(&lli->lli_fid), lli->lli_layout_gen); - - lli->lli_has_smd = false; - ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY); - } return 0; } @@ -204,6 +184,26 @@ static int vvp_object_glimpse(const struct lu_env *env, return 0; } +static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj, + struct cl_req_attr *attr) +{ + u64 valid_flags = OBD_MD_FLTYPE; + struct inode *inode; + struct obdo *oa; + + oa = attr->cra_oa; + inode = vvp_object_inode(obj); + + if (attr->cra_type == CRT_WRITE) + valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME | + OBD_MD_FLUID | OBD_MD_FLGID; + obdo_from_inode(oa, inode, valid_flags & attr->cra_flags); + obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid); + if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID)) + oa->o_parent_oid++; + memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, LUSTRE_JOBID_SIZE); +} + static const struct cl_object_operations vvp_ops = { .coo_page_init = vvp_page_init, .coo_lock_init = vvp_lock_init, @@ -212,7 +212,8 @@ static const struct cl_object_operations vvp_ops = { .coo_attr_update = vvp_attr_update, .coo_conf_set = vvp_conf_set, .coo_prune = vvp_prune, - .coo_glimpse = vvp_object_glimpse + .coo_glimpse = vvp_object_glimpse, + .coo_req_attr_set = vvp_req_attr_set }; static int vvp_object_init0(const struct lu_env *env, @@ -240,7 +241,6 @@ static int vvp_object_init(const struct lu_env *env, struct lu_object *obj, const struct cl_object_conf *cconf; cconf = lu2cl_conf(conf); - INIT_LIST_HEAD(&vob->vob_pending_list); lu_object_add(obj, below); result = vvp_object_init0(env, vob, cconf); } else { diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c index 046e84d7a15890e5b624763fe03f66a441eb7740..23d66308ff2036e4ce9b3d05c1a8de2c8f252eec 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_page.c +++ b/drivers/staging/lustre/lustre/llite/vvp_page.c @@ -162,13 +162,10 @@ static void vvp_page_delete(const struct lu_env *env, LASSERT((struct cl_page *)vmpage->private == page); LASSERT(inode == vvp_object_inode(obj)); - vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice)); - /* Drop the reference count held in vvp_page_init */ refc = atomic_dec_return(&page->cp_ref); LASSERTF(refc >= 1, 
"page = %p, refc = %d\n", page, refc); - ClearPageUptodate(vmpage); ClearPagePrivate(vmpage); vmpage->private = 0; /* @@ -221,8 +218,6 @@ static int vvp_page_prep_write(const struct lu_env *env, if (!pg->cp_sync_io) set_page_writeback(vmpage); - vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice)); - return 0; } @@ -287,19 +282,6 @@ static void vvp_page_completion_write(const struct lu_env *env, CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret); - /* - * TODO: Actually it makes sense to add the page into oap pending - * list again and so that we don't need to take the page out from - * SoM write pending list, if we just meet a recoverable error, - * -ENOMEM, etc. - * To implement this, we just need to return a non zero value in - * ->cpo_completion method. The underlying transfer should be notified - * and then re-add the page into pending transfer queue. -jay - */ - - vpg->vpg_write_queued = 0; - vvp_write_complete(cl2vvp(slice->cpl_obj), vpg); - if (pg->cp_sync_io) { LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); @@ -341,7 +323,6 @@ static int vvp_page_make_ready(const struct lu_env *env, LASSERT(pg->cp_state == CPS_CACHED); /* This actually clears the dirty bit in the radix tree. */ set_page_writeback(vmpage); - vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice)); CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); } else if (pg->cp_state == CPS_PAGEOUT) { /* is it possible for osc_flush_async_page() to already @@ -357,20 +338,6 @@ static int vvp_page_make_ready(const struct lu_env *env, return result; } -static int vvp_page_is_under_lock(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io, pgoff_t *max_index) -{ - if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE || - io->ci_type == CIT_FAULT) { - struct vvp_io *vio = vvp_env_io(env); - - if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) - *max_index = CL_PAGE_EOF; - } - return 0; -} - static int vvp_page_print(const struct lu_env *env, const struct cl_page_slice *slice, void *cookie, lu_printer_t printer) @@ -378,9 +345,8 @@ static int vvp_page_print(const struct lu_env *env, struct vvp_page *vpg = cl2vvp_page(slice); struct page *vmpage = vpg->vpg_page; - (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ", - vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, - vpg->vpg_write_queued, vmpage); + (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d) vm@%p ", + vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage); if (vmpage) { (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru", (long)vmpage->flags, page_count(vmpage), @@ -416,7 +382,6 @@ static const struct cl_page_operations vvp_page_ops = { .cpo_is_vmlocked = vvp_page_is_vmlocked, .cpo_fini = vvp_page_fini, .cpo_print = vvp_page_print, - .cpo_is_under_lock = vvp_page_is_under_lock, .io = { [CRT_READ] = { .cpo_prep = vvp_page_prep_read, @@ -515,7 +480,6 @@ static const struct cl_page_operations vvp_transient_page_ops = { .cpo_fini = vvp_transient_page_fini, .cpo_is_vmlocked = vvp_transient_page_is_vmlocked, .cpo_print = vvp_page_print, - .cpo_is_under_lock = vvp_page_is_under_lock, .io = { [CRT_READ] = { .cpo_prep = vvp_transient_page_prep, @@ -539,7 +503,6 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj, vpg->vpg_page = vmpage; get_page(vmpage); - INIT_LIST_HEAD(&vpg->vpg_pending_linkage); if (page->cp_type == CPT_CACHEABLE) { /* in cache, decref in vvp_page_delete */ atomic_inc(&page->cp_ref); diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c 
b/drivers/staging/lustre/lustre/llite/vvp_req.c deleted file mode 100644 index e3f4c790d64689cd4ac60b8e9ffb840d2c1819ec..0000000000000000000000000000000000000000 --- a/drivers/staging/lustre/lustre/llite/vvp_req.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2014, Intel Corporation. - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include "../include/lustre/lustre_idl.h" -#include "../include/cl_object.h" -#include "../include/obd.h" -#include "../include/obd_support.h" -#include "llite_internal.h" -#include "vvp_internal.h" - -static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice) -{ - return container_of0(slice, struct vvp_req, vrq_cl); -} - -/** - * Implementation of struct cl_req_operations::cro_attr_set() for VVP - * layer. VVP is responsible for - * - * - o_[mac]time - * - * - o_mode - * - * - o_parent_seq - * - * - o_[ug]id - * - * - o_parent_oid - * - * - o_parent_ver - * - * - o_ioepoch, - * - */ -static void vvp_req_attr_set(const struct lu_env *env, - const struct cl_req_slice *slice, - const struct cl_object *obj, - struct cl_req_attr *attr, u64 flags) -{ - struct inode *inode; - struct obdo *oa; - u32 valid_flags; - - oa = attr->cra_oa; - inode = vvp_object_inode(obj); - valid_flags = OBD_MD_FLTYPE; - - if (slice->crs_req->crq_type == CRT_WRITE) { - if (flags & OBD_MD_FLEPOCH) { - oa->o_valid |= OBD_MD_FLEPOCH; - oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch; - valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME | - OBD_MD_FLUID | OBD_MD_FLGID; - } - } - obdo_from_inode(oa, inode, valid_flags & flags); - obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid); - if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID)) - oa->o_parent_oid++; - memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, - LUSTRE_JOBID_SIZE); -} - -static void vvp_req_completion(const struct lu_env *env, - const struct cl_req_slice *slice, int ioret) -{ - struct vvp_req *vrq; - - if (ioret > 0) - cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret); - - vrq = cl2vvp_req(slice); - kmem_cache_free(vvp_req_kmem, vrq); -} - -static const struct cl_req_operations vvp_req_ops = { - .cro_attr_set = vvp_req_attr_set, - .cro_completion = vvp_req_completion -}; - -int vvp_req_init(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req) -{ - struct vvp_req *vrq; - int result; - - vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS); - if (vrq) { - cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops); - result = 0; - } else { - result = -ENOMEM; - } - return result; -} diff --git a/drivers/staging/lustre/lustre/llite/xattr.c 
b/drivers/staging/lustre/lustre/llite/xattr.c index e070adb7a3cc92fadb56715cef6930ddae0dcac4..7a848ebc57c1e6f22ab7629978660a1cd32a5d03 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c @@ -44,48 +44,39 @@ #include "llite_internal.h" -static -int get_xattr_type(const char *name) +const struct xattr_handler *get_xattr_type(const char *name) { - if (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS)) - return XATTR_ACL_ACCESS_T; + int i = 0; - if (!strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT)) - return XATTR_ACL_DEFAULT_T; + while (ll_xattr_handlers[i]) { + size_t len = strlen(ll_xattr_handlers[i]->prefix); - if (!strncmp(name, XATTR_USER_PREFIX, - sizeof(XATTR_USER_PREFIX) - 1)) - return XATTR_USER_T; - - if (!strncmp(name, XATTR_TRUSTED_PREFIX, - sizeof(XATTR_TRUSTED_PREFIX) - 1)) - return XATTR_TRUSTED_T; - - if (!strncmp(name, XATTR_SECURITY_PREFIX, - sizeof(XATTR_SECURITY_PREFIX) - 1)) - return XATTR_SECURITY_T; - - if (!strncmp(name, XATTR_LUSTRE_PREFIX, - sizeof(XATTR_LUSTRE_PREFIX) - 1)) - return XATTR_LUSTRE_T; - - return XATTR_OTHER_T; + if (!strncmp(ll_xattr_handlers[i]->prefix, name, len)) + return ll_xattr_handlers[i]; + i++; + } + return NULL; } -static -int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type) +static int xattr_type_filter(struct ll_sb_info *sbi, + const struct xattr_handler *handler) { - if ((xattr_type == XATTR_ACL_ACCESS_T || - xattr_type == XATTR_ACL_DEFAULT_T) && + /* No handler means XATTR_OTHER_T */ + if (!handler) + return -EOPNOTSUPP; + + if ((handler->flags == XATTR_ACL_ACCESS_T || + handler->flags == XATTR_ACL_DEFAULT_T) && !(sbi->ll_flags & LL_SBI_ACL)) return -EOPNOTSUPP; - if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR)) + if (handler->flags == XATTR_USER_T && + !(sbi->ll_flags & LL_SBI_USER_XATTR)) return -EOPNOTSUPP; - if (xattr_type == XATTR_TRUSTED_T && !capable(CFS_CAP_SYS_ADMIN)) + + if (handler->flags == XATTR_TRUSTED_T && + !capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - if (xattr_type == XATTR_OTHER_T) - return -EOPNOTSUPP; return 0; } @@ -111,7 +102,7 @@ ll_xattr_set_common(const struct xattr_handler *handler, valid = OBD_MD_FLXATTR; } - rc = xattr_type_filter(sbi, handler->flags); + rc = xattr_type_filter(sbi, handler); if (rc) return rc; @@ -121,8 +112,9 @@ ll_xattr_set_common(const struct xattr_handler *handler, return -EPERM; /* b10667: ignore lustre special xattr for now */ - if ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) || - (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov"))) + if (!strcmp(name, "hsm") || + ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) || + (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov")))) return 0; /* b15587: ignore security.capability xattr for now */ @@ -135,6 +127,11 @@ ll_xattr_set_common(const struct xattr_handler *handler, strcmp(name, "selinux") == 0) return -EOPNOTSUPP; + /*FIXME: enable IMA when the conditions are ready */ + if (handler->flags == XATTR_SECURITY_T && + (!strcmp(name, "ima") || !strcmp(name, "evm"))) + return -EOPNOTSUPP; + sprintf(fullname, "%s%s\n", handler->prefix, name); rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), valid, fullname, pv, size, 0, flags, @@ -151,6 +148,37 @@ ll_xattr_set_common(const struct xattr_handler *handler, return 0; } +static int get_hsm_state(struct inode *inode, u32 *hus_states) +{ + struct md_op_data *op_data; + struct hsm_user_state *hus; + int rc; + + hus = kzalloc(sizeof(*hus), GFP_NOFS); + if (!hus) + return -ENOMEM; + + 
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, + LUSTRE_OPC_ANY, hus); + if (!IS_ERR(op_data)) { + rc = obd_iocontrol(LL_IOC_HSM_STATE_GET, ll_i2mdexp(inode), + sizeof(*op_data), op_data, NULL); + if (!rc) + *hus_states = hus->hus_states; + else + CDEBUG(D_VFSTRACE, "obd_iocontrol failed. rc = %d\n", + rc); + + ll_finish_md_op_data(op_data); + } else { + rc = PTR_ERR(op_data); + CDEBUG(D_VFSTRACE, "Could not prepare the opdata. rc = %d\n", + rc); + } + kfree(hus); + return rc; +} + static int ll_xattr_set(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, @@ -187,6 +215,31 @@ static int ll_xattr_set(const struct xattr_handler *handler, if (lump && lump->lmm_stripe_offset == 0) lump->lmm_stripe_offset = -1; + /* Avoid anyone directly setting the RELEASED flag. */ + if (lump && (lump->lmm_pattern & LOV_PATTERN_F_RELEASED)) { + /* Only if we have a released flag check if the file + * was indeed archived. + */ + u32 state = HS_NONE; + + rc = get_hsm_state(inode, &state); + if (rc) + return rc; + + if (!(state & HS_ARCHIVED)) { + CDEBUG(D_VFSTRACE, + "hus_states state = %x, pattern = %x\n", + state, lump->lmm_pattern); + /* + * Here the state is: real file is not + * archived but user is requesting to set + * the RELEASED flag so we mask off the + * released flag from the request + */ + lump->lmm_pattern ^= LOV_PATTERN_F_RELEASED; + } + } + if (lump && S_ISREG(inode->i_mode)) { __u64 it_flags = FMODE_WRITE; int lum_size; @@ -225,7 +278,8 @@ ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer, void *xdata; int rc; - if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T) { + if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T && + (type != XATTR_SECURITY_T || strcmp(name, "security.selinux"))) { rc = ll_xattr_cache_get(inode, name, buffer, size, valid); if (rc == -EAGAIN) goto getxattr_nocache; @@ -313,7 +367,7 @@ static int ll_xattr_get_common(const struct xattr_handler *handler, ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); - rc = xattr_type_filter(sbi, handler->flags); + rc = xattr_type_filter(sbi, handler); if (rc) return rc; @@ -353,80 +407,99 @@ static int ll_xattr_get_common(const struct xattr_handler *handler, OBD_MD_FLXATTR); } -static int ll_xattr_get(const struct xattr_handler *handler, - struct dentry *dentry, struct inode *inode, - const char *name, void *buffer, size_t size) +static ssize_t ll_getxattr_lov(struct inode *inode, void *buf, size_t buf_size) { - LASSERT(inode); - LASSERT(name); + ssize_t rc; - CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n", - PFID(ll_inode2fid(inode)), inode, name); + if (S_ISREG(inode->i_mode)) { + struct cl_object *obj = ll_i2info(inode)->lli_clob; + struct cl_layout cl = { + .cl_buf.lb_buf = buf, + .cl_buf.lb_len = buf_size, + }; + struct lu_env *env; + int refcheck; + + if (!obj) + return -ENODATA; - if (!strcmp(name, "lov")) { - struct lov_stripe_md *lsm; - struct lov_user_md *lump; - struct lov_mds_md *lmm = NULL; - struct ptlrpc_request *request = NULL; - int rc = 0, lmmsize = 0; + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + return PTR_ERR(env); - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); - - if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) - return -ENODATA; + rc = cl_object_layout_get(env, obj, &cl); + if (rc < 0) + goto out_env; - lsm = ccc_inode_lsm_get(inode); - if (!lsm) { - if (S_ISDIR(inode->i_mode)) { - rc = ll_dir_getstripe(inode, (void **)&lmm, - 
&lmmsize, &request, 0); - } else { - rc = -ENODATA; - } - } else { - /* LSM is present already after lookup/getattr call. - * we need to grab layout lock once it is implemented - */ - rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm); - lmmsize = rc; + if (!cl.cl_size) { + rc = -ENODATA; + goto out_env; } - ccc_inode_lsm_put(inode, lsm); + rc = cl.cl_size; + + if (!buf_size) + goto out_env; + + LASSERT(buf && rc <= buf_size); + + /* + * Do not return layout gen for getxattr() since + * otherwise it would confuse tar --xattr by + * recognizing layout gen as stripe offset when the + * file is restored. See LU-2809. + */ + ((struct lov_mds_md *)buf)->lmm_layout_gen = 0; +out_env: + cl_env_put(env, &refcheck); + + return rc; + } else if (S_ISDIR(inode->i_mode)) { + struct ptlrpc_request *req = NULL; + struct lov_mds_md *lmm = NULL; + int lmm_size = 0; + + rc = ll_dir_getstripe(inode, (void **)&lmm, &lmm_size, + &req, 0); if (rc < 0) - goto out; + goto out_req; - if (size == 0) { - /* used to call ll_get_max_mdsize() forward to get - * the maximum buffer size, while some apps (such as - * rsync 3.0.x) care much about the exact xattr value - * size - */ - rc = lmmsize; - goto out; + if (!buf_size) { + rc = lmm_size; + goto out_req; } - if (size < lmmsize) { - CERROR("server bug: replied size %d > %d for %pd (%s)\n", - lmmsize, (int)size, dentry, name); + if (buf_size < lmm_size) { rc = -ERANGE; - goto out; + goto out_req; } - lump = buffer; - memcpy(lump, lmm, lmmsize); - /* do not return layout gen for getxattr otherwise it would - * confuse tar --xattr by recognizing layout gen as stripe - * offset when the file is restored. See LU-2809. - */ - lump->lmm_layout_gen = 0; + memcpy(buf, lmm, lmm_size); + rc = lmm_size; +out_req: + if (req) + ptlrpc_req_finished(req); - rc = lmmsize; -out: - if (request) - ptlrpc_req_finished(request); - else if (lmm) - obd_free_diskmd(ll_i2dtexp(inode), &lmm); return rc; + } else { + return -ENODATA; + } +} + +static int ll_xattr_get(const struct xattr_handler *handler, + struct dentry *dentry, struct inode *inode, + const char *name, void *buffer, size_t size) +{ + LASSERT(inode); + LASSERT(name); + + CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n", + PFID(ll_inode2fid(inode)), inode, name); + + if (!strcmp(name, "lov")) { + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); + + return ll_getxattr_lov(inode, buffer, size); } return ll_xattr_get_common(handler, dentry, inode, name, buffer, size); @@ -435,10 +508,10 @@ static int ll_xattr_get(const struct xattr_handler *handler, ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct inode *inode = d_inode(dentry); - int rc = 0, rc2 = 0; - struct lov_mds_md *lmm = NULL; - struct ptlrpc_request *request = NULL; - int lmmsize; + struct ll_sb_info *sbi = ll_i2sbi(inode); + char *xattr_name; + ssize_t rc, rc2; + size_t len, rem; LASSERT(inode); @@ -450,65 +523,48 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size, OBD_MD_FLXATTRLS); if (rc < 0) - goto out; - - if (buffer) { - struct ll_sb_info *sbi = ll_i2sbi(inode); - char *xattr_name = buffer; - int xlen, rem = rc; - - while (rem > 0) { - xlen = strnlen(xattr_name, rem - 1) + 1; - rem -= xlen; - if (xattr_type_filter(sbi, - get_xattr_type(xattr_name)) == 0) { - /* skip OK xattr type - * leave it in buffer - */ - xattr_name += xlen; - continue; - } - /* move up remaining xattrs in buffer - * removing the xattr that is not OK - */ - 
memmove(xattr_name, xattr_name + xlen, rem); - rc -= xlen; + return rc; + /* + * If we're being called to get the size of the xattr list + * (buf_size == 0) then just assume that a lustre.lov xattr + * exists. + */ + if (!size) + return rc + sizeof(XATTR_LUSTRE_LOV); + + xattr_name = buffer; + rem = rc; + + while (rem > 0) { + len = strnlen(xattr_name, rem - 1) + 1; + rem -= len; + if (!xattr_type_filter(sbi, get_xattr_type(xattr_name))) { + /* Skip OK xattr type leave it in buffer */ + xattr_name += len; + continue; } - } - if (S_ISREG(inode->i_mode)) { - if (!ll_i2info(inode)->lli_has_smd) - rc2 = -1; - } else if (S_ISDIR(inode->i_mode)) { - rc2 = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize, - &request, 0); + + /* + * Move up remaining xattrs in buffer + * removing the xattr that is not OK + */ + memmove(xattr_name, xattr_name + len, rem); + rc -= len; } - if (rc2 < 0) { - rc2 = 0; - goto out; - } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) { - const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1; - const size_t name_len = sizeof("lov") - 1; - const size_t total_len = prefix_len + name_len + 1; - - if (((rc + total_len) > size) && buffer) { - ptlrpc_req_finished(request); - return -ERANGE; - } + rc2 = ll_getxattr_lov(inode, NULL, 0); + if (rc2 == -ENODATA) + return rc; - if (buffer) { - buffer += rc; - memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len); - memcpy(buffer + prefix_len, "lov", name_len); - buffer[prefix_len + name_len] = '\0'; - } - rc2 = total_len; - } -out: - ptlrpc_req_finished(request); - rc = rc + rc2; + if (rc2 < 0) + return rc2; - return rc; + if (size < rc + sizeof(XATTR_LUSTRE_LOV)) + return -ERANGE; + + memcpy(buffer + rc, XATTR_LUSTRE_LOV, sizeof(XATTR_LUSTRE_LOV)); + + return rc + sizeof(XATTR_LUSTRE_LOV); } static const struct xattr_handler ll_user_xattr_handler = { diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c index 50a19a40bd4e776114f99fde2e103f14d91f045a..38f75f6aa887a32cda33236933baff4d637d7685 100644 --- a/drivers/staging/lustre/lustre/llite/xattr_cache.c +++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c @@ -26,8 +26,8 @@ struct ll_xattr_entry { */ char *xe_name; /* xattr name, \0-terminated */ char *xe_value; /* xattr value */ - unsigned xe_namelen; /* strlen(xe_name) + 1 */ - unsigned xe_vallen; /* xattr value length */ + unsigned int xe_namelen; /* strlen(xe_name) + 1 */ + unsigned int xe_vallen; /* xattr value length */ }; static struct kmem_cache *xattr_kmem; @@ -60,7 +60,7 @@ void ll_xattr_fini(void) static void ll_xattr_cache_init(struct ll_inode_info *lli) { INIT_LIST_HEAD(&lli->lli_xattrs); - lli->lli_flags |= LLIF_XATTR_CACHE; + set_bit(LLIF_XATTR_CACHE, &lli->lli_flags); } /** @@ -104,7 +104,7 @@ static int ll_xattr_cache_find(struct list_head *cache, static int ll_xattr_cache_add(struct list_head *cache, const char *xattr_name, const char *xattr_val, - unsigned xattr_val_len) + unsigned int xattr_val_len) { struct ll_xattr_entry *xattr; @@ -216,7 +216,7 @@ static int ll_xattr_cache_list(struct list_head *cache, */ static int ll_xattr_cache_valid(struct ll_inode_info *lli) { - return !!(lli->lli_flags & LLIF_XATTR_CACHE); + return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags); } /** @@ -233,7 +233,8 @@ static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli) while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0) ; /* empty loop */ - lli->lli_flags &= ~LLIF_XATTR_CACHE; + + clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags); return 0; } @@ -415,6 
+416,10 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) CDEBUG(D_CACHE, "not caching %s\n", XATTR_NAME_ACL_ACCESS); rc = 0; + } else if (!strcmp(xdata, "security.selinux")) { + /* Filter out security.selinux, it is cached in slab */ + CDEBUG(D_CACHE, "not caching security.selinux\n"); + rc = 0; } else { rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval, *xsizes); diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c new file mode 100644 index 0000000000000000000000000000000000000000..d61d8018001afad6e67907ae88b0375dd816a55d --- /dev/null +++ b/drivers/staging/lustre/lustre/llite/xattr_security.c @@ -0,0 +1,88 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see http://www.gnu.org/licenses + * + * GPL HEADER END + */ + +/* + * Copyright (c) 2014 Bull SAS + * Author: Sebastien Buisson sebastien.buisson@bull.net + */ + +/* + * lustre/llite/xattr_security.c + * Handler for storing security labels as extended attributes. + */ +#include +#include +#include "llite_internal.h" + +/** + * A helper function for ll_security_inode_init_security() + * that takes care of setting xattrs + * + * Get security context of @inode from @xattr_array, + * and put it in 'security.xxx' xattr of dentry + * stored in @fs_info. + * + * \retval 0 success + * \retval -ENOMEM if no memory could be allocated for xattr name + * \retval < 0 failure to set xattr + */ +static int +ll_initxattrs(struct inode *inode, const struct xattr *xattr_array, + void *fs_info) +{ + const struct xattr_handler *handler; + struct dentry *dentry = fs_info; + const struct xattr *xattr; + int err = 0; + + handler = get_xattr_type(XATTR_SECURITY_PREFIX); + if (!handler) + return -ENXIO; + + for (xattr = xattr_array; xattr->name; xattr++) { + err = handler->set(handler, dentry, inode, xattr->name, + xattr->value, xattr->value_len, + XATTR_CREATE); + if (err < 0) + break; + } + return err; +} + +/** + * Initializes security context + * + * Get security context of @inode in @dir, + * and put it in 'security.xxx' xattr of @dentry. 
+ * + * \retval 0 success, or SELinux is disabled + * \retval -ENOMEM if no memory could be allocated for xattr name + * \retval < 0 failure to get security context or set xattr + */ +int +ll_init_security(struct dentry *dentry, struct inode *inode, struct inode *dir) +{ + if (!selinux_is_enabled()) + return 0; + + return security_inode_init_security(inode, dir, NULL, + &ll_initxattrs, dentry); +} diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c index 9f4e826bb0af7b56fc7501bfacb9cd08cd0711b6..b1071cf5a70c7323e7207e6c0252a2b4d3cbf629 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c @@ -223,7 +223,14 @@ int lmv_revalidate_slaves(struct obd_export *exp, LASSERT(body); if (unlikely(body->mbo_nlink < 2)) { - CERROR("%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n", + /* + * If this is a bad stripe, most likely due + * to the race between close(unlink) and + * getattr, let's return -ENOENT, so llite + * will revalidate the dentry, see + * ll_inode_revalidate_fini() + */ + CDEBUG(D_INODE, "%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n", obd->obd_name, body->mbo_nlink, i, PFID(&lsm->lsm_md_oinfo[i].lmo_fid), PFID(&lsm->lsm_md_oinfo[0].lmo_fid)); @@ -233,7 +240,7 @@ int lmv_revalidate_slaves(struct obd_export *exp, it.it_lock_mode = 0; } - rc = -EIO; + rc = -ENOENT; goto cleanup; } diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h index 52b03745ac1940734fc21f4cd77cabb91dc63505..12731a17e263499444daa925477c1661333220a1 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h +++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h @@ -54,9 +54,6 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds); int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp, struct lu_fid *fid, struct md_op_data *op_data); -int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp, - const union lmv_mds_md *lmm, int stripe_count); - int lmv_revalidate_slaves(struct obd_export *exp, const struct lmv_stripe_md *lsm, ldlm_blocking_callback cb_blocking, diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c index 7dbb2b946acf56e90eaad6992ff34d3555a5951f..f124f6c05ea4a7a34122c959f8c398efe0a859b9 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c @@ -62,6 +62,7 @@ static void lmv_activate_target(struct lmv_obd *lmv, tgt->ltd_active = activate; lmv->desc.ld_active_tgt_count += (activate ? 
1 : -1); + tgt->ltd_exp->exp_obd->obd_inactive = !activate; } /** @@ -245,8 +246,7 @@ static int lmv_connect(const struct lu_env *env, return rc; } -static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize, - u32 cookiesize, u32 def_cookiesize) +static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; @@ -262,14 +262,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize, lmv->max_def_easize = def_easize; change = 1; } - if (lmv->max_cookiesize < cookiesize) { - lmv->max_cookiesize = cookiesize; - change = 1; - } - if (lmv->max_def_cookiesize < def_cookiesize) { - lmv->max_def_cookiesize = def_cookiesize; - change = 1; - } + if (change == 0) return 0; @@ -284,8 +277,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize, continue; } - rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize, - cookiesize, def_cookiesize); + rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize); if (rc) { CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n", obd->obd_name, i, rc); @@ -368,8 +360,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) tgt->ltd_exp = mdc_exp; lmv->desc.ld_active_tgt_count++; - md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize, - lmv->max_cookiesize, lmv->max_def_cookiesize); + md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize); CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n", mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, @@ -396,27 +387,23 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, __u32 index, int gen) { struct lmv_obd *lmv = &obd->u.lmv; + struct obd_device *mdc_obd; struct lmv_tgt_desc *tgt; int orig_tgt_count = 0; int rc = 0; CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index); - mutex_lock(&lmv->lmv_init_mutex); - - if (lmv->desc.ld_tgt_count == 0) { - struct obd_device *mdc_obd; - - mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME, - &obd->obd_uuid); - if (!mdc_obd) { - mutex_unlock(&lmv->lmv_init_mutex); - CERROR("%s: Target %s not attached: rc = %d\n", - obd->obd_name, uuidp->uuid, -EINVAL); - return -EINVAL; - } + mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME, + &obd->obd_uuid); + if (!mdc_obd) { + CERROR("%s: Target %s not attached: rc = %d\n", + obd->obd_name, uuidp->uuid, -EINVAL); + return -EINVAL; } + mutex_lock(&lmv->lmv_init_mutex); + if ((index < lmv->tgts_size) && lmv->tgts[index]) { tgt = lmv->tgts[index]; CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n", @@ -472,22 +459,27 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, lmv->desc.ld_tgt_count = index + 1; } - if (lmv->connected) { - rc = lmv_connect_mdc(obd, tgt); - if (rc) { - spin_lock(&lmv->lmv_lock); - if (lmv->desc.ld_tgt_count == index + 1) - lmv->desc.ld_tgt_count = orig_tgt_count; - memset(tgt, 0, sizeof(*tgt)); - spin_unlock(&lmv->lmv_lock); - } else { - int easize = sizeof(struct lmv_stripe_md) + - lmv->desc.ld_tgt_count * sizeof(struct lu_fid); - lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0); - } + if (!lmv->connected) { + /* lmv_check_connect() will connect this target. 
*/ + mutex_unlock(&lmv->lmv_init_mutex); + return rc; } + /* Otherwise let's connect it ourselves */ mutex_unlock(&lmv->lmv_init_mutex); + rc = lmv_connect_mdc(obd, tgt); + if (rc) { + spin_lock(&lmv->lmv_lock); + if (lmv->desc.ld_tgt_count == index + 1) + lmv->desc.ld_tgt_count = orig_tgt_count; + memset(tgt, 0, sizeof(*tgt)); + spin_unlock(&lmv->lmv_lock); + } else { + int easize = sizeof(struct lmv_stripe_md) + + lmv->desc.ld_tgt_count * sizeof(struct lu_fid); + lmv_init_ea_size(obd->obd_self_export, easize, 0); + } + return rc; } @@ -538,7 +530,7 @@ int lmv_check_connect(struct obd_device *obd) class_export_put(lmv->exp); lmv->connected = 1; easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC); - lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0); + lmv_init_ea_size(obd->obd_self_export, easize, 0); mutex_unlock(&lmv->lmv_init_mutex); return 0; @@ -1128,9 +1120,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, mdc_obd = class_exp2obd(tgt->ltd_exp); mdc_obd->obd_force = obddev->obd_force; err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); - if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) { - return err; - } else if (err) { + if (err) { if (tgt->ltd_active) { CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n", tgt->ltd_uuid.uuid, i, cmd, err); @@ -1284,7 +1274,6 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg) obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid); lmv->desc.ld_tgt_count = 0; lmv->desc.ld_active_tgt_count = 0; - lmv->max_cookiesize = 0; lmv->max_def_easize = 0; lmv->max_easize = 0; lmv->lmv_placement = PLACEMENT_CHAR_POLICY; @@ -1630,27 +1619,28 @@ lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data, * ct_restore(). */ if (op_data->op_bias & MDS_CREATE_VOLATILE && - (int)op_data->op_mds != -1 && lsm) { + (int)op_data->op_mds != -1) { int i; tgt = lmv_get_target(lmv, op_data->op_mds, NULL); if (IS_ERR(tgt)) return tgt; - /* refill the right parent fid */ - for (i = 0; i < lsm->lsm_md_stripe_count; i++) { - struct lmv_oinfo *oinfo; + if (lsm) { + /* refill the right parent fid */ + for (i = 0; i < lsm->lsm_md_stripe_count; i++) { + struct lmv_oinfo *oinfo; - oinfo = &lsm->lsm_md_oinfo[i]; - if (oinfo->lmo_mds == op_data->op_mds) { - *fid = oinfo->lmo_fid; - break; + oinfo = &lsm->lsm_md_oinfo[i]; + if (oinfo->lmo_mds == op_data->op_mds) { + *fid = oinfo->lmo_fid; + break; + } } - } - /* Hmm, can not find the stripe by mdt_index(op_mds) */ - if (i == lsm->lsm_md_stripe_count) - tgt = ERR_PTR(-EINVAL); + if (i == lsm->lsm_md_stripe_count) + *fid = lsm->lsm_md_oinfo[0].lmo_fid; + } return tgt; } @@ -1728,30 +1718,9 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data, return rc; } -static int lmv_done_writing(struct obd_export *exp, - struct md_op_data *op_data, - struct md_open_data *mod) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; - - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - rc = md_done_writing(tgt->ltd_exp, op_data, mod); - return rc; -} - static int lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - const ldlm_policy_data_t *policy, + const union ldlm_policy_data *policy, struct lookup_intent *it, struct md_op_data *op_data, struct lustre_handle *lockh, __u64 extra_lock_flags) { @@ -1847,7 +1816,7 @@ static int lmv_early_cancel(struct obd_export *exp, 
struct lmv_tgt_desc *tgt, struct lu_fid *fid = md_op_data_fid(op_data, flag); struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; - ldlm_policy_data_t policy = { {0} }; + union ldlm_policy_data policy = { { 0 } }; int rc = 0; if (!fid_is_sane(fid)) @@ -1937,7 +1906,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data, { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; + struct obd_export *target_exp; struct lmv_tgt_desc *src_tgt; + struct lmv_tgt_desc *tgt_tgt; + struct mdt_body *body; int rc; LASSERT(oldlen != 0); @@ -1977,6 +1949,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data, if (rc) return rc; src_tgt = lmv_find_target(lmv, &op_data->op_fid3); + if (IS_ERR(src_tgt)) + return PTR_ERR(src_tgt); + + target_exp = src_tgt->ltd_exp; } else { if (op_data->op_mea1) { struct lmv_stripe_md *lsm = op_data->op_mea1; @@ -1985,29 +1961,27 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data, oldlen, &op_data->op_fid1, &op_data->op_mds); - if (IS_ERR(src_tgt)) - return PTR_ERR(src_tgt); } else { src_tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(src_tgt)) - return PTR_ERR(src_tgt); - - op_data->op_mds = src_tgt->ltd_idx; } + if (IS_ERR(src_tgt)) + return PTR_ERR(src_tgt); if (op_data->op_mea2) { struct lmv_stripe_md *lsm = op_data->op_mea2; - const struct lmv_oinfo *oinfo; - oinfo = lsm_name_to_stripe_info(lsm, new, newlen); - if (IS_ERR(oinfo)) - return PTR_ERR(oinfo); - - op_data->op_fid2 = oinfo->lmo_fid; + tgt_tgt = lmv_locate_target_for_name(lmv, lsm, new, + newlen, + &op_data->op_fid2, + &op_data->op_mds); + } else { + tgt_tgt = lmv_find_target(lmv, &op_data->op_fid2); } + if (IS_ERR(tgt_tgt)) + return PTR_ERR(tgt_tgt); + + target_exp = tgt_tgt->ltd_exp; } - if (IS_ERR(src_tgt)) - return PTR_ERR(src_tgt); /* * LOOKUP lock on src child (fid3) should also be cancelled for @@ -2048,26 +2022,56 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data, return rc; } +retry_rename: /* * Cancel all the locks on tgt child (fid4). */ - if (fid_is_sane(&op_data->op_fid4)) + if (fid_is_sane(&op_data->op_fid4)) { + struct lmv_tgt_desc *tgt; + rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx, LCK_EX, MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID4); + if (rc) + return rc; + + tgt = lmv_find_target(lmv, &op_data->op_fid4); + if (IS_ERR(tgt)) + return PTR_ERR(tgt); - CDEBUG(D_INODE, DFID":m%d to "DFID"\n", PFID(&op_data->op_fid1), - op_data->op_mds, PFID(&op_data->op_fid2)); + /* + * Since the target child might be destroyed, and it might + * become orphan, and we can only check orphan on the local + * MDT right now, so we send rename request to the MDT where + * target child is located. If target child does not exist, + * then it will send the request to the target parent + */ + target_exp = tgt->ltd_exp; + } - rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen, - new, newlen, request); - return rc; + rc = md_rename(target_exp, op_data, old, oldlen, new, newlen, request); + if (rc && rc != -EREMOTE) + return rc; + + body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); + if (!body) + return -EPROTO; + + /* Not cross-ref case, just get out of here. 
*/ + if (likely(!(body->mbo_valid & OBD_MD_MDS))) + return rc; + + CDEBUG(D_INODE, "%s: try rename to another MDT for " DFID "\n", + exp->exp_obd->obd_name, PFID(&body->mbo_fid1)); + + op_data->op_fid4 = body->mbo_fid1; + ptlrpc_req_finished(*request); + *request = NULL; + goto retry_rename; } static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data, - void *ea, size_t ealen, void *ea2, size_t ea2len, - struct ptlrpc_request **request, - struct md_open_data **mod) + void *ea, size_t ealen, struct ptlrpc_request **request) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; @@ -2086,10 +2090,7 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data, if (IS_ERR(tgt)) return PTR_ERR(tgt); - rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2, - ea2len, request, mod); - - return rc; + return md_setattr(tgt->ltd_exp, op_data, ea, ealen, request); } static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, @@ -2623,23 +2624,10 @@ static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data, goto retry_unlink; } -static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) +static int lmv_precleanup(struct obd_device *obd) { - struct lmv_obd *lmv = &obd->u.lmv; - - switch (stage) { - case OBD_CLEANUP_EARLY: - /* XXX: here should be calling obd_precleanup() down to - * stack. - */ - break; - case OBD_CLEANUP_EXPORTS: - fld_client_debugfs_fini(&lmv->lmv_fld); - lprocfs_obd_cleanup(obd); - break; - default: - break; - } + fld_client_debugfs_fini(&obd->u.lmv.lmv_fld); + lprocfs_obd_cleanup(obd); return 0; } @@ -2654,14 +2642,12 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) * \param[in] key identifier of key to get value for * \param[in] vallen size of \a val * \param[out] val pointer to storage location for value - * \param[in] lsm optional striping metadata of object * * \retval 0 on success * \retval negative negated errno on failure */ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val, - struct lov_stripe_md *lsm) + __u32 keylen, void *key, __u32 *vallen, void *val) { struct obd_device *obd; struct lmv_obd *lmv; @@ -2693,7 +2679,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, continue; if (!obd_get_info(env, tgt->ltd_exp, keylen, key, - vallen, val, NULL)) + vallen, val)) return 0; } return -EINVAL; @@ -2709,7 +2695,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, * desc. 
*/ rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key, - vallen, val, NULL); + vallen, val); if (!rc && KEY_IS(KEY_CONN_DATA)) exp->exp_connect_data = *(struct obd_connect_data *)val; return rc; @@ -2777,90 +2763,6 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, return -EINVAL; } -static int lmv_pack_md_v1(const struct lmv_stripe_md *lsm, - struct lmv_mds_md_v1 *lmm1) -{ - int cplen; - int i; - - lmm1->lmv_magic = cpu_to_le32(lsm->lsm_md_magic); - lmm1->lmv_stripe_count = cpu_to_le32(lsm->lsm_md_stripe_count); - lmm1->lmv_master_mdt_index = cpu_to_le32(lsm->lsm_md_master_mdt_index); - lmm1->lmv_hash_type = cpu_to_le32(lsm->lsm_md_hash_type); - cplen = strlcpy(lmm1->lmv_pool_name, lsm->lsm_md_pool_name, - sizeof(lmm1->lmv_pool_name)); - if (cplen >= sizeof(lmm1->lmv_pool_name)) - return -E2BIG; - - for (i = 0; i < lsm->lsm_md_stripe_count; i++) - fid_cpu_to_le(&lmm1->lmv_stripe_fids[i], - &lsm->lsm_md_oinfo[i].lmo_fid); - return 0; -} - -static int -lmv_pack_md(union lmv_mds_md **lmmp, const struct lmv_stripe_md *lsm, - int stripe_count) -{ - int lmm_size = 0, rc = 0; - bool allocated = false; - - LASSERT(lmmp); - - /* Free lmm */ - if (*lmmp && !lsm) { - int stripe_cnt; - - stripe_cnt = lmv_mds_md_stripe_count_get(*lmmp); - lmm_size = lmv_mds_md_size(stripe_cnt, - le32_to_cpu((*lmmp)->lmv_magic)); - if (!lmm_size) - return -EINVAL; - kvfree(*lmmp); - *lmmp = NULL; - return 0; - } - - /* Alloc lmm */ - if (!*lmmp && !lsm) { - lmm_size = lmv_mds_md_size(stripe_count, LMV_MAGIC); - LASSERT(lmm_size > 0); - *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS); - if (!*lmmp) - return -ENOMEM; - lmv_mds_md_stripe_count_set(*lmmp, stripe_count); - (*lmmp)->lmv_magic = cpu_to_le32(LMV_MAGIC); - return lmm_size; - } - - /* pack lmm */ - LASSERT(lsm); - lmm_size = lmv_mds_md_size(lsm->lsm_md_stripe_count, - lsm->lsm_md_magic); - if (!*lmmp) { - *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS); - if (!*lmmp) - return -ENOMEM; - allocated = true; - } - - switch (lsm->lsm_md_magic) { - case LMV_MAGIC_V1: - rc = lmv_pack_md_v1(lsm, &(*lmmp)->lmv_md_v1); - break; - default: - rc = -EINVAL; - break; - } - - if (rc && allocated) { - kvfree(*lmmp); - *lmmp = NULL; - } - - return lmm_size; -} - static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm, const struct lmv_mds_md_v1 *lmm1) { @@ -2903,8 +2805,8 @@ static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm, return rc; } -int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp, - const union lmv_mds_md *lmm, int stripe_count) +static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp, + const union lmv_mds_md *lmm, size_t lmm_size) { struct lmv_stripe_md *lsm; bool allocated = false; @@ -2933,17 +2835,6 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp, return 0; } - /* Alloc memmd */ - if (!lsm && !lmm) { - lsm_size = lmv_stripe_md_size(stripe_count); - lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS); - if (!lsm) - return -ENOMEM; - lsm->lsm_md_stripe_count = stripe_count; - *lsmp = lsm; - return 0; - } - if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE) return -EPERM; @@ -2991,38 +2882,17 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp, } return lsm_size; } -EXPORT_SYMBOL(lmv_unpack_md); -static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, - struct lov_mds_md *lmm, int disk_len) +void lmv_free_memmd(struct lmv_stripe_md *lsm) { - return lmv_unpack_md(exp, (struct lmv_stripe_md 
**)lsmp, - (union lmv_mds_md *)lmm, disk_len); -} - -static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, - struct lov_stripe_md *lsm) -{ - const struct lmv_stripe_md *lmv = (struct lmv_stripe_md *)lsm; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv_obd = &obd->u.lmv; - int stripe_count; - - if (!lmmp) { - if (lsm) - stripe_count = lmv->lsm_md_stripe_count; - else - stripe_count = lmv_obd->desc.ld_tgt_count; - - return lmv_mds_md_size(stripe_count, LMV_MAGIC_V1); - } - - return lmv_pack_md((union lmv_mds_md **)lmmp, lmv, 0); + lmv_unpackmd(NULL, &lsm, NULL, 0); } +EXPORT_SYMBOL(lmv_free_memmd); static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, enum ldlm_mode mode, - enum ldlm_cancel_flags flags, void *opaque) + union ldlm_policy_data *policy, + enum ldlm_mode mode, enum ldlm_cancel_flags flags, + void *opaque) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; @@ -3064,7 +2934,7 @@ static int lmv_set_lock_data(struct obd_export *exp, static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags, const struct lu_fid *fid, enum ldlm_type type, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, struct lustre_handle *lockh) { @@ -3271,32 +3141,6 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp, return rc; } -static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - int rc = 0; - u32 i; - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - int err; - - tgt = lmv->tgts[i]; - if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) { - CERROR("lmv idx %d inactive\n", i); - return -EIO; - } - - err = obd_quotacheck(tgt->ltd_exp, oqctl); - if (err && !rc) - rc = err; - } - - return rc; -} - static int lmv_merge_attr(struct obd_export *exp, const struct lmv_stripe_md *lsm, struct cl_attr *attr, @@ -3349,12 +3193,9 @@ static struct obd_ops lmv_obd_ops = { .statfs = lmv_statfs, .get_info = lmv_get_info, .set_info_async = lmv_set_info_async, - .packmd = lmv_packmd, - .unpackmd = lmv_unpackmd, .notify = lmv_notify, .get_uuid = lmv_get_uuid, .iocontrol = lmv_iocontrol, - .quotacheck = lmv_quotacheck, .quotactl = lmv_quotactl }; @@ -3363,7 +3204,6 @@ static struct md_ops lmv_md_ops = { .null_inode = lmv_null_inode, .close = lmv_close, .create = lmv_create, - .done_writing = lmv_done_writing, .enqueue = lmv_enqueue, .getattr = lmv_getattr, .getxattr = lmv_getxattr, @@ -3388,6 +3228,7 @@ static struct md_ops lmv_md_ops = { .intent_getattr_async = lmv_intent_getattr_async, .revalidate_lock = lmv_revalidate_lock, .get_fid_from_lsm = lmv_get_fid_from_lsm, + .unpackmd = lmv_unpackmd, }; static int __init lmv_init(void) diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h index 4d2b7d303fea80904528ca8148ec54c9c9d36204..c49a34bf10e56ae0709d191481e1c974216688db 100644 --- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h @@ -217,7 +217,7 @@ struct lov_object { union lov_layout_state { struct lov_layout_raid0 { - unsigned lo_nr; + unsigned int lo_nr; /** * When this is true, lov_object::lo_attr contains * valid up to date attributes for a top-level @@ -412,7 +412,6 @@ struct lov_io_sub { int sub_refcheck; int sub_refcheck2; int sub_reenter; - void 
*sub_cookie; }; /** @@ -473,20 +472,6 @@ struct lov_session { struct lov_sublock_env ls_subenv; }; -/** - * State of transfer for lov. - */ -struct lov_req { - struct cl_req_slice lr_cl; -}; - -/** - * State of transfer for lovsub. - */ -struct lovsub_req { - struct cl_req_slice lsrq_cl; -}; - extern struct lu_device_type lov_device_type; extern struct lu_device_type lovsub_device_type; @@ -497,11 +482,9 @@ extern struct kmem_cache *lov_lock_kmem; extern struct kmem_cache *lov_object_kmem; extern struct kmem_cache *lov_thread_kmem; extern struct kmem_cache *lov_session_kmem; -extern struct kmem_cache *lov_req_kmem; extern struct kmem_cache *lovsub_lock_kmem; extern struct kmem_cache *lovsub_object_kmem; -extern struct kmem_cache *lovsub_req_kmem; extern struct kmem_cache *lov_lock_link_kmem; @@ -700,11 +683,6 @@ static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice) return container_of0(slice, struct lov_page, lps_cl); } -static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice) -{ - return container_of0(slice, struct lov_req, lr_cl); -} - static inline struct lovsub_page * cl2lovsub_page(const struct cl_page_slice *slice) { @@ -712,11 +690,6 @@ cl2lovsub_page(const struct cl_page_slice *slice) return container_of0(slice, struct lovsub_page, lsb_cl); } -static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice) -{ - return container_of0(slice, struct lovsub_req, lsrq_cl); -} - static inline struct lov_io *cl2lov_io(const struct lu_env *env, const struct cl_io_slice *ios) { diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c index 056ae2ed88e8ccc562066916d3d1a5bdcf4b6319..7301f6e579a16a442f785f71d21311a6a0fe4b3e 100644 --- a/drivers/staging/lustre/lustre/lov/lov_dev.c +++ b/drivers/staging/lustre/lustre/lov/lov_dev.c @@ -46,11 +46,9 @@ struct kmem_cache *lov_lock_kmem; struct kmem_cache *lov_object_kmem; struct kmem_cache *lov_thread_kmem; struct kmem_cache *lov_session_kmem; -struct kmem_cache *lov_req_kmem; struct kmem_cache *lovsub_lock_kmem; struct kmem_cache *lovsub_object_kmem; -struct kmem_cache *lovsub_req_kmem; struct kmem_cache *lov_lock_link_kmem; @@ -78,11 +76,6 @@ struct lu_kmem_descr lov_caches[] = { .ckd_name = "lov_session_kmem", .ckd_size = sizeof(struct lov_session) }, - { - .ckd_cache = &lov_req_kmem, - .ckd_name = "lov_req_kmem", - .ckd_size = sizeof(struct lov_req) - }, { .ckd_cache = &lovsub_lock_kmem, .ckd_name = "lovsub_lock_kmem", @@ -93,11 +86,6 @@ struct lu_kmem_descr lov_caches[] = { .ckd_name = "lovsub_object_kmem", .ckd_size = sizeof(struct lovsub_object) }, - { - .ckd_cache = &lovsub_req_kmem, - .ckd_name = "lovsub_req_kmem", - .ckd_size = sizeof(struct lovsub_req) - }, { .ckd_cache = &lov_lock_link_kmem, .ckd_name = "lov_lock_link_kmem", @@ -108,25 +96,6 @@ struct lu_kmem_descr lov_caches[] = { } }; -/***************************************************************************** - * - * Lov transfer operations. - * - */ - -static void lov_req_completion(const struct lu_env *env, - const struct cl_req_slice *slice, int ioret) -{ - struct lov_req *lr; - - lr = cl2lov_req(slice); - kmem_cache_free(lov_req_kmem, lr); -} - -static const struct cl_req_operations lov_req_ops = { - .cro_completion = lov_req_completion -}; - /***************************************************************************** * * Lov device and device type functions. 
@@ -248,26 +217,6 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, return rc; } -static int lov_req_init(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req) -{ - struct lov_req *lr; - int result; - - lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS); - if (lr) { - cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops); - result = 0; - } else { - result = -ENOMEM; - } - return result; -} - -static const struct cl_device_operations lov_cl_ops = { - .cdo_req_init = lov_req_init -}; - static void lov_emerg_free(struct lov_device_emerg **emrg, int nr) { int i; @@ -478,7 +427,6 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env, cl_device_init(&ld->ld_cl, t); d = lov2lu_dev(ld); d->ld_ops = &lov_lu_ops; - ld->ld_cl.cd_ops = &lov_cl_ops; mutex_init(&ld->ld_mutex); lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class); diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c index 214c561767e02d07e9286c2f5310fe95e123b05c..ac0bf64c08c1d47dc1d412fb68472f3fe241fe39 100644 --- a/drivers/staging/lustre/lustre/lov/lov_ea.c +++ b/drivers/staging/lustre/lustre/lov/lov_ea.c @@ -76,18 +76,19 @@ static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes, return 0; } -struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size) +struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count) { + size_t oinfo_ptrs_size, lsm_size; struct lov_stripe_md *lsm; struct lov_oinfo *loi; - int i, oinfo_ptrs_size; + int i; LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT); oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count; - *size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size; + lsm_size = sizeof(*lsm) + oinfo_ptrs_size; - lsm = libcfs_kvzalloc(*size, GFP_NOFS); + lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS); if (!lsm) return NULL; @@ -117,9 +118,43 @@ void lsm_free_plain(struct lov_stripe_md *lsm) kvfree(lsm); } -static void lsm_unpackmd_common(struct lov_stripe_md *lsm, - struct lov_mds_md *lmm) +/* + * Find minimum stripe maxbytes value. For inactive or + * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES. + */ +static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt) +{ + loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; + struct obd_import *imp; + + if (!tgt->ltd_active) + return maxbytes; + + imp = tgt->ltd_obd->u.cli.cl_import; + if (!imp) + return maxbytes; + + spin_lock(&imp->imp_lock); + if (imp->imp_state == LUSTRE_IMP_FULL && + (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) && + imp->imp_connect_data.ocd_maxbytes > 0) + maxbytes = imp->imp_connect_data.ocd_maxbytes; + + spin_unlock(&imp->imp_lock); + + return maxbytes; +} + +static int lsm_unpackmd_common(struct lov_obd *lov, + struct lov_stripe_md *lsm, + struct lov_mds_md *lmm, + struct lov_ost_data_v1 *objects) { + loff_t stripe_maxbytes = LLONG_MAX; + unsigned int stripe_count; + struct lov_oinfo *loi; + unsigned int i; + /* * This supposes lov_mds_md_v1/v3 first fields are * are the same @@ -129,11 +164,54 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm, lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern); lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen); lsm->lsm_pool_name[0] = '\0'; + + stripe_count = lsm_is_released(lsm) ? 
0 : lsm->lsm_stripe_count; + + for (i = 0; i < stripe_count; i++) { + loff_t tgt_bytes; + + loi = lsm->lsm_oinfo[i]; + ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi); + loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx); + loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen); + if (lov_oinfo_is_dummy(loi)) + continue; + + if (loi->loi_ost_idx >= lov->desc.ld_tgt_count && + !lov2obd(lov)->obd_process_conf) { + CERROR("%s: OST index %d more than OST count %d\n", + (char *)lov->desc.ld_uuid.uuid, + loi->loi_ost_idx, lov->desc.ld_tgt_count); + lov_dump_lmm_v1(D_WARNING, lmm); + return -EINVAL; + } + + if (!lov->lov_tgts[loi->loi_ost_idx]) { + CERROR("%s: OST index %d missing\n", + (char *)lov->desc.ld_uuid.uuid, + loi->loi_ost_idx); + lov_dump_lmm_v1(D_WARNING, lmm); + continue; + } + + tgt_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]); + stripe_maxbytes = min_t(loff_t, stripe_maxbytes, tgt_bytes); + } + + if (stripe_maxbytes == LLONG_MAX) + stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; + + if (!lsm->lsm_stripe_count) + lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count; + else + lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count; + + return 0; } static void lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno, - u64 *lov_off, u64 *swidth) + loff_t *lov_off, loff_t *swidth) { if (swidth) *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; @@ -141,36 +219,12 @@ lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno, static void lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno, - u64 *lov_off, u64 *swidth) + loff_t *lov_off, loff_t *swidth) { if (swidth) *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; } -/* Find minimum stripe maxbytes value. For inactive or - * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES. - */ -static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes) -{ - struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import; - - if (!imp || !tgt->ltd_active) { - *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; - return; - } - - spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_FULL && - (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) && - imp->imp_connect_data.ocd_maxbytes > 0) { - if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes) - *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes; - } else { - *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; - } - spin_unlock(&imp->imp_lock); -} - static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes, __u16 *stripe_count) { @@ -197,45 +251,7 @@ static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes, static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm, struct lov_mds_md_v1 *lmm) { - struct lov_oinfo *loi; - int i; - int stripe_count; - __u64 stripe_maxbytes = OBD_OBJECT_EOF; - - lsm_unpackmd_common(lsm, lmm); - - stripe_count = lsm_is_released(lsm) ? 
0 : lsm->lsm_stripe_count; - - for (i = 0; i < stripe_count; i++) { - /* XXX LOV STACKING call down to osc_unpackmd() */ - loi = lsm->lsm_oinfo[i]; - ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi); - loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx); - loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen); - if (lov_oinfo_is_dummy(loi)) - continue; - - if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) { - CERROR("OST index %d more than OST count %d\n", - loi->loi_ost_idx, lov->desc.ld_tgt_count); - lov_dump_lmm_v1(D_WARNING, lmm); - return -EINVAL; - } - if (!lov->lov_tgts[loi->loi_ost_idx]) { - CERROR("OST index %d missing\n", loi->loi_ost_idx); - lov_dump_lmm_v1(D_WARNING, lmm); - return -EINVAL; - } - /* calculate the minimum stripe max bytes */ - lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx], - &stripe_maxbytes); - } - - lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count; - if (lsm->lsm_stripe_count == 0) - lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count; - - return 0; + return lsm_unpackmd_common(lov, lsm, lmm, lmm->lmm_objects); } const struct lsm_operations lsm_v1_ops = { @@ -275,55 +291,21 @@ static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes, } static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md *lmmv1) + struct lov_mds_md *lmm) { - struct lov_mds_md_v3 *lmm; - struct lov_oinfo *loi; - int i; - int stripe_count; - __u64 stripe_maxbytes = OBD_OBJECT_EOF; - int cplen = 0; + struct lov_mds_md_v3 *lmm_v3 = (struct lov_mds_md_v3 *)lmm; + size_t cplen = 0; + int rc; - lmm = (struct lov_mds_md_v3 *)lmmv1; + rc = lsm_unpackmd_common(lov, lsm, lmm, lmm_v3->lmm_objects); + if (rc) + return rc; - lsm_unpackmd_common(lsm, (struct lov_mds_md_v1 *)lmm); - - stripe_count = lsm_is_released(lsm) ? 
0 : lsm->lsm_stripe_count; - - cplen = strlcpy(lsm->lsm_pool_name, lmm->lmm_pool_name, + cplen = strlcpy(lsm->lsm_pool_name, lmm_v3->lmm_pool_name, sizeof(lsm->lsm_pool_name)); if (cplen >= sizeof(lsm->lsm_pool_name)) return -E2BIG; - for (i = 0; i < stripe_count; i++) { - /* XXX LOV STACKING call down to osc_unpackmd() */ - loi = lsm->lsm_oinfo[i]; - ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi); - loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx); - loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen); - if (lov_oinfo_is_dummy(loi)) - continue; - - if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) { - CERROR("OST index %d more than OST count %d\n", - loi->loi_ost_idx, lov->desc.ld_tgt_count); - lov_dump_lmm_v3(D_WARNING, lmm); - return -EINVAL; - } - if (!lov->lov_tgts[loi->loi_ost_idx]) { - CERROR("OST index %d missing\n", loi->loi_ost_idx); - lov_dump_lmm_v3(D_WARNING, lmm); - return -EINVAL; - } - /* calculate the minimum stripe max bytes */ - lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx], - &stripe_maxbytes); - } - - lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count; - if (lsm->lsm_stripe_count == 0) - lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count; - return 0; } diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h index 07e5ede3e952b1e2c12b9dfc47aa00afeec1da75..774499c74daa8cf9e292aaab6f4e38cbf3b1d21e 100644 --- a/drivers/staging/lustre/lustre/lov/lov_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_internal.h @@ -36,6 +36,77 @@ #include "../include/obd_class.h" #include "../include/lustre/lustre_user.h" +/* + * If we are unable to get the maximum object size from the OST in + * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using + * the old maximum object size from ext3. + */ +#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL + +struct lov_stripe_md { + atomic_t lsm_refc; + spinlock_t lsm_lock; + pid_t lsm_lock_owner; /* debugging */ + + /* + * maximum possible file size, might change as OSTs status changes, + * e.g. disconnected, deactivated + */ + loff_t lsm_maxbytes; + struct ost_id lsm_oi; + u32 lsm_magic; + u32 lsm_stripe_size; + u32 lsm_pattern; /* RAID0, RAID1, released, ... */ + u16 lsm_stripe_count; + u16 lsm_layout_gen; + char lsm_pool_name[LOV_MAXPOOLNAME + 1]; + struct lov_oinfo *lsm_oinfo[0]; +}; + +static inline bool lsm_is_released(struct lov_stripe_md *lsm) +{ + return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED); +} + +static inline bool lsm_has_objects(struct lov_stripe_md *lsm) +{ + if (!lsm) + return false; + + if (lsm_is_released(lsm)) + return false; + + return true; +} + +struct lsm_operations { + void (*lsm_free)(struct lov_stripe_md *); + void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, loff_t *, + loff_t *); + void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, loff_t *, + loff_t *); + int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes, + u16 *stripe_count); + int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm, + struct lov_mds_md *lmm); +}; + +extern const struct lsm_operations lsm_v1_ops; +extern const struct lsm_operations lsm_v3_ops; + +static inline const struct lsm_operations *lsm_op_find(int magic) +{ + switch (magic) { + case LOV_MAGIC_V1: + return &lsm_v1_ops; + case LOV_MAGIC_V3: + return &lsm_v3_ops; + default: + CERROR("unrecognized lsm_magic %08x\n", magic); + return NULL; + } +} + /* lov_do_div64(a, b) returns a % b, and a = a / b. 
* The 32-bit code is LOV-specific due to knowing about stripe limits in * order to reduce the divisor to a 32-bit number. If the divisor is @@ -110,8 +181,6 @@ struct lov_request_set { atomic_t set_completes; atomic_t set_success; atomic_t set_finish_checked; - struct llog_cookie *set_cookies; - int set_cookie_sent; struct list_head set_list; wait_queue_head_t set_waitq; }; @@ -132,8 +201,6 @@ static inline void lov_put_reqset(struct lov_request_set *set) (char *)((lv)->lov_tgts[index]->ltd_uuid.uuid) /* lov_merge.c */ -void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid, - struct lov_stripe_md *lsm, int stripeno, int *set); int lov_merge_lvb_kms(struct lov_stripe_md *lsm, struct ost_lvb *lvb, __u64 *kms_place); @@ -150,17 +217,9 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index, int stripe); /* lov_request.c */ -int lov_update_common_set(struct lov_request_set *set, - struct lov_request *req, int rc); int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo, struct lov_request_set **reqset); int lov_fini_getattr_set(struct lov_request_set *set); -int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, - struct obd_trans_info *oti, - struct lov_request_set **reqset); -int lov_update_setattr_set(struct lov_request_set *set, - struct lov_request *req, int rc); -int lov_fini_setattr_set(struct lov_request_set *set); int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, struct lov_request_set **reqset); int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, @@ -186,12 +245,10 @@ int lov_del_target(struct obd_device *obd, __u32 index, struct obd_uuid *uuidp, int gen); /* lov_pack.c */ -int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm, - struct lov_stripe_md *lsm); -int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, - struct lov_mds_md *lmm, int lmm_bytes); -int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count, - int pattern, int magic); +ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf, + size_t buf_size); +struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm, + size_t lmm_size); int lov_free_memmd(struct lov_stripe_md **lsmp); void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm); @@ -199,7 +256,7 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm); void lov_dump_lmm_common(int level, void *lmmp); /* lov_ea.c */ -struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size); +struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count); void lsm_free_plain(struct lov_stripe_md *lsm); void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm); @@ -244,4 +301,9 @@ static inline bool lov_oinfo_is_dummy(const struct lov_oinfo *loi) return false; } +static inline struct obd_device *lov2obd(const struct lov_obd *lov) +{ + return container_of0(lov, struct obd_device, u.lov); +} + #endif diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c index d10157985ed91806d1c327ba435e623a0890f52f..002326c282a71f335b3d847b3b8bb9e579f5b4d4 100644 --- a/drivers/staging/lustre/lustre/lov/lov_io.c +++ b/drivers/staging/lustre/lustre/lov/lov_io.c @@ -86,6 +86,8 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio, switch (io->ci_type) { case CIT_SETATTR: { io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr; + io->u.ci_setattr.sa_attr_flags = + parent->u.ci_setattr.sa_attr_flags; io->u.ci_setattr.sa_valid = 
parent->u.ci_setattr.sa_valid; io->u.ci_setattr.sa_stripe_index = stripe; io->u.ci_setattr.sa_parent_fid = @@ -98,6 +100,12 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio, } break; } + case CIT_DATA_VERSION: { + io->u.ci_data_version.dv_data_version = 0; + io->u.ci_data_version.dv_flags = + parent->u.ci_data_version.dv_flags; + break; + } case CIT_FAULT: { struct cl_object *obj = parent->ci_obj; loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index); @@ -159,12 +167,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio, sub->sub_env = ld->ld_emrg[stripe]->emrg_env; sub->sub_borrowed = 1; } else { - void *cookie; - - /* obtain new environment */ - cookie = cl_env_reenter(); sub->sub_env = cl_env_get(&sub->sub_refcheck); - cl_env_reexit(cookie); if (IS_ERR(sub->sub_env)) result = PTR_ERR(sub->sub_env); @@ -337,6 +340,11 @@ static int lov_io_slice_init(struct lov_io *lio, struct lov_object *obj, lio->lis_endpos = OBD_OBJECT_EOF; break; + case CIT_DATA_VERSION: + lio->lis_pos = 0; + lio->lis_endpos = OBD_OBJECT_EOF; + break; + case CIT_FAULT: { pgoff_t index = io->u.ci_fault.ft_index; @@ -514,6 +522,24 @@ static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io) return 0; } +static void +lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios) +{ + struct lov_io *lio = cl2lov_io(env, ios); + struct cl_io *parent = lio->lis_cl.cis_io; + struct lov_io_sub *sub; + + list_for_each_entry(sub, &lio->lis_active, sub_linkage) { + lov_io_end_wrapper(env, sub->sub_io); + + parent->u.ci_data_version.dv_data_version += + sub->sub_io->u.ci_data_version.dv_data_version; + + if (!parent->ci_result) + parent->ci_result = sub->sub_io->ci_result; + } +} + static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io) { cl_io_iter_fini(env, io); @@ -555,6 +581,65 @@ static void lov_io_unlock(const struct lu_env *env, LASSERT(rc == 0); } +static int lov_io_read_ahead(const struct lu_env *env, + const struct cl_io_slice *ios, + pgoff_t start, struct cl_read_ahead *ra) +{ + struct lov_io *lio = cl2lov_io(env, ios); + struct lov_object *loo = lio->lis_object; + struct cl_object *obj = lov2cl(loo); + struct lov_layout_raid0 *r0 = lov_r0(loo); + unsigned int pps; /* pages per stripe */ + struct lov_io_sub *sub; + pgoff_t ra_end; + loff_t suboff; + int stripe; + int rc; + + stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start)); + if (unlikely(!r0->lo_sub[stripe])) + return -EIO; + + sub = lov_sub_get(env, lio, stripe); + if (IS_ERR(sub)) + return PTR_ERR(sub); + + lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff); + rc = cl_io_read_ahead(sub->sub_env, sub->sub_io, + cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff), + ra); + lov_sub_put(sub); + + CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n", + PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc); + if (rc) + return rc; + + /** + * Adjust the stripe index by layout of raid0. ra->cra_end is + * the maximum page index covered by an underlying DLM lock. + * This function converts cra_end from stripe level to file + * level, and make sure it's not beyond stripe boundary. 
+ */ + if (r0->lo_nr == 1) /* single stripe file */ + return 0; + + /* cra_end is stripe level, convert it into file level */ + ra_end = ra->cra_end; + if (ra_end != CL_PAGE_EOF) + ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe); + + pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT; + + CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, stripe_size = %u, stripe no = %u, start index = %lu\n", + PFID(lu_object_fid(lov2lu(loo))), ra_end, pps, + loo->lo_lsm->lsm_stripe_size, stripe, start); + + /* never exceed the end of the stripe */ + ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1); + return 0; +} + /** * lov implementation of cl_operations::cio_submit() method. It takes a list * of pages in \a queue, splits it into per-stripe sub-lists, invokes @@ -779,6 +864,15 @@ static const struct cl_io_operations lov_io_ops = { .cio_start = lov_io_start, .cio_end = lov_io_end }, + [CIT_DATA_VERSION] = { + .cio_fini = lov_io_fini, + .cio_iter_init = lov_io_iter_init, + .cio_iter_fini = lov_io_iter_fini, + .cio_lock = lov_io_lock, + .cio_unlock = lov_io_unlock, + .cio_start = lov_io_start, + .cio_end = lov_io_data_version_end, + }, [CIT_FAULT] = { .cio_fini = lov_io_fini, .cio_iter_init = lov_io_iter_init, @@ -801,6 +895,7 @@ static const struct cl_io_operations lov_io_ops = { .cio_fini = lov_io_fini } }, + .cio_read_ahead = lov_io_read_ahead, .cio_submit = lov_io_submit, .cio_commit_async = lov_io_commit_async, }; @@ -820,6 +915,13 @@ static void lov_empty_io_fini(const struct lu_env *env, wake_up_all(&lov->lo_waitq); } +static int lov_empty_io_submit(const struct lu_env *env, + const struct cl_io_slice *ios, + enum cl_req_type crt, struct cl_2queue *queue) +{ + return -EBADF; +} + static void lov_empty_impossible(const struct lu_env *env, struct cl_io_slice *ios) { @@ -870,7 +972,7 @@ static const struct cl_io_operations lov_empty_io_ops = { .cio_fini = lov_empty_io_fini } }, - .cio_submit = LOV_EMPTY_IMPOSSIBLE, + .cio_submit = lov_empty_io_submit, .cio_commit_async = LOV_EMPTY_IMPOSSIBLE }; @@ -909,6 +1011,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, break; case CIT_FSYNC: case CIT_SETATTR: + case CIT_DATA_VERSION: result = 1; break; case CIT_WRITE: @@ -944,6 +1047,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, LASSERTF(0, "invalid type %d\n", io->ci_type); case CIT_MISC: case CIT_FSYNC: + case CIT_DATA_VERSION: result = 1; break; case CIT_SETATTR: diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c index 674af106b50b51ca1adee245126bf54eeaf31a9e..391dfd20717746aad10fae2ff50a3bed24b1d15d 100644 --- a/drivers/staging/lustre/lustre/lov/lov_merge.c +++ b/drivers/staging/lustre/lustre/lov/lov_merge.c @@ -104,53 +104,3 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm, lvb->lvb_ctime = current_ctime; return rc; } - -void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid, - struct lov_stripe_md *lsm, int stripeno, int *set) -{ - valid &= src->o_valid; - - if (*set) { - tgt->o_valid &= valid; - if (valid & OBD_MD_FLSIZE) { - /* this handles sparse files properly */ - u64 lov_size; - - lov_size = lov_stripe_size(lsm, src->o_size, stripeno); - if (lov_size > tgt->o_size) - tgt->o_size = lov_size; - } - if (valid & OBD_MD_FLBLOCKS) - tgt->o_blocks += src->o_blocks; - if (valid & OBD_MD_FLBLKSZ) - tgt->o_blksize += src->o_blksize; - if (valid & OBD_MD_FLCTIME && tgt->o_ctime < src->o_ctime) - tgt->o_ctime = src->o_ctime; - if (valid & OBD_MD_FLMTIME && 
tgt->o_mtime < src->o_mtime) - tgt->o_mtime = src->o_mtime; - if (valid & OBD_MD_FLDATAVERSION) - tgt->o_data_version += src->o_data_version; - - /* handle flags */ - if (valid & OBD_MD_FLFLAGS) - tgt->o_flags &= src->o_flags; - else - tgt->o_flags = 0; - } else { - memcpy(tgt, src, sizeof(*tgt)); - tgt->o_oi = lsm->lsm_oi; - tgt->o_valid = valid; - if (valid & OBD_MD_FLSIZE) - tgt->o_size = lov_stripe_size(lsm, src->o_size, - stripeno); - tgt->o_flags = 0; - if (valid & OBD_MD_FLFLAGS) - tgt->o_flags = src->o_flags; - } - - /* data_version needs to be valid on all stripes to be correct! */ - if (!(valid & OBD_MD_FLDATAVERSION)) - tgt->o_valid &= ~OBD_MD_FLDATAVERSION; - - *set += 1; -} diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c index b23016f7ec2690fc83a55cf0c3347dbe634178b7..63b064523c6a0ae22a30026c94ea84a162a73a6a 100644 --- a/drivers/staging/lustre/lustre/lov/lov_obd.c +++ b/drivers/staging/lustre/lustre/lov/lov_obd.c @@ -40,19 +40,20 @@ #define DEBUG_SUBSYSTEM S_LOV #include "../../include/linux/libcfs/libcfs.h" -#include "../include/obd_support.h" -#include "../include/lustre/lustre_ioctl.h" -#include "../include/lustre_lib.h" -#include "../include/lustre_net.h" #include "../include/lustre/lustre_idl.h" +#include "../include/lustre/lustre_ioctl.h" + +#include "../include/cl_object.h" #include "../include/lustre_dlm.h" +#include "../include/lustre_fid.h" +#include "../include/lustre_lib.h" #include "../include/lustre_mds.h" -#include "../include/obd_class.h" -#include "../include/lprocfs_status.h" +#include "../include/lustre_net.h" #include "../include/lustre_param.h" -#include "../include/cl_object.h" -#include "../include/lustre/ll_fiemap.h" -#include "../include/lustre_fid.h" +#include "../include/lustre_swab.h" +#include "../include/lprocfs_status.h" +#include "../include/obd_class.h" +#include "../include/obd_support.h" #include "lov_internal.h" @@ -826,29 +827,6 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg) return rc; } -static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) -{ - struct lov_obd *lov = &obd->u.lov; - - switch (stage) { - case OBD_CLEANUP_EARLY: { - int i; - - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active) - continue; - obd_precleanup(class_exp2obd(lov->lov_tgts[i]->ltd_exp), - OBD_CLEANUP_EARLY); - } - break; - } - default: - break; - } - - return 0; -} - static int lov_cleanup(struct obd_device *obd) { struct lov_obd *lov = &obd->u.lov; @@ -972,163 +950,6 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg, return rc; } -#define ASSERT_LSM_MAGIC(lsmp) \ -do { \ - LASSERT((lsmp)); \ - LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \ - (lsmp)->lsm_magic == LOV_MAGIC_V3), \ - "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \ -} while (0) - -static int lov_getattr_interpret(struct ptlrpc_request_set *rqset, - void *data, int rc) -{ - struct lov_request_set *lovset = (struct lov_request_set *)data; - int err; - - /* don't do attribute merge if this async op failed */ - if (rc) - atomic_set(&lovset->set_completes, 0); - err = lov_fini_getattr_set(lovset); - return rc ? 
rc : err; -} - -static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, - struct ptlrpc_request_set *rqset) -{ - struct lov_request_set *lovset; - struct lov_obd *lov; - struct lov_request *req; - int rc = 0, err; - - LASSERT(oinfo); - ASSERT_LSM_MAGIC(oinfo->oi_md); - - if (!exp || !exp->exp_obd) - return -ENODEV; - - lov = &exp->exp_obd->u.lov; - - rc = lov_prep_getattr_set(exp, oinfo, &lovset); - if (rc) - return rc; - - CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n", - POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count, - oinfo->oi_md->lsm_stripe_size); - - list_for_each_entry(req, &lovset->set_list, rq_link) { - CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n", - POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe, - POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx); - rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp, - &req->rq_oi, rqset); - if (rc) { - CERROR("%s: getattr objid "DOSTID" subobj" - DOSTID" on OST idx %d: rc = %d\n", - exp->exp_obd->obd_name, - POSTID(&oinfo->oi_oa->o_oi), - POSTID(&req->rq_oi.oi_oa->o_oi), - req->rq_idx, rc); - goto out; - } - } - - if (!list_empty(&rqset->set_requests)) { - LASSERT(rc == 0); - LASSERT(!rqset->set_interpret); - rqset->set_interpret = lov_getattr_interpret; - rqset->set_arg = (void *)lovset; - return rc; - } -out: - if (rc) - atomic_set(&lovset->set_completes, 0); - err = lov_fini_getattr_set(lovset); - return rc ? rc : err; -} - -static int lov_setattr_interpret(struct ptlrpc_request_set *rqset, - void *data, int rc) -{ - struct lov_request_set *lovset = (struct lov_request_set *)data; - int err; - - if (rc) - atomic_set(&lovset->set_completes, 0); - err = lov_fini_setattr_set(lovset); - return rc ? rc : err; -} - -/* If @oti is given, the request goes from MDS and responses from OSTs are not - * needed. Otherwise, a client is waiting for responses. - */ -static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, - struct obd_trans_info *oti, - struct ptlrpc_request_set *rqset) -{ - struct lov_request_set *set; - struct lov_request *req; - struct lov_obd *lov; - int rc = 0; - - LASSERT(oinfo); - ASSERT_LSM_MAGIC(oinfo->oi_md); - if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) { - LASSERT(oti); - LASSERT(oti->oti_logcookies); - } - - if (!exp || !exp->exp_obd) - return -ENODEV; - - lov = &exp->exp_obd->u.lov; - rc = lov_prep_setattr_set(exp, oinfo, oti, &set); - if (rc) - return rc; - - CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n", - POSTID(&oinfo->oi_md->lsm_oi), - oinfo->oi_md->lsm_stripe_count, - oinfo->oi_md->lsm_stripe_size); - - list_for_each_entry(req, &set->set_list, rq_link) { - if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) - oti->oti_logcookies = set->set_cookies + req->rq_stripe; - - CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n", - POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe, - POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx); - - rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp, - &req->rq_oi, oti, rqset); - if (rc) { - CERROR("error: setattr objid "DOSTID" subobj" - DOSTID" on OST idx %d: rc = %d\n", - POSTID(&set->set_oi->oi_oa->o_oi), - POSTID(&req->rq_oi.oi_oa->o_oi), - req->rq_idx, rc); - break; - } - } - - /* If we are not waiting for responses on async requests, return. */ - if (rc || !rqset || list_empty(&rqset->set_requests)) { - int err; - - if (rc) - atomic_set(&set->set_completes, 0); - err = lov_fini_setattr_set(set); - return rc ? 
rc : err; - } - - LASSERT(!rqset->set_interpret); - rqset->set_interpret = lov_setattr_interpret; - rqset->set_arg = (void *)set; - - return 0; -} - int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc) { struct lov_request_set *lovset = (struct lov_request_set *)data; @@ -1183,7 +1004,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_statfs *osfs, __u64 max_age, __u32 flags) { struct ptlrpc_request_set *set = NULL; - struct obd_info oinfo = { }; + struct obd_info oinfo = { + .oi_osfs = osfs, + .oi_flags = flags, + }; int rc = 0; /* for obdclass we forbid using obd_statfs_rqset, but prefer using async @@ -1193,8 +1017,6 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp, if (!set) return -ENOMEM; - oinfo.oi_osfs = osfs; - oinfo.oi_flags = flags; rc = lov_statfs_async(exp, &oinfo, max_age, set); if (rc == 0) rc = ptlrpc_set_wait(set); @@ -1235,8 +1057,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd), - min((int)data->ioc_plen2, - (int)sizeof(struct obd_uuid)))) + min_t(unsigned long, data->ioc_plen2, + sizeof(struct obd_uuid)))) return -EFAULT; memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32)); @@ -1249,8 +1071,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, if (rc) return rc; if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int)data->ioc_plen1, - (int)sizeof(stat_buf)))) + min_t(unsigned long, data->ioc_plen1, + sizeof(stat_buf)))) return -EFAULT; break; } @@ -1367,8 +1189,6 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, osc_obd->obd_force = obddev->obd_force; err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp, len, karg, uarg); - if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) - return err; if (err) { if (lov->lov_tgts[i]->ltd_active) { CDEBUG(err == -ENOTTY ? @@ -1391,454 +1211,35 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, return rc; } -#define FIEMAP_BUFFER_SIZE 4096 - -/** - * Non-zero fe_logical indicates that this is a continuation FIEMAP - * call. The local end offset and the device are sent in the first - * fm_extent. This function calculates the stripe number from the index. - * This function returns a stripe_no on which mapping is to be restarted. - * - * This function returns fm_end_offset which is the in-OST offset at which - * mapping should be restarted. If fm_end_offset=0 is returned then caller - * will re-calculate proper offset in next stripe. - * Note that the first extent is passed to lov_get_info via the value field. 
- * - * \param fiemap fiemap request header - * \param lsm striping information for the file - * \param fm_start logical start of mapping - * \param fm_end logical end of mapping - * \param start_stripe starting stripe will be returned in this - */ -static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap, - struct lov_stripe_md *lsm, u64 fm_start, - u64 fm_end, int *start_stripe) -{ - u64 local_end = fiemap->fm_extents[0].fe_logical; - u64 lun_start, lun_end; - u64 fm_end_offset; - int stripe_no = -1, i; - - if (fiemap->fm_extent_count == 0 || - fiemap->fm_extents[0].fe_logical == 0) - return 0; - - /* Find out stripe_no from ost_index saved in the fe_device */ - for (i = 0; i < lsm->lsm_stripe_count; i++) { - struct lov_oinfo *oinfo = lsm->lsm_oinfo[i]; - - if (lov_oinfo_is_dummy(oinfo)) - continue; - - if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) { - stripe_no = i; - break; - } - } - if (stripe_no == -1) - return -EINVAL; - - /* If we have finished mapping on previous device, shift logical - * offset to start of next device - */ - if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end, - &lun_start, &lun_end)) != 0 && - local_end < lun_end) { - fm_end_offset = local_end; - *start_stripe = stripe_no; - } else { - /* This is a special value to indicate that caller should - * calculate offset in next stripe. - */ - fm_end_offset = 0; - *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count; - } - - return fm_end_offset; -} - -/** - * We calculate on which OST the mapping will end. If the length of mapping - * is greater than (stripe_size * stripe_count) then the last_stripe will - * will be one just before start_stripe. Else we check if the mapping - * intersects each OST and find last_stripe. - * This function returns the last_stripe and also sets the stripe_count - * over which the mapping is spread - * - * \param lsm striping information for the file - * \param fm_start logical start of mapping - * \param fm_end logical end of mapping - * \param start_stripe starting stripe of the mapping - * \param stripe_count the number of stripes across which to map is returned - * - * \retval last_stripe return the last stripe of the mapping - */ -static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, u64 fm_start, - u64 fm_end, int start_stripe, - int *stripe_count) -{ - int last_stripe; - u64 obd_start, obd_end; - int i, j; - - if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) { - last_stripe = start_stripe < 1 ? lsm->lsm_stripe_count - 1 : - start_stripe - 1; - *stripe_count = lsm->lsm_stripe_count; - } else { - for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count; - i = (i + 1) % lsm->lsm_stripe_count, j++) { - if ((lov_stripe_intersects(lsm, i, fm_start, fm_end, - &obd_start, &obd_end)) == 0) - break; - } - *stripe_count = j; - last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count; - } - - return last_stripe; -} - -/** - * Set fe_device and copy extents from local buffer into main return buffer. 
- * - * \param fiemap fiemap request header - * \param lcl_fm_ext array of local fiemap extents to be copied - * \param ost_index OST index to be written into the fm_device field for each - extent - * \param ext_count number of extents to be copied - * \param current_extent where to start copying in main extent array - */ -static void fiemap_prepare_and_copy_exts(struct ll_user_fiemap *fiemap, - struct ll_fiemap_extent *lcl_fm_ext, - int ost_index, unsigned int ext_count, - int current_extent) -{ - char *to; - int ext; - - for (ext = 0; ext < ext_count; ext++) { - lcl_fm_ext[ext].fe_device = ost_index; - lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET; - } - - /* Copy fm_extent's from fm_local to return buffer */ - to = (char *)fiemap + fiemap_count_to_size(current_extent); - memcpy(to, lcl_fm_ext, ext_count * sizeof(struct ll_fiemap_extent)); -} - -/** - * Break down the FIEMAP request and send appropriate calls to individual OSTs. - * This also handles the restarting of FIEMAP calls in case mapping overflows - * the available number of extents in single call. - */ -static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, - __u32 *vallen, void *val, struct lov_stripe_md *lsm) -{ - struct ll_fiemap_info_key *fm_key = key; - struct ll_user_fiemap *fiemap = val; - struct ll_user_fiemap *fm_local = NULL; - struct ll_fiemap_extent *lcl_fm_ext; - int count_local; - unsigned int get_num_extents = 0; - int ost_index = 0, actual_start_stripe, start_stripe; - u64 fm_start, fm_end, fm_length, fm_end_offset; - u64 curr_loc; - int current_extent = 0, rc = 0, i; - /* Whether have we collected enough extents */ - bool enough = false; - int ost_eof = 0; /* EOF for object */ - int ost_done = 0; /* done with required mapping for this OST? */ - int last_stripe; - int cur_stripe = 0, cur_stripe_wrap = 0, stripe_count; - unsigned int buffer_size = FIEMAP_BUFFER_SIZE; - - if (!lsm_has_objects(lsm)) { - if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start < - fm_key->oa.o_size)) { - /* - * released file, return a minimal FIEMAP if - * request fits in file-size. - */ - fiemap->fm_mapped_extents = 1; - fiemap->fm_extents[0].fe_logical = - fm_key->fiemap.fm_start; - if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length < - fm_key->oa.o_size) { - fiemap->fm_extents[0].fe_length = - fm_key->fiemap.fm_length; - } else { - fiemap->fm_extents[0].fe_length = - fm_key->oa.o_size - fm_key->fiemap.fm_start; - fiemap->fm_extents[0].fe_flags |= - (FIEMAP_EXTENT_UNKNOWN | - FIEMAP_EXTENT_LAST); - } - } - rc = 0; - goto out; - } - - if (fiemap_count_to_size(fm_key->fiemap.fm_extent_count) < buffer_size) - buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count); - - fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS); - if (!fm_local) { - rc = -ENOMEM; - goto out; - } - lcl_fm_ext = &fm_local->fm_extents[0]; - - count_local = fiemap_size_to_count(buffer_size); - - memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap)); - fm_start = fiemap->fm_start; - fm_length = fiemap->fm_length; - /* Calculate start stripe, last stripe and length of mapping */ - start_stripe = lov_stripe_number(lsm, fm_start); - actual_start_stripe = start_stripe; - fm_end = (fm_length == ~0ULL ? 
fm_key->oa.o_size : - fm_start + fm_length - 1); - /* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */ - if (fm_end > fm_key->oa.o_size) - fm_end = fm_key->oa.o_size; - - last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end, - actual_start_stripe, - &stripe_count); - - fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, - fm_end, &start_stripe); - if (fm_end_offset == -EINVAL) { - rc = -EINVAL; - goto out; - } - - if (fiemap_count_to_size(fiemap->fm_extent_count) > *vallen) - fiemap->fm_extent_count = fiemap_size_to_count(*vallen); - if (fiemap->fm_extent_count == 0) { - get_num_extents = 1; - count_local = 0; - } - /* Check each stripe */ - for (cur_stripe = start_stripe, i = 0; i < stripe_count; - i++, cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) { - u64 req_fm_len; /* Stores length of required mapping */ - u64 len_mapped_single_call; - u64 lun_start, lun_end, obd_object_end; - unsigned int ext_count; - - cur_stripe_wrap = cur_stripe; - - /* Find out range of mapping on this stripe */ - if ((lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end, - &lun_start, &obd_object_end)) == 0) - continue; - - if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) { - rc = -EIO; - goto out; - } - - /* If this is a continuation FIEMAP call and we are on - * starting stripe then lun_start needs to be set to - * fm_end_offset - */ - if (fm_end_offset != 0 && cur_stripe == start_stripe) - lun_start = fm_end_offset; - - if (fm_length != ~0ULL) { - /* Handle fm_start + fm_length overflow */ - if (fm_start + fm_length < fm_start) - fm_length = ~0ULL - fm_start; - lun_end = lov_size_to_stripe(lsm, fm_start + fm_length, - cur_stripe); - } else { - lun_end = ~0ULL; - } - - if (lun_start == lun_end) - continue; - - req_fm_len = obd_object_end - lun_start; - fm_local->fm_length = 0; - len_mapped_single_call = 0; - - /* If the output buffer is very large and the objects have many - * extents we may need to loop on a single OST repeatedly - */ - ost_eof = 0; - ost_done = 0; - do { - if (get_num_extents == 0) { - /* Don't get too many extents. */ - if (current_extent + count_local > - fiemap->fm_extent_count) - count_local = fiemap->fm_extent_count - - current_extent; - } - - lun_start += len_mapped_single_call; - fm_local->fm_length = req_fm_len - len_mapped_single_call; - req_fm_len = fm_local->fm_length; - fm_local->fm_extent_count = enough ? 
1 : count_local; - fm_local->fm_mapped_extents = 0; - fm_local->fm_flags = fiemap->fm_flags; - - fm_key->oa.o_oi = lsm->lsm_oinfo[cur_stripe]->loi_oi; - ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx; - - if (ost_index < 0 || - ost_index >= lov->desc.ld_tgt_count) { - rc = -EINVAL; - goto out; - } - - /* If OST is inactive, return extent with UNKNOWN flag */ - if (!lov->lov_tgts[ost_index]->ltd_active) { - fm_local->fm_flags |= FIEMAP_EXTENT_LAST; - fm_local->fm_mapped_extents = 1; - - lcl_fm_ext[0].fe_logical = lun_start; - lcl_fm_ext[0].fe_length = obd_object_end - - lun_start; - lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN; - - goto inactive_tgt; - } - - fm_local->fm_start = lun_start; - fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER; - memcpy(&fm_key->fiemap, fm_local, sizeof(*fm_local)); - *vallen = fiemap_count_to_size(fm_local->fm_extent_count); - rc = obd_get_info(NULL, - lov->lov_tgts[ost_index]->ltd_exp, - keylen, key, vallen, fm_local, lsm); - if (rc != 0) - goto out; - -inactive_tgt: - ext_count = fm_local->fm_mapped_extents; - if (ext_count == 0) { - ost_done = 1; - /* If last stripe has hole at the end, - * then we need to return - */ - if (cur_stripe_wrap == last_stripe) { - fiemap->fm_mapped_extents = 0; - goto finish; - } - break; - } else if (enough) { - /* - * We've collected enough extents and there are - * more extents after it. - */ - goto finish; - } - - /* If we just need num of extents then go to next device */ - if (get_num_extents) { - current_extent += ext_count; - break; - } - - len_mapped_single_call = - lcl_fm_ext[ext_count - 1].fe_logical - - lun_start + lcl_fm_ext[ext_count - 1].fe_length; - - /* Have we finished mapping on this device? */ - if (req_fm_len <= len_mapped_single_call) - ost_done = 1; - - /* Clear the EXTENT_LAST flag which can be present on - * last extent - */ - if (lcl_fm_ext[ext_count - 1].fe_flags & - FIEMAP_EXTENT_LAST) - lcl_fm_ext[ext_count - 1].fe_flags &= - ~FIEMAP_EXTENT_LAST; - - curr_loc = lov_stripe_size(lsm, - lcl_fm_ext[ext_count - 1].fe_logical + - lcl_fm_ext[ext_count - 1].fe_length, - cur_stripe); - if (curr_loc >= fm_key->oa.o_size) - ost_eof = 1; - - fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext, - ost_index, ext_count, - current_extent); - - current_extent += ext_count; - - /* Ran out of available extents? */ - if (current_extent >= fiemap->fm_extent_count) - enough = true; - } while (ost_done == 0 && ost_eof == 0); - - if (cur_stripe_wrap == last_stripe) - goto finish; - } - -finish: - /* Indicate that we are returning device offsets unless file just has - * single stripe - */ - if (lsm->lsm_stripe_count > 1) - fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER; - - if (get_num_extents) - goto skip_last_device_calc; - - /* Check if we have reached the last stripe and whether mapping for that - * stripe is done. 
- */ - if (cur_stripe_wrap == last_stripe) { - if (ost_done || ost_eof) - fiemap->fm_extents[current_extent - 1].fe_flags |= - FIEMAP_EXTENT_LAST; - } - -skip_last_device_calc: - fiemap->fm_mapped_extents = current_extent; - -out: - kvfree(fm_local); - return rc; -} - static int lov_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val, - struct lov_stripe_md *lsm) + __u32 keylen, void *key, __u32 *vallen, void *val) { struct obd_device *obddev = class_exp2obd(exp); struct lov_obd *lov = &obddev->u.lov; - int rc; + struct lov_desc *ld = &lov->desc; + int rc = 0; if (!vallen || !val) return -EFAULT; obd_getref(obddev); - if (KEY_IS(KEY_LOVDESC)) { - struct lov_desc *desc_ret = val; - *desc_ret = lov->desc; + if (KEY_IS(KEY_MAX_EASIZE)) { + u32 max_stripe_count = min_t(u32, ld->ld_active_tgt_count, + LOV_MAX_STRIPE_COUNT); - rc = 0; - goto out; - } else if (KEY_IS(KEY_FIEMAP)) { - rc = lov_fiemap(lov, keylen, key, vallen, val, lsm); - goto out; + *((u32 *)val) = lov_mds_md_size(max_stripe_count, LOV_MAGIC_V3); + } else if (KEY_IS(KEY_DEFAULT_EASIZE)) { + u32 def_stripe_count = min_t(u32, ld->ld_default_stripe_count, + LOV_MAX_STRIPE_COUNT); + + *((u32 *)val) = lov_mds_md_size(def_stripe_count, LOV_MAGIC_V3); } else if (KEY_IS(KEY_TGT_COUNT)) { *((int *)val) = lov->desc.ld_tgt_count; - rc = 0; - goto out; + } else { + rc = -EINVAL; } - rc = -EINVAL; - -out: obd_putref(obddev); return rc; } @@ -1926,12 +1327,8 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp, __u64 bhardlimit = 0; int i, rc = 0; - if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON && - oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF && - oqctl->qc_cmd != Q_GETOQUOTA && - oqctl->qc_cmd != Q_INITQUOTA && - oqctl->qc_cmd != LUSTRE_Q_SETQUOTA && - oqctl->qc_cmd != Q_FINVALIDATE) { + if (oqctl->qc_cmd != Q_GETOQUOTA && + oqctl->qc_cmd != LUSTRE_Q_SETQUOTA) { CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd); return -EFAULT; } @@ -1978,63 +1375,15 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp, return rc; } -static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct lov_obd *lov = &obd->u.lov; - int i, rc = 0; - - obd_getref(obd); - - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - if (!lov->lov_tgts[i]) - continue; - - /* Skip quota check on the administratively disabled OSTs. 
*/ - if (!lov->lov_tgts[i]->ltd_activate) { - CWARN("lov idx %d was administratively disabled, skip quotacheck on it.\n", - i); - continue; - } - - if (!lov->lov_tgts[i]->ltd_active) { - CERROR("lov idx %d inactive\n", i); - rc = -EIO; - goto out; - } - } - - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - int err; - - if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_activate) - continue; - - err = obd_quotacheck(lov->lov_tgts[i]->ltd_exp, oqctl); - if (err && !rc) - rc = err; - } - -out: - obd_putref(obd); - - return rc; -} - static struct obd_ops lov_obd_ops = { .owner = THIS_MODULE, .setup = lov_setup, - .precleanup = lov_precleanup, .cleanup = lov_cleanup, /*.process_config = lov_process_config,*/ .connect = lov_connect, .disconnect = lov_disconnect, .statfs = lov_statfs, .statfs_async = lov_statfs_async, - .packmd = lov_packmd, - .unpackmd = lov_unpackmd, - .getattr_async = lov_getattr_async, - .setattr_async = lov_setattr_async, .iocontrol = lov_iocontrol, .get_info = lov_get_info, .set_info_async = lov_set_info_async, @@ -2046,7 +1395,6 @@ static struct obd_ops lov_obd_ops = { .getref = lov_getref, .putref = lov_putref, .quotactl = lov_quotactl, - .quotacheck = lov_quotacheck, }; struct kmem_cache *lov_oinfo_slab; diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c index 52f736338887938842d0b49ba509c3614df7fc30..76d4256fa828f6e6c6ca6f4132aa096de072130e 100644 --- a/drivers/staging/lustre/lustre/lov/lov_object.c +++ b/drivers/staging/lustre/lustre/lov/lov_object.c @@ -39,6 +39,11 @@ #include "lov_cl_internal.h" +static inline struct lov_device *lov_object_dev(struct lov_object *obj) +{ + return lu2lov_dev(obj->lo_cl.co_lu.lo_dev); +} + /** \addtogroup lov * @{ */ @@ -51,7 +56,7 @@ struct lov_layout_operations { int (*llo_init)(const struct lu_env *env, struct lov_device *dev, - struct lov_object *lov, + struct lov_object *lov, struct lov_stripe_md *lsm, const struct cl_object_conf *conf, union lov_layout_state *state); int (*llo_delete)(const struct lu_env *env, struct lov_object *lov, @@ -75,12 +80,11 @@ struct lov_layout_operations { static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov); -void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm) +static void lov_lsm_put(struct lov_stripe_md *lsm) { if (lsm) lov_free_memmd(&lsm); } -EXPORT_SYMBOL(lov_lsm_put); /***************************************************************************** * @@ -97,17 +101,17 @@ static void lov_install_empty(const struct lu_env *env, */ } -static int lov_init_empty(const struct lu_env *env, - struct lov_device *dev, struct lov_object *lov, +static int lov_init_empty(const struct lu_env *env, struct lov_device *dev, + struct lov_object *lov, struct lov_stripe_md *lsm, const struct cl_object_conf *conf, - union lov_layout_state *state) + union lov_layout_state *state) { return 0; } static void lov_install_raid0(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) + union lov_layout_state *state) { } @@ -212,8 +216,8 @@ static int lov_page_slice_fixup(struct lov_object *lov, return cl_object_header(stripe)->coh_page_bufsize; } -static int lov_init_raid0(const struct lu_env *env, - struct lov_device *dev, struct lov_object *lov, +static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev, + struct lov_object *lov, struct lov_stripe_md *lsm, const struct cl_object_conf *conf, union lov_layout_state *state) { @@ -223,7 +227,6 @@ static int lov_init_raid0(const struct 
lu_env *env, struct cl_object *stripe; struct lov_thread_info *lti = lov_env_info(env); struct cl_object_conf *subconf = <i->lti_stripe_conf; - struct lov_stripe_md *lsm = conf->u.coc_md->lsm; struct lu_fid *ofid = <i->lti_fid; struct lov_layout_raid0 *r0 = &state->raid0; @@ -298,13 +301,11 @@ static int lov_init_raid0(const struct lu_env *env, return result; } -static int lov_init_released(const struct lu_env *env, - struct lov_device *dev, struct lov_object *lov, +static int lov_init_released(const struct lu_env *env, struct lov_device *dev, + struct lov_object *lov, struct lov_stripe_md *lsm, const struct cl_object_conf *conf, union lov_layout_state *state) { - struct lov_stripe_md *lsm = conf->u.coc_md->lsm; - LASSERT(lsm); LASSERT(lsm_is_released(lsm)); LASSERT(!lov->lo_lsm); @@ -313,6 +314,40 @@ static int lov_init_released(const struct lu_env *env, return 0; } +static struct cl_object *lov_find_subobj(const struct lu_env *env, + struct lov_object *lov, + struct lov_stripe_md *lsm, + int stripe_idx) +{ + struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev); + struct lov_oinfo *oinfo = lsm->lsm_oinfo[stripe_idx]; + struct lov_thread_info *lti = lov_env_info(env); + struct lu_fid *ofid = <i->lti_fid; + struct cl_device *subdev; + struct cl_object *result; + int ost_idx; + int rc; + + if (lov->lo_type != LLT_RAID0) { + result = NULL; + goto out; + } + + ost_idx = oinfo->loi_ost_idx; + rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx); + if (rc) { + result = NULL; + goto out; + } + + subdev = lovsub2cl_dev(dev->ld_target[ost_idx]); + result = lov_sub_find(env, subdev, ofid, NULL); +out: + if (!result) + result = ERR_PTR(-EINVAL); + return result; +} + static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov, union lov_layout_state *state) { @@ -687,31 +722,24 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov) } static int lov_layout_change(const struct lu_env *unused, - struct lov_object *lov, + struct lov_object *lov, struct lov_stripe_md *lsm, const struct cl_object_conf *conf) { - int result; - enum lov_layout_type llt = LLT_EMPTY; + enum lov_layout_type llt = lov_type(lsm); union lov_layout_state *state = &lov->u; const struct lov_layout_operations *old_ops; const struct lov_layout_operations *new_ops; - - void *cookie; struct lu_env *env; int refcheck; + int rc; LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch)); - if (conf->u.coc_md) - llt = lov_type(conf->u.coc_md->lsm); - LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch)); - - cookie = cl_env_reenter(); env = cl_env_get(&refcheck); - if (IS_ERR(env)) { - cl_env_reexit(cookie); + if (IS_ERR(env)) return PTR_ERR(env); - } + + LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch)); CDEBUG(D_INODE, DFID" from %s to %s\n", PFID(lu_object_fid(lov2lu(lov))), @@ -720,38 +748,37 @@ static int lov_layout_change(const struct lu_env *unused, old_ops = &lov_dispatch[lov->lo_type]; new_ops = &lov_dispatch[llt]; - result = cl_object_prune(env, &lov->lo_cl); - if (result != 0) + rc = cl_object_prune(env, &lov->lo_cl); + if (rc) + goto out; + + rc = old_ops->llo_delete(env, lov, &lov->u); + if (rc) goto out; - result = old_ops->llo_delete(env, lov, &lov->u); - if (result == 0) { - old_ops->llo_fini(env, lov, &lov->u); + old_ops->llo_fini(env, lov, &lov->u); - LASSERT(atomic_read(&lov->lo_active_ios) == 0); + LASSERT(!atomic_read(&lov->lo_active_ios)); - lov->lo_type = LLT_EMPTY; - /* page bufsize fixup */ - cl_object_header(&lov->lo_cl)->coh_page_bufsize -= + lov->lo_type = 
LLT_EMPTY; + + /* page bufsize fixup */ + cl_object_header(&lov->lo_cl)->coh_page_bufsize -= lov_page_slice_fixup(lov, NULL); - result = new_ops->llo_init(env, - lu2lov_dev(lov->lo_cl.co_lu.lo_dev), - lov, conf, state); - if (result == 0) { - new_ops->llo_install(env, lov, state); - lov->lo_type = llt; - } else { - new_ops->llo_delete(env, lov, state); - new_ops->llo_fini(env, lov, state); - /* this file becomes an EMPTY file. */ - } + rc = new_ops->llo_init(env, lov_object_dev(lov), lov, lsm, conf, state); + if (rc) { + new_ops->llo_delete(env, lov, state); + new_ops->llo_fini(env, lov, state); + /* this file becomes an EMPTY file. */ + goto out; } + new_ops->llo_install(env, lov, state); + lov->lo_type = llt; out: cl_env_put(env, &refcheck); - cl_env_reexit(cookie); - return result; + return rc; } /***************************************************************************** @@ -762,26 +789,38 @@ static int lov_layout_change(const struct lu_env *unused, int lov_object_init(const struct lu_env *env, struct lu_object *obj, const struct lu_object_conf *conf) { - struct lov_device *dev = lu2lov_dev(obj->lo_dev); struct lov_object *lov = lu2lov(obj); + struct lov_device *dev = lov_object_dev(lov); const struct cl_object_conf *cconf = lu2cl_conf(conf); union lov_layout_state *set = &lov->u; const struct lov_layout_operations *ops; - int result; + struct lov_stripe_md *lsm = NULL; + int rc; init_rwsem(&lov->lo_type_guard); atomic_set(&lov->lo_active_ios, 0); init_waitqueue_head(&lov->lo_waitq); - cl_object_page_init(lu2cl(obj), sizeof(struct lov_page)); + lov->lo_type = LLT_EMPTY; + if (cconf->u.coc_layout.lb_buf) { + lsm = lov_unpackmd(dev->ld_lov, + cconf->u.coc_layout.lb_buf, + cconf->u.coc_layout.lb_len); + if (IS_ERR(lsm)) + return PTR_ERR(lsm); + } + /* no locking is necessary, as object is being created */ - lov->lo_type = lov_type(cconf->u.coc_md->lsm); + lov->lo_type = lov_type(lsm); ops = &lov_dispatch[lov->lo_type]; - result = ops->llo_init(env, dev, lov, cconf, set); - if (result == 0) + rc = ops->llo_init(env, dev, lov, lsm, cconf, set); + if (!rc) ops->llo_install(env, lov, set); - return result; + + lov_lsm_put(lsm); + + return rc; } static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, @@ -791,6 +830,15 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, struct lov_object *lov = cl2lov(obj); int result = 0; + if (conf->coc_opc == OBJECT_CONF_SET && + conf->u.coc_layout.lb_buf) { + lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov, + conf->u.coc_layout.lb_buf, + conf->u.coc_layout.lb_len); + if (IS_ERR(lsm)) + return PTR_ERR(lsm); + } + lov_conf_lock(lov); if (conf->coc_opc == OBJECT_CONF_INVALIDATE) { lov->lo_layout_invalid = true; @@ -810,8 +858,6 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, LASSERT(conf->coc_opc == OBJECT_CONF_SET); - if (conf->u.coc_md) - lsm = conf->u.coc_md->lsm; if ((!lsm && !lov->lo_lsm) || ((lsm && lov->lo_lsm) && (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) && @@ -829,11 +875,12 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, goto out; } - result = lov_layout_change(env, lov, conf); + result = lov_layout_change(env, lov, lsm, conf); lov->lo_layout_invalid = result != 0; out: lov_conf_unlock(lov); + lov_lsm_put(lsm); CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n", PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid); return result; @@ -911,6 +958,473 @@ int lov_lock_init(const struct lu_env *env, struct cl_object *obj, io); } +/** + * We 
calculate on which OST the mapping will end. If the length of mapping + * is greater than (stripe_size * stripe_count) then the last_stripe will + * will be one just before start_stripe. Else we check if the mapping + * intersects each OST and find last_stripe. + * This function returns the last_stripe and also sets the stripe_count + * over which the mapping is spread + * + * \param lsm [in] striping information for the file + * \param fm_start [in] logical start of mapping + * \param fm_end [in] logical end of mapping + * \param start_stripe [in] starting stripe of the mapping + * \param stripe_count [out] the number of stripes across which to map is + * returned + * + * \retval last_stripe return the last stripe of the mapping + */ +static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, + loff_t fm_start, loff_t fm_end, + int start_stripe, int *stripe_count) +{ + int last_stripe; + loff_t obd_start; + loff_t obd_end; + int i, j; + + if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) { + last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 : + start_stripe - 1); + *stripe_count = lsm->lsm_stripe_count; + } else { + for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count; + i = (i + 1) % lsm->lsm_stripe_count, j++) { + if (!(lov_stripe_intersects(lsm, i, fm_start, fm_end, + &obd_start, &obd_end))) + break; + } + *stripe_count = j; + last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count; + } + + return last_stripe; +} + +/** + * Set fe_device and copy extents from local buffer into main return buffer. + * + * \param fiemap [out] fiemap to hold all extents + * \param lcl_fm_ext [in] array of fiemap extents get from OSC layer + * \param ost_index [in] OST index to be written into the fm_device + * field for each extent + * \param ext_count [in] number of extents to be copied + * \param current_extent [in] where to start copying in the extent array + */ +static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap, + struct fiemap_extent *lcl_fm_ext, + int ost_index, unsigned int ext_count, + int current_extent) +{ + unsigned int ext; + char *to; + + for (ext = 0; ext < ext_count; ext++) { + lcl_fm_ext[ext].fe_device = ost_index; + lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET; + } + + /* Copy fm_extent's from fm_local to return buffer */ + to = (char *)fiemap + fiemap_count_to_size(current_extent); + memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent)); +} + +#define FIEMAP_BUFFER_SIZE 4096 + +/** + * Non-zero fe_logical indicates that this is a continuation FIEMAP + * call. The local end offset and the device are sent in the first + * fm_extent. This function calculates the stripe number from the index. + * This function returns a stripe_no on which mapping is to be restarted. + * + * This function returns fm_end_offset which is the in-OST offset at which + * mapping should be restarted. If fm_end_offset=0 is returned then caller + * will re-calculate proper offset in next stripe. + * Note that the first extent is passed to lov_get_info via the value field. 
+ * + * \param fiemap [in] fiemap request header + * \param lsm [in] striping information for the file + * \param fm_start [in] logical start of mapping + * \param fm_end [in] logical end of mapping + * \param start_stripe [out] starting stripe will be returned in this + */ +static loff_t fiemap_calc_fm_end_offset(struct fiemap *fiemap, + struct lov_stripe_md *lsm, + loff_t fm_start, loff_t fm_end, + int *start_stripe) +{ + loff_t local_end = fiemap->fm_extents[0].fe_logical; + loff_t lun_start, lun_end; + loff_t fm_end_offset; + int stripe_no = -1; + int i; + + if (!fiemap->fm_extent_count || !fiemap->fm_extents[0].fe_logical) + return 0; + + /* Find out stripe_no from ost_index saved in the fe_device */ + for (i = 0; i < lsm->lsm_stripe_count; i++) { + struct lov_oinfo *oinfo = lsm->lsm_oinfo[i]; + + if (lov_oinfo_is_dummy(oinfo)) + continue; + + if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) { + stripe_no = i; + break; + } + } + + if (stripe_no == -1) + return -EINVAL; + + /* + * If we have finished mapping on previous device, shift logical + * offset to start of next device + */ + if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end, + &lun_start, &lun_end) && + local_end < lun_end) { + fm_end_offset = local_end; + *start_stripe = stripe_no; + } else { + /* This is a special value to indicate that caller should + * calculate offset in next stripe. + */ + fm_end_offset = 0; + *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count; + } + + return fm_end_offset; +} + +/** + * Break down the FIEMAP request and send appropriate calls to individual OSTs. + * This also handles the restarting of FIEMAP calls in case mapping overflows + * the available number of extents in single call. + * + * \param env [in] lustre environment + * \param obj [in] file object + * \param fmkey [in] fiemap request header and other info + * \param fiemap [out] fiemap buffer holding retrived map extents + * \param buflen [in/out] max buffer length of @fiemap, when iterate + * each OST, it is used to limit max map needed + * \retval 0 success + * \retval < 0 error + */ +static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, + struct fiemap *fiemap, size_t *buflen) +{ + struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov; + unsigned int buffer_size = FIEMAP_BUFFER_SIZE; + struct fiemap_extent *lcl_fm_ext; + struct cl_object *subobj = NULL; + struct fiemap *fm_local = NULL; + struct lov_stripe_md *lsm; + loff_t fm_start; + loff_t fm_end; + loff_t fm_length; + loff_t fm_end_offset; + int count_local; + int ost_index = 0; + int start_stripe; + int current_extent = 0; + int rc = 0; + int last_stripe; + int cur_stripe = 0; + int cur_stripe_wrap = 0; + int stripe_count; + /* Whether have we collected enough extents */ + bool enough = false; + /* EOF for object */ + bool ost_eof = false; + /* done with required mapping for this OST? */ + bool ost_done = false; + + lsm = lov_lsm_addref(cl2lov(obj)); + if (!lsm) + return -ENODATA; + + /** + * If the stripe_count > 1 and the application does not understand + * DEVICE_ORDER flag, it cannot interpret the extents correctly. + */ + if (lsm->lsm_stripe_count > 1 && + !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) { + rc = -ENOTSUPP; + goto out; + } + + if (lsm_is_released(lsm)) { + if (fiemap->fm_start < fmkey->lfik_oa.o_size) { + /** + * released file, return a minimal FIEMAP if + * request fits in file-size. 
+ */ + fiemap->fm_mapped_extents = 1; + fiemap->fm_extents[0].fe_logical = fiemap->fm_start; + if (fiemap->fm_start + fiemap->fm_length < + fmkey->lfik_oa.o_size) + fiemap->fm_extents[0].fe_length = + fiemap->fm_length; + else + fiemap->fm_extents[0].fe_length = + fmkey->lfik_oa.o_size - + fiemap->fm_start; + fiemap->fm_extents[0].fe_flags |= + FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST; + } + rc = 0; + goto out; + } + + if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size) + buffer_size = fiemap_count_to_size(fiemap->fm_extent_count); + + fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS); + if (!fm_local) { + rc = -ENOMEM; + goto out; + } + lcl_fm_ext = &fm_local->fm_extents[0]; + count_local = fiemap_size_to_count(buffer_size); + + fm_start = fiemap->fm_start; + fm_length = fiemap->fm_length; + /* Calculate start stripe, last stripe and length of mapping */ + start_stripe = lov_stripe_number(lsm, fm_start); + fm_end = (fm_length == ~0ULL) ? fmkey->lfik_oa.o_size : + fm_start + fm_length - 1; + /* If fm_length != ~0ULL but fm_start_fm_length-1 exceeds file size */ + if (fm_end > fmkey->lfik_oa.o_size) + fm_end = fmkey->lfik_oa.o_size; + + last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end, + start_stripe, &stripe_count); + fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end, + &start_stripe); + if (fm_end_offset == -EINVAL) { + rc = -EINVAL; + goto out; + } + + /** + * Requested extent count exceeds the fiemap buffer size, shrink our + * ambition. + */ + if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen) + fiemap->fm_extent_count = fiemap_size_to_count(*buflen); + if (!fiemap->fm_extent_count) + count_local = 0; + + /* Check each stripe */ + for (cur_stripe = start_stripe; stripe_count > 0; + --stripe_count, + cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) { + loff_t req_fm_len; /* Stores length of required mapping */ + loff_t len_mapped_single_call; + loff_t lun_start; + loff_t lun_end; + loff_t obd_object_end; + unsigned int ext_count; + + cur_stripe_wrap = cur_stripe; + + /* Find out range of mapping on this stripe */ + if (!(lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end, + &lun_start, &obd_object_end))) + continue; + + if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) { + rc = -EIO; + goto out; + } + + /* + * If this is a continuation FIEMAP call and we are on + * starting stripe then lun_start needs to be set to + * fm_end_offset + */ + if (fm_end_offset && cur_stripe == start_stripe) + lun_start = fm_end_offset; + + if (fm_length != ~0ULL) { + /* Handle fm_start + fm_length overflow */ + if (fm_start + fm_length < fm_start) + fm_length = ~0ULL - fm_start; + lun_end = lov_size_to_stripe(lsm, fm_start + fm_length, + cur_stripe); + } else { + lun_end = ~0ULL; + } + + if (lun_start == lun_end) + continue; + + req_fm_len = obd_object_end - lun_start; + fm_local->fm_length = 0; + len_mapped_single_call = 0; + + /* find lobsub object */ + subobj = lov_find_subobj(env, cl2lov(obj), lsm, + cur_stripe); + if (IS_ERR(subobj)) { + rc = PTR_ERR(subobj); + goto out; + } + /* + * If the output buffer is very large and the objects have many + * extents we may need to loop on a single OST repeatedly + */ + ost_eof = false; + ost_done = false; + do { + if (fiemap->fm_extent_count > 0) { + /* Don't get too many extents. 
*/ + if (current_extent + count_local > + fiemap->fm_extent_count) + count_local = fiemap->fm_extent_count - + current_extent; + } + + lun_start += len_mapped_single_call; + fm_local->fm_length = req_fm_len - + len_mapped_single_call; + req_fm_len = fm_local->fm_length; + fm_local->fm_extent_count = enough ? 1 : count_local; + fm_local->fm_mapped_extents = 0; + fm_local->fm_flags = fiemap->fm_flags; + + ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx; + + if (ost_index < 0 || + ost_index >= lov->desc.ld_tgt_count) { + rc = -EINVAL; + goto obj_put; + } + /* + * If OST is inactive, return extent with UNKNOWN + * flag. + */ + if (!lov->lov_tgts[ost_index]->ltd_active) { + fm_local->fm_flags |= FIEMAP_EXTENT_LAST; + fm_local->fm_mapped_extents = 1; + + lcl_fm_ext[0].fe_logical = lun_start; + lcl_fm_ext[0].fe_length = obd_object_end - + lun_start; + lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN; + + goto inactive_tgt; + } + + fm_local->fm_start = lun_start; + fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER; + memcpy(&fmkey->lfik_fiemap, fm_local, sizeof(*fm_local)); + *buflen = fiemap_count_to_size(fm_local->fm_extent_count); + + rc = cl_object_fiemap(env, subobj, fmkey, fm_local, + buflen); + if (rc) + goto obj_put; +inactive_tgt: + ext_count = fm_local->fm_mapped_extents; + if (!ext_count) { + ost_done = true; + /* + * If last stripe has hold at the end, + * we need to return + */ + if (cur_stripe_wrap == last_stripe) { + fiemap->fm_mapped_extents = 0; + goto finish; + } + break; + } else if (enough) { + /* + * We've collected enough extents and there are + * more extents after it. + */ + goto finish; + } + + /* If we just need num of extents, got to next device */ + if (!fiemap->fm_extent_count) { + current_extent += ext_count; + break; + } + + /* prepare to copy retrived map extents */ + len_mapped_single_call = + lcl_fm_ext[ext_count - 1].fe_logical - + lun_start + lcl_fm_ext[ext_count - 1].fe_length; + + /* Have we finished mapping on this device? */ + if (req_fm_len <= len_mapped_single_call) + ost_done = true; + + /* + * Clear the EXTENT_LAST flag which can be present on + * the last extent + */ + if (lcl_fm_ext[ext_count - 1].fe_flags & + FIEMAP_EXTENT_LAST) + lcl_fm_ext[ext_count - 1].fe_flags &= + ~FIEMAP_EXTENT_LAST; + + if (lov_stripe_size(lsm, + lcl_fm_ext[ext_count - 1].fe_logical + + lcl_fm_ext[ext_count - 1].fe_length, + cur_stripe) >= fmkey->lfik_oa.o_size) + ost_eof = true; + + fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext, + ost_index, ext_count, + current_extent); + current_extent += ext_count; + + /* Ran out of available extents? */ + if (current_extent >= fiemap->fm_extent_count) + enough = true; + } while (!ost_done && !ost_eof); + + cl_object_put(env, subobj); + subobj = NULL; + + if (cur_stripe_wrap == last_stripe) + goto finish; + } /* for each stripe */ +finish: + /* + * Indicate that we are returning device offsets unless file just has + * single stripe + */ + if (lsm->lsm_stripe_count > 1) + fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER; + + if (!fiemap->fm_extent_count) + goto skip_last_device_calc; + + /* + * Check if we have reached the last stripe and whether mapping for that + * stripe is done. 
+ */ + if ((cur_stripe_wrap == last_stripe) && (ost_done || ost_eof)) + fiemap->fm_extents[current_extent - 1].fe_flags |= + FIEMAP_EXTENT_LAST; +skip_last_device_calc: + fiemap->fm_mapped_extents = current_extent; +obj_put: + if (subobj) + cl_object_put(env, subobj); +out: + kvfree(fm_local); + lov_lsm_put(lsm); + return rc; +} + static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj, struct lov_user_md __user *lum) { @@ -923,10 +1437,53 @@ static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj, return -ENODATA; rc = lov_getstripe(cl2lov(obj), lsm, lum); - lov_lsm_put(obj, lsm); + lov_lsm_put(lsm); return rc; } +static int lov_object_layout_get(const struct lu_env *env, + struct cl_object *obj, + struct cl_layout *cl) +{ + struct lov_object *lov = cl2lov(obj); + struct lov_stripe_md *lsm = lov_lsm_addref(lov); + struct lu_buf *buf = &cl->cl_buf; + ssize_t rc; + + if (!lsm) { + cl->cl_size = 0; + cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY; + cl->cl_is_released = false; + + return 0; + } + + cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic); + cl->cl_layout_gen = lsm->lsm_layout_gen; + cl->cl_is_released = lsm_is_released(lsm); + + rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len); + lov_lsm_put(lsm); + + return rc < 0 ? rc : 0; +} + +static loff_t lov_object_maxbytes(struct cl_object *obj) +{ + struct lov_object *lov = cl2lov(obj); + struct lov_stripe_md *lsm = lov_lsm_addref(lov); + loff_t maxbytes; + + if (!lsm) + return LLONG_MAX; + + maxbytes = lsm->lsm_maxbytes; + + lov_lsm_put(lsm); + + return maxbytes; +} + static const struct cl_object_operations lov_ops = { .coo_page_init = lov_page_init, .coo_lock_init = lov_lock_init, @@ -934,7 +1491,10 @@ static const struct cl_object_operations lov_ops = { .coo_attr_get = lov_attr_get, .coo_attr_update = lov_attr_update, .coo_conf_set = lov_conf_set, - .coo_getstripe = lov_object_getstripe + .coo_getstripe = lov_object_getstripe, + .coo_layout_get = lov_object_layout_get, + .coo_maxbytes = lov_object_maxbytes, + .coo_fiemap = lov_object_fiemap, }; static const struct lu_object_operations lov_lu_obj_ops = { @@ -986,22 +1546,6 @@ struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov) return lsm; } -struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj) -{ - struct lu_object *luobj; - struct lov_stripe_md *lsm = NULL; - - if (!clobj) - return NULL; - - luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu, - &lov_device_type); - if (luobj) - lsm = lov_lsm_addref(lu2lov(luobj)); - return lsm; -} -EXPORT_SYMBOL(lov_lsm_get); - int lov_read_and_clear_async_rc(struct cl_object *clob) { struct lu_object *luobj; diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c index be6e9857ce2a911e87705179813a51e1aab7fc81..6c93d180aef7f03056d631c5415ace823e4f1092 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pack.c +++ b/drivers/staging/lustre/lustre/lov/lov_pack.c @@ -38,14 +38,17 @@ #define DEBUG_SUBSYSTEM S_LOV +#include "../include/lustre/lustre_idl.h" +#include "../include/lustre/lustre_user.h" + #include "../include/lustre_net.h" +#include "../include/lustre_swab.h" #include "../include/obd.h" #include "../include/obd_class.h" #include "../include/obd_support.h" -#include "../include/lustre/lustre_user.h" -#include "lov_internal.h" #include "lov_cl_internal.h" +#include "lov_internal.h" void lov_dump_lmm_common(int level, void *lmmp) { @@ -97,120 +100,54 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm) 
le16_to_cpu(lmm->lmm_stripe_count)); } -/* Pack LOV object metadata for disk storage. It is packed in LE byte - * order and is opaque to the networking layer. +/** + * Pack LOV striping metadata for disk storage format (in little + * endian byte order). * - * XXX In the future, this will be enhanced to get the EA size from the - * underlying OSC device(s) to get their EA sizes so we can stack - * LOVs properly. For now lov_mds_md_size() just assumes one u64 - * per stripe. + * This follows the getxattr() conventions. If \a buf_size is zero + * then return the size needed. If \a buf_size is too small then + * return -ERANGE. Otherwise return the size of the result. */ -int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp, - struct lov_stripe_md *lsm) +ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf, + size_t buf_size) { - struct lov_mds_md_v1 *lmmv1; - struct lov_mds_md_v3 *lmmv3; - __u16 stripe_count; struct lov_ost_data_v1 *lmm_objects; - int lmm_size, lmm_magic; - int i; - int cplen = 0; - - if (lsm) { - lmm_magic = lsm->lsm_magic; - } else { - if (lmmp && *lmmp) - lmm_magic = le32_to_cpu((*lmmp)->lmm_magic); - else - /* lsm == NULL and lmmp == NULL */ - lmm_magic = LOV_MAGIC; - } - - if ((lmm_magic != LOV_MAGIC_V1) && - (lmm_magic != LOV_MAGIC_V3)) { - CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", - lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); - return -EINVAL; - } - - if (lsm) { - /* If we are just sizing the EA, limit the stripe count - * to the actual number of OSTs in this filesystem. - */ - if (!lmmp) { - stripe_count = lov_get_stripecnt(lov, lmm_magic, - lsm->lsm_stripe_count); - lsm->lsm_stripe_count = stripe_count; - } else if (!lsm_is_released(lsm)) { - stripe_count = lsm->lsm_stripe_count; - } else { - stripe_count = 0; - } - } else { - /* - * To calculate maximum easize by active targets at present, - * which is exactly the maximum easize to be seen by LOV - */ - stripe_count = lov->desc.ld_active_tgt_count; - } + struct lov_mds_md_v1 *lmmv1 = buf; + struct lov_mds_md_v3 *lmmv3 = buf; + size_t lmm_size; + unsigned int i; - /* XXX LOV STACKING call into osc for sizes */ - lmm_size = lov_mds_md_size(stripe_count, lmm_magic); - - if (!lmmp) + lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic); + if (!buf_size) return lmm_size; - if (*lmmp && !lsm) { - stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count); - lmm_size = lov_mds_md_size(stripe_count, lmm_magic); - kvfree(*lmmp); - *lmmp = NULL; - return 0; - } - - if (!*lmmp) { - *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS); - if (!*lmmp) - return -ENOMEM; - } - - CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n", - lmm_magic, lmm_size); - - lmmv1 = *lmmp; - lmmv3 = (struct lov_mds_md_v3 *)*lmmp; - if (lmm_magic == LOV_MAGIC_V3) - lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3); - else - lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1); - - if (!lsm) - return lmm_size; + if (buf_size < lmm_size) + return -ERANGE; - /* lmmv1 and lmmv3 point to the same struct and have the + /* + * lmmv1 and lmmv3 point to the same struct and have the * same first fields */ + lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic); lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi); lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size); - lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count); + lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count); lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern); lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen); + if (lsm->lsm_magic == LOV_MAGIC_V3) { - 
cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name, - sizeof(lmmv3->lmm_pool_name)); - if (cplen >= sizeof(lmmv3->lmm_pool_name)) - return -E2BIG; + CLASSERT(sizeof(lsm->lsm_pool_name) == + sizeof(lmmv3->lmm_pool_name)); + strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name, + sizeof(lmmv3->lmm_pool_name)); lmm_objects = lmmv3->lmm_objects; } else { lmm_objects = lmmv1->lmm_objects; } - for (i = 0; i < stripe_count; i++) { + for (i = 0; i < lsm->lsm_stripe_count; i++) { struct lov_oinfo *loi = lsm->lsm_oinfo[i]; - /* XXX LOV STACKING call down to osc_packmd() to do packing */ - LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID - " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi), - i, stripe_count, loi->loi_ost_idx); + ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi); lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen); lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx); @@ -219,15 +156,6 @@ int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp, return lmm_size; } -int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, - struct lov_stripe_md *lsm) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lov_obd *lov = &obd->u.lov; - - return lov_obd_packmd(lov, lmmp, lsm); -} - /* Find the max stripecount we should use */ __u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count) { @@ -270,34 +198,34 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count) return rc; } -int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count, - int pattern, int magic) +struct lov_stripe_md *lov_lsm_alloc(u16 stripe_count, u32 pattern, u32 magic) { - int i, lsm_size; + struct lov_stripe_md *lsm; + unsigned int i; - CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count); + CDEBUG(D_INFO, "alloc lsm, stripe_count %u\n", stripe_count); - *lsmp = lsm_alloc_plain(stripe_count, &lsm_size); - if (!*lsmp) { - CERROR("can't allocate lsmp stripe_count %d\n", stripe_count); - return -ENOMEM; + lsm = lsm_alloc_plain(stripe_count); + if (!lsm) { + CERROR("cannot allocate LSM stripe_count %u\n", stripe_count); + return ERR_PTR(-ENOMEM); } - atomic_set(&(*lsmp)->lsm_refc, 1); - spin_lock_init(&(*lsmp)->lsm_lock); - (*lsmp)->lsm_magic = magic; - (*lsmp)->lsm_stripe_count = stripe_count; - (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count; - (*lsmp)->lsm_pattern = pattern; - (*lsmp)->lsm_pool_name[0] = '\0'; - (*lsmp)->lsm_layout_gen = 0; + atomic_set(&lsm->lsm_refc, 1); + spin_lock_init(&lsm->lsm_lock); + lsm->lsm_magic = magic; + lsm->lsm_stripe_count = stripe_count; + lsm->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count; + lsm->lsm_pattern = pattern; + lsm->lsm_pool_name[0] = '\0'; + lsm->lsm_layout_gen = 0; if (stripe_count > 0) - (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0; + lsm->lsm_oinfo[0]->loi_ost_idx = ~0; for (i = 0; i < stripe_count; i++) - loi_init((*lsmp)->lsm_oinfo[i]); + loi_init(lsm->lsm_oinfo[i]); - return lsm_size; + return lsm; } int lov_free_memmd(struct lov_stripe_md **lsmp) @@ -317,56 +245,34 @@ int lov_free_memmd(struct lov_stripe_md **lsmp) /* Unpack LOV object metadata from disk storage. It is packed in LE byte * order and is opaque to the networking layer. 
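As a usage note, the sizing convention documented for lov_lsm_pack() mirrors getxattr(): probe with a zero-length buffer to learn the required size, allocate, then pack for real. The sketch below is a hypothetical caller (example_pack_lsm() is not part of the patch); it only reuses calls the patch itself introduces, and error handling is abbreviated.

/* Hypothetical caller illustrating the two-call pack convention. */
static ssize_t example_pack_lsm(const struct lov_stripe_md *lsm, void **bufp)
{
	ssize_t size = lov_lsm_pack(lsm, NULL, 0);	/* size probe */
	void *buf;
	ssize_t rc;

	if (size <= 0)
		return size;

	buf = libcfs_kvzalloc(size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	rc = lov_lsm_pack(lsm, buf, size);	/* -ERANGE if buf were too small */
	if (rc < 0) {
		kvfree(buf);
		return rc;
	}

	*bufp = buf;
	return rc;
}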
*/ -int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, - struct lov_mds_md *lmm, int lmm_bytes) +struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm, + size_t lmm_size) { - struct obd_device *obd = class_exp2obd(exp); - struct lov_obd *lov = &obd->u.lov; - int rc = 0, lsm_size; - __u16 stripe_count; - __u32 magic; - __u32 pattern; - - /* If passed an MDS struct use values from there, otherwise defaults */ - if (lmm) { - rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count); - if (rc) - return rc; - magic = le32_to_cpu(lmm->lmm_magic); - pattern = le32_to_cpu(lmm->lmm_pattern); - } else { - magic = LOV_MAGIC; - stripe_count = lov_get_stripecnt(lov, magic, 0); - pattern = LOV_PATTERN_RAID0; - } + struct lov_stripe_md *lsm; + u16 stripe_count; + u32 pattern; + u32 magic; + int rc; - /* If we aren't passed an lsmp struct, we just want the size */ - if (!lsmp) { - /* XXX LOV STACKING call into osc for sizes */ - LBUG(); - return lov_stripe_md_size(stripe_count); - } - /* If we are passed an allocated struct but nothing to unpack, free */ - if (*lsmp && !lmm) { - lov_free_memmd(lsmp); - return 0; - } + rc = lov_verify_lmm(lmm, lmm_size, &stripe_count); + if (rc) + return ERR_PTR(rc); - lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic); - if (lsm_size < 0) - return lsm_size; + magic = le32_to_cpu(lmm->lmm_magic); + pattern = le32_to_cpu(lmm->lmm_pattern); - /* If we are passed a pointer but nothing to unpack, we only alloc */ - if (!lmm) - return lsm_size; + lsm = lov_lsm_alloc(stripe_count, pattern, magic); + if (IS_ERR(lsm)) + return lsm; - rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm); + LASSERT(lsm_op_find(magic)); + rc = lsm_op_find(magic)->lsm_unpackmd(lov, lsm, lmm); if (rc) { - lov_free_memmd(lsmp); - return rc; + lov_free_memmd(&lsm); + return ERR_PTR(rc); } - return lsm_size; + return lsm; } /* Retrieve object striping information. @@ -378,15 +284,14 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, struct lov_user_md __user *lump) { - /* - * XXX huge struct allocated on stack. 
- */ /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */ - struct lov_obd *lov; struct lov_user_md_v3 lum; - struct lov_mds_md *lmmk = NULL; - int rc, lmmk_size, lmm_size; - int lum_size; + struct lov_mds_md *lmmk; + u32 stripe_count; + ssize_t lmm_size; + size_t lmmk_size; + size_t lum_size; + int rc; mm_segment_t seg; if (!lsm) @@ -399,6 +304,18 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, seg = get_fs(); set_fs(KERNEL_DS); + if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { + CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", + lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); + rc = -EIO; + goto out; + } + + if (!lsm_is_released(lsm)) + stripe_count = lsm->lsm_stripe_count; + else + stripe_count = 0; + /* we only need the header part from user space to get lmm_magic and * lmm_stripe_count, (the header part is common to v1 and v3) */ @@ -417,32 +334,40 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, if (lum.lmm_stripe_count && (lum.lmm_stripe_count < lsm->lsm_stripe_count)) { /* Return right size of stripe to user */ - lum.lmm_stripe_count = lsm->lsm_stripe_count; + lum.lmm_stripe_count = stripe_count; rc = copy_to_user(lump, &lum, lum_size); rc = -EOVERFLOW; goto out; } - lov = lu2lov_dev(obj->lo_cl.co_lu.lo_dev)->ld_lov; - rc = lov_obd_packmd(lov, &lmmk, lsm); - if (rc < 0) + lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic); + + + lmmk = libcfs_kvzalloc(lmmk_size, GFP_NOFS); + if (!lmmk) { + rc = -ENOMEM; goto out; - lmmk_size = rc; - lmm_size = rc; - rc = 0; + } + + lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size); + if (lmm_size < 0) { + rc = lmm_size; + goto out_free; + } /* FIXME: Bug 1185 - copy fields properly when structs change */ /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */ CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3)); CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0])); - if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) && - ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) || - (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) { + if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC && + (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) || + lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) { lustre_swab_lov_mds_md(lmmk); lustre_swab_lov_user_md_objects( (struct lov_user_ost_data *)lmmk->lmm_objects, lmmk->lmm_stripe_count); } + if (lum.lmm_magic == LOV_USER_MAGIC) { /* User request for v1, we need skip lmm_pool_name */ if (lmmk->lmm_magic == LOV_MAGIC_V3) { @@ -474,9 +399,11 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count; if (copy_to_user(lump, lmmk, lmm_size)) rc = -EFAULT; + else + rc = 0; out_free: - kfree(lmmk); + kvfree(lmmk); out: set_fs(seg); return rc; diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c index 00bfabad78eb42e41d2b4569268a74ecb33d5e5b..62ceb6dfdfdf3a46c4328d25d18fbe2ef69f037b 100644 --- a/drivers/staging/lustre/lustre/lov/lov_page.c +++ b/drivers/staging/lustre/lustre/lov/lov_page.c @@ -49,51 +49,6 @@ * */ -/** - * Adjust the stripe index by layout of raid0. @max_index is the maximum - * page index covered by an underlying DLM lock. - * This function converts max_index from stripe level to file level, and make - * sure it's not beyond one stripe. 
- */ -static int lov_raid0_page_is_under_lock(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused, - pgoff_t *max_index) -{ - struct lov_object *loo = cl2lov(slice->cpl_obj); - struct lov_layout_raid0 *r0 = lov_r0(loo); - pgoff_t index = *max_index; - unsigned int pps; /* pages per stripe */ - - CDEBUG(D_READA, DFID "*max_index = %lu, nr = %d\n", - PFID(lu_object_fid(lov2lu(loo))), index, r0->lo_nr); - - if (index == 0) /* the page is not covered by any lock */ - return 0; - - if (r0->lo_nr == 1) /* single stripe file */ - return 0; - - /* max_index is stripe level, convert it into file level */ - if (index != CL_PAGE_EOF) { - int stripeno = lov_page_stripe(slice->cpl_page); - *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno); - } - - /* calculate the end of current stripe */ - pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT; - index = slice->cpl_index + pps - slice->cpl_index % pps - 1; - - CDEBUG(D_READA, DFID "*max_index = %lu, index = %lu, pps = %u, stripe_size = %u, stripe no = %u, page index = %lu\n", - PFID(lu_object_fid(lov2lu(loo))), *max_index, index, pps, - loo->lo_lsm->lsm_stripe_size, lov_page_stripe(slice->cpl_page), - slice->cpl_index); - - /* never exceed the end of the stripe */ - *max_index = min_t(pgoff_t, *max_index, index); - return 0; -} - static int lov_raid0_page_print(const struct lu_env *env, const struct cl_page_slice *slice, void *cookie, lu_printer_t printer) @@ -104,7 +59,6 @@ static int lov_raid0_page_print(const struct lu_env *env, } static const struct cl_page_operations lov_raid0_page_ops = { - .cpo_is_under_lock = lov_raid0_page_is_under_lock, .cpo_print = lov_raid0_page_print }; diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c index f8c8a361ef79280f9ba94c5ba5eef56417a08ff2..7daa8671fdc3436138fed8bc2918f1497d66e5ec 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pool.c +++ b/drivers/staging/lustre/lustre/lov/lov_pool.c @@ -81,7 +81,8 @@ static void lov_pool_putref_locked(struct pool_desc *pool) * Chapter 6.4. 
* Addison Wesley, 1973 */ -static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask) +static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, + unsigned int mask) { int i; __u32 result; diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c index 09dcaf484c899bb1bc2f560a992cc4ddcc675329..d43cc88ae6418da45d1b84d13f46b90478d4e727 100644 --- a/drivers/staging/lustre/lustre/lov/lov_request.c +++ b/drivers/staging/lustre/lustre/lov/lov_request.c @@ -44,7 +44,6 @@ static void lov_init_set(struct lov_request_set *set) atomic_set(&set->set_completes, 0); atomic_set(&set->set_success, 0); atomic_set(&set->set_finish_checked, 0); - set->set_cookies = NULL; INIT_LIST_HEAD(&set->set_list); atomic_set(&set->set_refcount, 1); init_waitqueue_head(&set->set_waitq); @@ -61,8 +60,6 @@ void lov_finish_set(struct lov_request_set *set) rq_link); list_del_init(&req->rq_link); - if (req->rq_oi.oi_oa) - kmem_cache_free(obdo_cachep, req->rq_oi.oi_oa); kfree(req->rq_oi.oi_osfs); kfree(req); } @@ -97,22 +94,6 @@ static void lov_update_set(struct lov_request_set *set, wake_up(&set->set_waitq); } -int lov_update_common_set(struct lov_request_set *set, - struct lov_request *req, int rc) -{ - struct lov_obd *lov = &set->set_exp->exp_obd->u.lov; - - lov_update_set(set, req, rc); - - /* grace error on inactive ost */ - if (rc && !(lov->lov_tgts[req->rq_idx] && - lov->lov_tgts[req->rq_idx]->ltd_active)) - rc = 0; - - /* FIXME in raid1 regime, should return 0 */ - return rc; -} - static void lov_set_add_req(struct lov_request *req, struct lov_request_set *set) { @@ -183,279 +164,6 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) return rc; } -static int common_attr_done(struct lov_request_set *set) -{ - struct lov_request *req; - struct obdo *tmp_oa; - int rc = 0, attrset = 0; - - if (!set->set_oi->oi_oa) - return 0; - - if (!atomic_read(&set->set_success)) - return -EIO; - - tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); - if (!tmp_oa) { - rc = -ENOMEM; - goto out; - } - - list_for_each_entry(req, &set->set_list, rq_link) { - if (!req->rq_complete || req->rq_rc) - continue; - if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */ - continue; - lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa, - req->rq_oi.oi_oa->o_valid, - set->set_oi->oi_md, req->rq_stripe, &attrset); - } - if (!attrset) { - CERROR("No stripes had valid attrs\n"); - rc = -EIO; - } - if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) && - (set->set_oi->oi_md->lsm_stripe_count != attrset)) { - /* When we take attributes of some epoch, we require all the - * ost to be active. - */ - CERROR("Not all the stripes had valid attrs\n"); - rc = -EIO; - goto out; - } - - tmp_oa->o_oi = set->set_oi->oi_oa->o_oi; - memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa)); -out: - if (tmp_oa) - kmem_cache_free(obdo_cachep, tmp_oa); - return rc; -} - -int lov_fini_getattr_set(struct lov_request_set *set) -{ - int rc = 0; - - if (!set) - return 0; - LASSERT(set->set_exp); - if (atomic_read(&set->set_completes)) - rc = common_attr_done(set); - - lov_put_reqset(set); - - return rc; -} - -/* The callback for osc_getattr_async that finalizes a request info when a - * response is received. 
- */ -static int cb_getattr_update(void *cookie, int rc) -{ - struct obd_info *oinfo = cookie; - struct lov_request *lovreq; - - lovreq = container_of(oinfo, struct lov_request, rq_oi); - return lov_update_common_set(lovreq->rq_rqset, lovreq, rc); -} - -int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo, - struct lov_request_set **reqset) -{ - struct lov_request_set *set; - struct lov_obd *lov = &exp->exp_obd->u.lov; - int rc = 0, i; - - set = kzalloc(sizeof(*set), GFP_NOFS); - if (!set) - return -ENOMEM; - lov_init_set(set); - - set->set_exp = exp; - set->set_oi = oinfo; - - for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) { - struct lov_oinfo *loi; - struct lov_request *req; - - loi = oinfo->oi_md->lsm_oinfo[i]; - if (lov_oinfo_is_dummy(loi)) - continue; - - if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) { - CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx); - if (oinfo->oi_oa->o_valid & OBD_MD_FLEPOCH) { - /* SOM requires all the OSTs to be active. */ - rc = -EIO; - goto out_set; - } - continue; - } - - req = kzalloc(sizeof(*req), GFP_NOFS); - if (!req) { - rc = -ENOMEM; - goto out_set; - } - - req->rq_stripe = i; - req->rq_idx = loi->loi_ost_idx; - - req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); - if (!req->rq_oi.oi_oa) { - kfree(req); - rc = -ENOMEM; - goto out_set; - } - memcpy(req->rq_oi.oi_oa, oinfo->oi_oa, - sizeof(*req->rq_oi.oi_oa)); - req->rq_oi.oi_oa->o_oi = loi->loi_oi; - req->rq_oi.oi_cb_up = cb_getattr_update; - - lov_set_add_req(req, set); - } - if (!set->set_count) { - rc = -EIO; - goto out_set; - } - *reqset = set; - return rc; -out_set: - lov_fini_getattr_set(set); - return rc; -} - -int lov_fini_setattr_set(struct lov_request_set *set) -{ - int rc = 0; - - if (!set) - return 0; - LASSERT(set->set_exp); - if (atomic_read(&set->set_completes)) { - rc = common_attr_done(set); - /* FIXME update qos data here */ - } - - lov_put_reqset(set); - return rc; -} - -int lov_update_setattr_set(struct lov_request_set *set, - struct lov_request *req, int rc) -{ - struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov; - struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md; - - lov_update_set(set, req, rc); - - /* grace error on inactive ost */ - if (rc && !(lov->lov_tgts[req->rq_idx] && - lov->lov_tgts[req->rq_idx]->ltd_active)) - rc = 0; - - if (rc == 0) { - if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME) - lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime = - req->rq_oi.oi_oa->o_ctime; - if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME) - lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime = - req->rq_oi.oi_oa->o_mtime; - if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME) - lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime = - req->rq_oi.oi_oa->o_atime; - } - - return rc; -} - -/* The callback for osc_setattr_async that finalizes a request info when a - * response is received. 
- */ -static int cb_setattr_update(void *cookie, int rc) -{ - struct obd_info *oinfo = cookie; - struct lov_request *lovreq; - - lovreq = container_of(oinfo, struct lov_request, rq_oi); - return lov_update_setattr_set(lovreq->rq_rqset, lovreq, rc); -} - -int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, - struct obd_trans_info *oti, - struct lov_request_set **reqset) -{ - struct lov_request_set *set; - struct lov_obd *lov = &exp->exp_obd->u.lov; - int rc = 0, i; - - set = kzalloc(sizeof(*set), GFP_NOFS); - if (!set) - return -ENOMEM; - lov_init_set(set); - - set->set_exp = exp; - set->set_oi = oinfo; - if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) - set->set_cookies = oti->oti_logcookies; - - for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) { - struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i]; - struct lov_request *req; - - if (lov_oinfo_is_dummy(loi)) - continue; - - if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) { - CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx); - continue; - } - - req = kzalloc(sizeof(*req), GFP_NOFS); - if (!req) { - rc = -ENOMEM; - goto out_set; - } - req->rq_stripe = i; - req->rq_idx = loi->loi_ost_idx; - - req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); - if (!req->rq_oi.oi_oa) { - kfree(req); - rc = -ENOMEM; - goto out_set; - } - memcpy(req->rq_oi.oi_oa, oinfo->oi_oa, - sizeof(*req->rq_oi.oi_oa)); - req->rq_oi.oi_oa->o_oi = loi->loi_oi; - req->rq_oi.oi_oa->o_stripe_idx = i; - req->rq_oi.oi_cb_up = cb_setattr_update; - - if (oinfo->oi_oa->o_valid & OBD_MD_FLSIZE) { - int off = lov_stripe_offset(oinfo->oi_md, - oinfo->oi_oa->o_size, i, - &req->rq_oi.oi_oa->o_size); - - if (off < 0 && req->rq_oi.oi_oa->o_size) - req->rq_oi.oi_oa->o_size--; - - CDEBUG(D_INODE, "stripe %d has size %llu/%llu\n", - i, req->rq_oi.oi_oa->o_size, - oinfo->oi_oa->o_size); - } - lov_set_add_req(req, set); - } - if (!set->set_count) { - rc = -EIO; - goto out_set; - } - *reqset = set; - return rc; -out_set: - lov_fini_setattr_set(set); - return rc; -} - #define LOV_U64_MAX ((__u64)~0ULL) #define LOV_SUM_MAX(tot, add) \ do { \ diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c index b519a1940e1e691ec86712ffa4c5c128723431b7..5d6536f8a4f737e37722f15c6bf7fd897c1d5b5e 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c @@ -42,46 +42,6 @@ * @{ */ -/***************************************************************************** - * - * Lovsub transfer operations. - * - */ - -static void lovsub_req_completion(const struct lu_env *env, - const struct cl_req_slice *slice, int ioret) -{ - struct lovsub_req *lsr; - - lsr = cl2lovsub_req(slice); - kmem_cache_free(lovsub_req_kmem, lsr); -} - -/** - * Implementation of struct cl_req_operations::cro_attr_set() for lovsub - * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx - * field, which is filled there. - */ -static void lovsub_req_attr_set(const struct lu_env *env, - const struct cl_req_slice *slice, - const struct cl_object *obj, - struct cl_req_attr *attr, u64 flags) -{ - struct lovsub_object *subobj; - - subobj = cl2lovsub(obj); - /* - * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it - * unconditionally. It never changes anyway. 
- */ - attr->cra_oa->o_stripe_idx = subobj->lso_index; -} - -static const struct cl_req_operations lovsub_req_ops = { - .cro_attr_set = lovsub_req_attr_set, - .cro_completion = lovsub_req_completion -}; - /***************************************************************************** * * Lov-sub device and device type functions. @@ -137,32 +97,12 @@ static struct lu_device *lovsub_device_free(const struct lu_env *env, return next; } -static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req) -{ - struct lovsub_req *lsr; - int result; - - lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS); - if (lsr) { - cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops); - result = 0; - } else { - result = -ENOMEM; - } - return result; -} - static const struct lu_device_operations lovsub_lu_ops = { .ldo_object_alloc = lovsub_object_alloc, .ldo_process_config = NULL, .ldo_recovery_complete = NULL }; -static const struct cl_device_operations lovsub_cl_ops = { - .cdo_req_init = lovsub_req_init -}; - static struct lu_device *lovsub_device_alloc(const struct lu_env *env, struct lu_device_type *t, struct lustre_cfg *cfg) @@ -178,7 +118,6 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env, if (result == 0) { d = lovsub2lu_dev(lsd); d->ld_ops = &lovsub_lu_ops; - lsd->acid_cl.cd_ops = &lovsub_cl_ops; } else { d = ERR_PTR(result); } diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c index a2bac7a3b71baa9cf3ef18632c13db15d2541bf0..011296ee16e6aac01722a6b3624fb342f160e9e9 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_object.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c @@ -116,11 +116,31 @@ static int lovsub_object_glimpse(const struct lu_env *env, return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb); } +/** + * Implementation of struct cl_object_operations::coo_req_attr_set() for lovsub + * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx + * field, which is filled there. + */ +static void lovsub_req_attr_set(const struct lu_env *env, struct cl_object *obj, + struct cl_req_attr *attr) +{ + struct lovsub_object *subobj = cl2lovsub(obj); + + cl_req_attr_set(env, &subobj->lso_super->lo_cl, attr); + + /* + * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it + * unconditionally. It never changes anyway. 
+ */ + attr->cra_oa->o_stripe_idx = subobj->lso_index; +} + static const struct cl_object_operations lovsub_ops = { .coo_page_init = lovsub_page_init, .coo_lock_init = lovsub_lock_init, .coo_attr_update = lovsub_attr_update, - .coo_glimpse = lovsub_object_glimpse + .coo_glimpse = lovsub_object_glimpse, + .coo_req_attr_set = lovsub_req_attr_set }; static const struct lu_object_operations lovsub_lu_obj_ops = { diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c index fca9450de57c2a8db61520b463374d621aab3633..9021c465c04498d07e4252975fd2baf9e088f0bc 100644 --- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c +++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c @@ -36,6 +36,42 @@ #include "../include/lprocfs_status.h" #include "mdc_internal.h" +static ssize_t active_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct obd_device *dev = container_of(kobj, struct obd_device, + obd_kobj); + + return sprintf(buf, "%u\n", !dev->u.cli.cl_import->imp_deactive); +} + +static ssize_t active_store(struct kobject *kobj, struct attribute *attr, + const char *buffer, size_t count) +{ + struct obd_device *dev = container_of(kobj, struct obd_device, + obd_kobj); + unsigned long val; + int rc; + + rc = kstrtoul(buffer, 10, &val); + if (rc) + return rc; + + if (val < 0 || val > 1) + return -ERANGE; + + /* opposite senses */ + if (dev->u.cli.cl_import->imp_deactive == val) { + rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val); + if (rc) + count = rc; + } else { + CDEBUG(D_CONFIG, "activate %lu: ignoring repeat request\n", val); + } + return count; +} +LUSTRE_RW_ATTR(active); + static ssize_t max_rpcs_in_flight_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -73,6 +109,64 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj, } LUSTRE_RW_ATTR(max_rpcs_in_flight); +static ssize_t max_mod_rpcs_in_flight_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct obd_device *dev = container_of(kobj, struct obd_device, + obd_kobj); + u16 max; + int len; + + max = dev->u.cli.cl_max_mod_rpcs_in_flight; + len = sprintf(buf, "%hu\n", max); + + return len; +} + +static ssize_t max_mod_rpcs_in_flight_store(struct kobject *kobj, + struct attribute *attr, + const char *buffer, + size_t count) +{ + struct obd_device *dev = container_of(kobj, struct obd_device, + obd_kobj); + u16 val; + int rc; + + rc = kstrtou16(buffer, 10, &val); + if (rc) + return rc; + + rc = obd_set_max_mod_rpcs_in_flight(&dev->u.cli, val); + if (rc) + count = rc; + + return count; +} +LUSTRE_RW_ATTR(max_mod_rpcs_in_flight); + +static int mdc_rpc_stats_seq_show(struct seq_file *seq, void *v) +{ + struct obd_device *dev = seq->private; + + return obd_mod_rpc_stats_seq_show(&dev->u.cli, seq); +} + +static ssize_t mdc_rpc_stats_seq_write(struct file *file, + const char __user *buf, + size_t len, loff_t *off) +{ + struct seq_file *seq = file->private_data; + struct obd_device *dev = seq->private; + struct client_obd *cli = &dev->u.cli; + + lprocfs_oh_clear(&cli->cl_mod_rpcs_hist); + + return len; +} +LPROC_SEQ_FOPS(mdc_rpc_stats); + LPROC_SEQ_FOPS_WR_ONLY(mdc, ping); LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags); @@ -112,11 +206,15 @@ static struct lprocfs_vars lprocfs_mdc_obd_vars[] = { { "import", &mdc_import_fops, NULL, 0 }, { "state", &mdc_state_fops, NULL, 0 }, { "pinger_recov", &mdc_pinger_recov_fops, NULL, 0 }, + { .name = "rpc_stats", + .fops = &mdc_rpc_stats_fops }, { NULL } }; static struct attribute *mdc_attrs[] = { + 
&lustre_attr_active.attr, &lustre_attr_max_rpcs_in_flight.attr, + &lustre_attr_max_mod_rpcs_in_flight.attr, &lustre_attr_max_pages_per_rpc.attr, NULL, }; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h index f446c1c2584b7b0c5f4357517448f7a441d5c9d0..881c6a0676a6cf3f69576db78a4df7c72173e846 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h +++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h @@ -46,7 +46,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size, void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags, struct md_op_data *data, size_t ea_size); void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - void *ea, size_t ealen, void *ea2, size_t ea2len); + void *ea, size_t ealen); void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, const void *data, size_t datalen, umode_t mode, uid_t uid, gid_t gid, cfs_cap_t capability, __u64 rdev); @@ -75,7 +75,7 @@ int mdc_intent_lock(struct obd_export *exp, __u64 extra_lock_flags); int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - const ldlm_policy_data_t *policy, + const union ldlm_policy_data *policy, struct lookup_intent *it, struct md_op_data *op_data, struct lustre_handle *lockh, __u64 extra_lock_flags); @@ -105,12 +105,11 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, const char *new, size_t newlen, struct ptlrpc_request **request); int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, - void *ea, size_t ealen, void *ea2, size_t ea2len, - struct ptlrpc_request **request, struct md_open_data **mod); + void *ea, size_t ealen, struct ptlrpc_request **request); int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request **request); int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, enum ldlm_mode mode, + union ldlm_policy_data *policy, enum ldlm_mode mode, enum ldlm_cancel_flags flags, void *opaque); int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, @@ -122,7 +121,8 @@ int mdc_intent_getattr_async(struct obd_export *exp, enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, const struct lu_fid *fid, enum ldlm_type type, - ldlm_policy_data_t *policy, enum ldlm_mode mode, + union ldlm_policy_data *policy, + enum ldlm_mode mode, struct lustre_handle *lockh); static inline int mdc_prep_elc_req(struct obd_export *exp, diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c index aac7e04873e27bd0f2bb7194d1152cefeeb814ec..f35e1f9afdefb03102c5b5f4f7a4bff99045912d 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c @@ -139,7 +139,7 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, rec->cr_time = op_data->op_mod_time; rec->cr_suppgid1 = op_data->op_suppgids[0]; rec->cr_suppgid2 = op_data->op_suppgids[1]; - flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS; + flags = 0; if (op_data->op_bias & MDS_CREATE_VOLATILE) flags |= MDS_OPEN_VOLATILE; set_mrc_cr_flags(rec, flags); @@ -301,16 +301,16 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec, static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch, struct md_op_data *op_data) { - memcpy(&epoch->handle, &op_data->op_handle, sizeof(epoch->handle)); - epoch->ioepoch = op_data->op_ioepoch; - epoch->flags = op_data->op_flags & 
MF_SOM_LOCAL_FLAGS; + epoch->mio_handle = op_data->op_handle; + epoch->mio_unused1 = 0; + epoch->mio_unused2 = 0; + epoch->mio_padding = 0; } void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - void *ea, size_t ealen, void *ea2, size_t ea2len) + void *ea, size_t ealen) { struct mdt_rec_setattr *rec; - struct mdt_ioepoch *epoch; struct lov_user_md *lum = NULL; CLASSERT(sizeof(struct mdt_rec_reint) == @@ -318,11 +318,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_setattr_pack_rec(rec, op_data); - if (op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) { - epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); - mdc_ioepoch_pack(epoch, op_data); - } - if (ealen == 0) return; @@ -335,12 +330,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, } else { memcpy(lum, ea, ealen); } - - if (ea2len == 0) - return; - - memcpy(req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES), ea2, - ea2len); } void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) @@ -387,6 +376,31 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data) mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen); } +static void mdc_intent_close_pack(struct ptlrpc_request *req, + struct md_op_data *op_data) +{ + enum mds_op_bias bias = op_data->op_bias; + struct close_data *data; + struct ldlm_lock *lock; + + if (!(bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP | + MDS_RENAME_MIGRATE))) + return; + + data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA); + LASSERT(data); + + lock = ldlm_handle2lock(&op_data->op_lease_handle); + if (lock) { + data->cd_handle = lock->l_remote_handle; + LDLM_LOCK_PUT(lock); + } + ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL); + + data->cd_data_version = op_data->op_data_version; + data->cd_fid = op_data->op_fid2; +} + void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data, const char *old, size_t oldlen, const char *new, size_t newlen) @@ -415,6 +429,15 @@ void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data, if (new) mdc_pack_name(req, &RMF_SYMTGT, new, newlen); + + if (op_data->op_cli_flags & CLI_MIGRATE && + op_data->op_bias & MDS_RENAME_MIGRATE) { + struct mdt_ioepoch *epoch; + + mdc_intent_close_pack(req, op_data); + epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); + mdc_ioepoch_pack(epoch, op_data); + } } void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags, @@ -441,27 +464,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags, op_data->op_namelen); } -static void mdc_hsm_release_pack(struct ptlrpc_request *req, - struct md_op_data *op_data) -{ - if (op_data->op_bias & MDS_HSM_RELEASE) { - struct close_data *data; - struct ldlm_lock *lock; - - data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA); - - lock = ldlm_handle2lock(&op_data->op_lease_handle); - if (lock) { - data->cd_handle = lock->l_remote_handle; - LDLM_LOCK_PUT(lock); - } - ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL); - - data->cd_data_version = op_data->op_data_version; - data->cd_fid = op_data->op_fid2; - } -} - void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data) { struct mdt_ioepoch *epoch; @@ -484,5 +486,5 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data) rec->sa_valid &= ~MDS_ATTR_ATIME; mdc_ioepoch_pack(epoch, op_data); - 
mdc_hsm_release_pack(req, op_data); + mdc_intent_close_pack(req, op_data); } diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c index f1f6c082fa42cf98daa76d09f56c01af1bc60b1e..54ebb9952d66a1304cd6b2cbe652a3f12c6ee7cb 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c @@ -38,10 +38,12 @@ #include "../include/obd.h" #include "../include/obd_class.h" #include "../include/lustre_dlm.h" -#include "../include/lustre_fid.h" /* fid_res_name_eq() */ +#include "../include/lustre_fid.h" #include "../include/lustre_mdc.h" #include "../include/lustre_net.h" #include "../include/lustre_req_layout.h" +#include "../include/lustre_swab.h" + #include "mdc_internal.h" struct mdc_getattr_args { @@ -131,7 +133,8 @@ int mdc_set_lock_data(struct obd_export *exp, const struct lustre_handle *lockh, enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, const struct lu_fid *fid, enum ldlm_type type, - ldlm_policy_data_t *policy, enum ldlm_mode mode, + union ldlm_policy_data *policy, + enum ldlm_mode mode, struct lustre_handle *lockh) { struct ldlm_res_id res_id; @@ -147,7 +150,7 @@ enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, + union ldlm_policy_data *policy, enum ldlm_mode mode, enum ldlm_cancel_flags flags, void *opaque) @@ -386,8 +389,6 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, obddev->u.cli.cl_default_mds_easize); - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, - obddev->u.cli.cl_default_mds_cookiesize); ptlrpc_request_set_replen(req); return req; } @@ -688,20 +689,20 @@ static int mdc_finish_enqueue(struct obd_export *exp, * we don't know in advance the file type. */ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - const ldlm_policy_data_t *policy, + const union ldlm_policy_data *policy, struct lookup_intent *it, struct md_op_data *op_data, struct lustre_handle *lockh, u64 extra_lock_flags) { - static const ldlm_policy_data_t lookup_policy = { + static const union ldlm_policy_data lookup_policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } }; - static const ldlm_policy_data_t update_policy = { + static const union ldlm_policy_data update_policy = { .l_inodebits = { MDS_INODELOCK_UPDATE } }; - static const ldlm_policy_data_t layout_policy = { + static const union ldlm_policy_data layout_policy = { .l_inodebits = { MDS_INODELOCK_LAYOUT } }; - static const ldlm_policy_data_t getxattr_policy = { + static const union ldlm_policy_data getxattr_policy = { .l_inodebits = { MDS_INODELOCK_XATTR } }; struct obd_device *obddev = class_exp2obd(exp); @@ -762,27 +763,22 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, if (IS_ERR(req)) return PTR_ERR(req); - if (req && it && it->it_op & IT_CREAT) - /* ask ptlrpc not to resend on EINPROGRESS since we have our own - * retry logic - */ - req->rq_no_retry_einprogress = 1; - if (resends) { req->rq_generation_set = 1; req->rq_import_generation = generation; req->rq_sent = ktime_get_real_seconds() + resends; } - /* It is important to obtain rpc_lock first (if applicable), so that - * threads that are serialised with rpc_lock are not polluting our - * rpcs in flight counter. 
We do not do flock request limiting, though + /* It is important to obtain modify RPC slot first (if applicable), so + * that threads that are waiting for a modify RPC slot are not polluting + * our rpcs in flight counter. + * We do not do flock request limiting, though */ if (it) { - mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it); + mdc_get_mod_rpc_slot(req, it); rc = obd_get_request_slot(&obddev->u.cli); if (rc != 0) { - mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); + mdc_put_mod_rpc_slot(req, it); mdc_clear_replay_flag(req, 0); ptlrpc_req_finished(req); return rc; @@ -809,7 +805,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, } obd_put_request_slot(&obddev->u.cli); - mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); + mdc_put_mod_rpc_slot(req, it); if (rc < 0) { CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n", @@ -825,11 +821,12 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, lockrep->lock_policy_res2 = ptlrpc_status_ntoh(lockrep->lock_policy_res2); - /* Retry the create infinitely when we get -EINPROGRESS from - * server. This is required by the new quota design. + /* + * Retry infinitely when the server returns -EINPROGRESS for the + * intent operation, when server returns -EINPROGRESS for acquiring + * intent lock, we'll retry in after_reply(). */ - if (it->it_op & IT_CREAT && - (int)lockrep->lock_policy_res2 == -EINPROGRESS) { + if (it->it_op && (int)lockrep->lock_policy_res2 == -EINPROGRESS) { mdc_clear_replay_flag(req, rc); ptlrpc_req_finished(req); resends++; @@ -931,7 +928,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp, */ lock = ldlm_handle2lock(lockh); if (lock) { - ldlm_policy_data_t policy = lock->l_policy_data; + union ldlm_policy_data policy = lock->l_policy_data; LDLM_DEBUG(lock, "matching against this"); @@ -967,7 +964,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, */ struct ldlm_res_id res_id; struct lustre_handle lockh; - ldlm_policy_data_t policy; + union ldlm_policy_data policy; enum ldlm_mode mode; if (it->it_lock_handle) { @@ -1169,10 +1166,9 @@ int mdc_intent_getattr_async(struct obd_export *exp, * for statahead currently. Consider CMD in future, such two bits * maybe managed by different MDS, should be adjusted then. 
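The slot ordering spelled out in the comment above comes down to: take the modify-RPC slot first, then the generic rpcs-in-flight slot, and release them in reverse, so threads blocked waiting for a modify slot never count against the rpcs-in-flight limit. A minimal sketch under that assumption follows; example_send_intent() is a hypothetical wrapper, but the slot and queue calls are the ones this patch introduces.

/* Hypothetical wrapper showing the acquire/release ordering used above. */
static int example_send_intent(struct client_obd *cli,
			       struct ptlrpc_request *req,
			       struct lookup_intent *it)
{
	int rc;

	mdc_get_mod_rpc_slot(req, it);		/* modify-RPC slot first */
	rc = obd_get_request_slot(cli);		/* then rpcs-in-flight slot */
	if (rc) {
		mdc_put_mod_rpc_slot(req, it);
		return rc;
	}

	rc = ptlrpc_queue_wait(req);		/* send and wait for reply */

	obd_put_request_slot(cli);		/* release in reverse order */
	mdc_put_mod_rpc_slot(req, it);

	return rc;
}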
*/ - ldlm_policy_data_t policy = { - .l_inodebits = { MDS_INODELOCK_LOOKUP | - MDS_INODELOCK_UPDATE } - }; + union ldlm_policy_data policy = { + .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE } + }; int rc = 0; __u64 flags = LDLM_FL_HAS_INTENT; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c index c921e471fa275909cf19178d07f928ba724f7451..07b168490f095184b1a6bce5c569b7e1a5710282 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c @@ -40,17 +40,15 @@ #include "../include/lustre_fid.h" /* mdc_setattr does its own semaphore handling */ -static int mdc_reint(struct ptlrpc_request *request, - struct mdc_rpc_lock *rpc_lock, - int level) +static int mdc_reint(struct ptlrpc_request *request, int level) { int rc; request->rq_send_state = level; - mdc_get_rpc_lock(rpc_lock, NULL); + mdc_get_mod_rpc_slot(request, NULL); rc = ptlrpc_queue_wait(request); - mdc_put_rpc_lock(rpc_lock, NULL); + mdc_put_mod_rpc_slot(request, NULL); if (rc) CDEBUG(D_INFO, "error in handling %d\n", rc); else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY)) @@ -68,7 +66,7 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, __u64 bits) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; - ldlm_policy_data_t policy = {}; + union ldlm_policy_data policy = {}; struct ldlm_res_id res_id; struct ldlm_resource *res; int count; @@ -99,13 +97,10 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, } int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, - void *ea, size_t ealen, void *ea2, size_t ea2len, - struct ptlrpc_request **request, struct md_open_data **mod) + void *ea, size_t ealen, struct ptlrpc_request **request) { LIST_HEAD(cancels); struct ptlrpc_request *req; - struct mdc_rpc_lock *rpc_lock; - struct obd_device *obd = exp->exp_obd; int count = 0, rc; __u64 bits; @@ -122,12 +117,9 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } - if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0) - req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, - 0); + req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0); req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen); - req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, - ea2len); + req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0); rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); if (rc) { @@ -135,63 +127,21 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, return rc; } - rpc_lock = obd->u.cli.cl_rpc_lock; - if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME)) CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n", LTIME_S(op_data->op_attr.ia_mtime), LTIME_S(op_data->op_attr.ia_ctime)); - mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len); + mdc_setattr_pack(req, op_data, ea, ealen); ptlrpc_request_set_replen(req); - if (mod && (op_data->op_flags & MF_EPOCH_OPEN) && - req->rq_import->imp_replayable) { - LASSERT(!*mod); - - *mod = obd_mod_alloc(); - if (!*mod) { - DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data"); - } else { - req->rq_replay = 1; - req->rq_cb_data = *mod; - (*mod)->mod_open_req = req; - req->rq_commit_cb = mdc_commit_open; - (*mod)->mod_is_create = true; - /** - * Take an extra reference on \var mod, it protects \var - * mod from being freed on 
eviction (commit callback is - * called despite rq_replay flag). - * Will be put on mdc_done_writing(). - */ - obd_mod_get(*mod); - } - } - - rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL); - /* Save the obtained info in the original RPC for the replay case. */ - if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) { - struct mdt_ioepoch *epoch; - struct mdt_body *body; + rc = mdc_reint(req, LUSTRE_IMP_FULL); - epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - epoch->handle = body->mbo_handle; - epoch->ioepoch = body->mbo_ioepoch; - req->rq_replay_cb = mdc_replay_open; - /** bug 3633, open may be committed and estale answer is not error */ - } else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) { - rc = 0; - } else if (rc == -ERESTARTSYS) { + if (rc == -ERESTARTSYS) rc = 0; - } + *request = req; - if (rc && req->rq_commit_cb) { - /* Put an extra reference on \var mod on error case. */ - if (mod && *mod) - obd_mod_put(*mod); - req->rq_commit_cb(req); - } + return rc; } @@ -264,7 +214,7 @@ int mdc_create(struct obd_export *exp, struct md_op_data *op_data, } level = LUSTRE_IMP_FULL; resend: - rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level); + rc = mdc_reint(req, level); /* Resend if we were told to. */ if (rc == -ERESTARTSYS) { @@ -332,13 +282,11 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, obd->u.cli.cl_default_mds_easize); - req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER, - obd->u.cli.cl_default_mds_cookiesize); ptlrpc_request_set_replen(req); *request = req; - rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); + rc = mdc_reint(req, LUSTRE_IMP_FULL); if (rc == -ERESTARTSYS) rc = 0; return rc; @@ -348,7 +296,6 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request **request) { LIST_HEAD(cancels); - struct obd_device *obd = exp->exp_obd; struct ptlrpc_request *req; int count = 0, rc; @@ -380,7 +327,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data, mdc_link_pack(req, op_data); ptlrpc_request_set_replen(req); - rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); + rc = mdc_reint(req, LUSTRE_IMP_FULL); *request = req; if (rc == -ERESTARTSYS) rc = 0; @@ -419,7 +366,8 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, MDS_INODELOCK_FULL); req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_REINT_RENAME); + op_data->op_cli_flags & CLI_MIGRATE ? + &RQF_MDS_REINT_MIGRATE : &RQF_MDS_REINT_RENAME); if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; @@ -435,6 +383,23 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, return rc; } + if (op_data->op_cli_flags & CLI_MIGRATE && op_data->op_data) { + struct md_open_data *mod = op_data->op_data; + + LASSERTF(mod->mod_open_req && + mod->mod_open_req->rq_type != LI_POISON, + "POISONED open %p!\n", mod->mod_open_req); + + DEBUG_REQ(D_HA, mod->mod_open_req, "matched open"); + /* + * We no longer want to preserve this open for replay even + * though the open was committed. 
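When a migrate rename carries md_open_data, the patch drops the committed open request from replay by clearing rq_replay under rq_lock. A minimal model of that "flip the flag only under the owning lock" step, using a mutex and illustrative names instead of the kernel spinlock and ptlrpc structures:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-in for the open request's replay state */
struct request {
	pthread_mutex_t lock;
	bool replay;
};

/* stop preserving the committed open for replay, as the CLI_MIGRATE
 * path in mdc_rename() does: the flag is only changed under the lock */
static void disable_replay(struct request *req)
{
	pthread_mutex_lock(&req->lock);
	req->replay = false;
	pthread_mutex_unlock(&req->lock);
}

int main(void)
{
	struct request open_req = { PTHREAD_MUTEX_INITIALIZER, true };

	disable_replay(&open_req);
	printf("replay=%d\n", open_req.replay);
	return 0;
}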
b=3632, b=3633 + */ + spin_lock(&mod->mod_open_req->rq_lock); + mod->mod_open_req->rq_replay = 0; + spin_unlock(&mod->mod_open_req->rq_lock); + } + if (exp_connect_cancelset(exp) && req) ldlm_cli_cancel_list(&cancels, count, req, 0); @@ -442,11 +407,9 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, obd->u.cli.cl_default_mds_easize); - req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER, - obd->u.cli.cl_default_mds_cookiesize); ptlrpc_request_set_replen(req); - rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); + rc = mdc_reint(req, LUSTRE_IMP_FULL); *request = req; if (rc == -ERESTARTSYS) rc = 0; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index f56ea643f9bf906d437cc180993ff97f79c8789c..2cfd913f9bc569272b38c38c86f978eae3ef953e 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -38,15 +38,18 @@ # include # include +#include "../include/cl_object.h" +#include "../include/llog_swab.h" +#include "../include/lprocfs_status.h" #include "../include/lustre_acl.h" +#include "../include/lustre_fid.h" #include "../include/lustre/lustre_ioctl.h" -#include "../include/obd_class.h" +#include "../include/lustre_kernelcomm.h" #include "../include/lustre_lmv.h" -#include "../include/lustre_fid.h" -#include "../include/lprocfs_status.h" -#include "../include/lustre_param.h" #include "../include/lustre_log.h" -#include "../include/lustre_kernelcomm.h" +#include "../include/lustre_param.h" +#include "../include/lustre_swab.h" +#include "../include/obd_class.h" #include "mdc_internal.h" @@ -327,12 +330,12 @@ static int mdc_xattr_common(struct obd_export *exp, /* make rpc */ if (opcode == MDS_REINT) - mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); + mdc_get_mod_rpc_slot(req, NULL); rc = ptlrpc_queue_wait(req); if (opcode == MDS_REINT) - mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); + mdc_put_mod_rpc_slot(req, NULL); if (rc) ptlrpc_req_finished(req); @@ -420,9 +423,6 @@ static int mdc_get_lustre_md(struct obd_export *exp, md->body = req_capsule_server_get(pill, &RMF_MDT_BODY); if (md->body->mbo_valid & OBD_MD_FLEASIZE) { - int lmmsize; - struct lov_mds_md *lmm; - if (!S_ISREG(md->body->mbo_mode)) { CDEBUG(D_INFO, "OBD_MD_FLEASIZE set, should be a regular file, but is not\n"); @@ -436,28 +436,18 @@ static int mdc_get_lustre_md(struct obd_export *exp, rc = -EPROTO; goto out; } - lmmsize = md->body->mbo_eadatasize; - lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize); - if (!lmm) { - rc = -EPROTO; - goto out; - } - - rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize); - if (rc < 0) - goto out; - if (rc < (typeof(rc))sizeof(*md->lsm)) { - CDEBUG(D_INFO, - "lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n", - rc, (int)sizeof(*md->lsm)); + md->layout.lb_len = md->body->mbo_eadatasize; + md->layout.lb_buf = req_capsule_server_sized_get(pill, + &RMF_MDT_MD, + md->layout.lb_len); + if (!md->layout.lb_buf) { rc = -EPROTO; goto out; } - } else if (md->body->mbo_valid & OBD_MD_FLDIREA) { - int lmvsize; - struct lov_mds_md *lmv; + const union lmv_mds_md *lmv; + size_t lmv_size; if (!S_ISDIR(md->body->mbo_mode)) { CDEBUG(D_INFO, @@ -466,22 +456,21 @@ static int mdc_get_lustre_md(struct obd_export *exp, goto out; } - if (md->body->mbo_eadatasize == 0) { + lmv_size = md->body->mbo_eadatasize; + if (!lmv_size) { CDEBUG(D_INFO, "OBD_MD_FLDIREA is set, but 
eadatasize 0\n"); return -EPROTO; } if (md->body->mbo_valid & OBD_MD_MEA) { - lmvsize = md->body->mbo_eadatasize; lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD, - lmvsize); + lmv_size); if (!lmv) { rc = -EPROTO; goto out; } - rc = obd_unpackmd(md_exp, (void *)&md->lmv, lmv, - lmvsize); + rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size); if (rc < 0) goto out; @@ -517,8 +506,6 @@ static int mdc_get_lustre_md(struct obd_export *exp, #ifdef CONFIG_FS_POSIX_ACL posix_acl_release(md->posix_acl); #endif - if (md->lsm) - obd_free_memmd(dt_exp, &md->lsm); } return rc; } @@ -528,10 +515,6 @@ static int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md) return 0; } -/** - * Handles both OPEN and SETATTR RPCs for OPEN-CLOSE and SETATTR-DONE_WRITING - * RPC chains. - */ void mdc_replay_open(struct ptlrpc_request *req) { struct md_open_data *mod = req->rq_cb_data; @@ -565,15 +548,15 @@ void mdc_replay_open(struct ptlrpc_request *req) __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg); struct mdt_ioepoch *epoch; - LASSERT(opc == MDS_CLOSE || opc == MDS_DONE_WRITING); + LASSERT(opc == MDS_CLOSE); epoch = req_capsule_client_get(&close_req->rq_pill, &RMF_MDT_EPOCH); LASSERT(epoch); if (och) - LASSERT(!memcmp(&old, &epoch->handle, sizeof(old))); + LASSERT(!memcmp(&old, &epoch->mio_handle, sizeof(old))); DEBUG_REQ(D_HA, close_req, "updating close body with new fh"); - epoch->handle = body->mbo_handle; + epoch->mio_handle = body->mbo_handle; } } @@ -715,22 +698,6 @@ static int mdc_clear_open_replay_data(struct obd_export *exp, return 0; } -/* Prepares the request for the replay by the given reply */ -static void mdc_close_handle_reply(struct ptlrpc_request *req, - struct md_op_data *op_data, int rc) { - struct mdt_body *repbody; - struct mdt_ioepoch *epoch; - - if (req && rc == -EAGAIN) { - repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); - - epoch->flags |= MF_SOM_AU; - if (repbody->mbo_valid & OBD_MD_FLGETATTRLOCK) - op_data->op_flags |= MF_GETATTR_LOCK; - } -} - static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, struct md_open_data *mod, struct ptlrpc_request **request) { @@ -740,9 +707,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, int rc; int saved_rc = 0; - req_fmt = &RQF_MDS_CLOSE; if (op_data->op_bias & MDS_HSM_RELEASE) { - req_fmt = &RQF_MDS_RELEASE_CLOSE; + req_fmt = &RQF_MDS_INTENT_CLOSE; /* allocate a FID for volatile file */ rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); @@ -752,6 +718,10 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, /* save the errcode and proceed to close */ saved_rc = rc; } + } else if (op_data->op_bias & MDS_CLOSE_LAYOUT_SWAP) { + req_fmt = &RQF_MDS_INTENT_CLOSE; + } else { + req_fmt = &RQF_MDS_CLOSE; } *request = NULL; @@ -807,14 +777,12 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, obd->u.cli.cl_default_mds_easize); - req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER, - obd->u.cli.cl_default_mds_cookiesize); ptlrpc_request_set_replen(req); - mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL); + mdc_get_mod_rpc_slot(req, NULL); rc = ptlrpc_queue_wait(req); - mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL); + mdc_put_mod_rpc_slot(req, NULL); if (!req->rq_repmsg) { CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req, @@ -857,79 +825,9 @@ static int mdc_close(struct 
obd_export *exp, struct md_op_data *op_data, obd_mod_put(mod); } *request = req; - mdc_close_handle_reply(req, op_data, rc); return rc < 0 ? rc : saved_rc; } -static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, - struct md_open_data *mod) -{ - struct obd_device *obd = class_exp2obd(exp); - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_DONE_WRITING); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - if (mod) { - LASSERTF(mod->mod_open_req && - mod->mod_open_req->rq_type != LI_POISON, - "POISONED setattr %p!\n", mod->mod_open_req); - - mod->mod_close_req = req; - DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr"); - /* We no longer want to preserve this setattr for replay even - * though the open was committed. b=3632, b=3633 - */ - spin_lock(&mod->mod_open_req->rq_lock); - mod->mod_open_req->rq_replay = 0; - spin_unlock(&mod->mod_open_req->rq_lock); - } - - mdc_close_pack(req, op_data); - ptlrpc_request_set_replen(req); - - mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL); - rc = ptlrpc_queue_wait(req); - mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL); - - if (rc == -ESTALE) { - /** - * it can be allowed error after 3633 if open or setattr were - * committed and server failed before close was sent. - * Let's check if mod exists and return no error in that case - */ - if (mod) { - if (mod->mod_open_req->rq_committed) - rc = 0; - } - } - - if (mod) { - if (rc != 0) - mod->mod_close_req = NULL; - LASSERT(mod->mod_open_req); - mdc_free_open(mod); - - /* Since now, mod is accessed through setattr req only, - * thus DW req does not keep a reference on mod anymore. 
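mdc_close() now chooses the intent-close request format whenever the close carries an HSM release or layout-swap bias, and the plain close format otherwise. A compact model of that selection; the flag values and format names below are illustrative, not the real constants:

#include <stdio.h>

#define BIAS_HSM_RELEASE       0x1
#define BIAS_CLOSE_LAYOUT_SWAP 0x2

enum req_format { REQ_CLOSE, REQ_INTENT_CLOSE };

/* any bias that needs server-side intent handling uses the intent
 * close format; an ordinary close keeps the plain format */
static enum req_format close_format(unsigned int bias)
{
	if (bias & (BIAS_HSM_RELEASE | BIAS_CLOSE_LAYOUT_SWAP))
		return REQ_INTENT_CLOSE;
	return REQ_CLOSE;
}

int main(void)
{
	printf("%d %d\n", close_format(0),
	       close_format(BIAS_CLOSE_LAYOUT_SWAP));
	return 0;
}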
- */ - obd_mod_put(mod); - } - - mdc_close_handle_reply(req, op_data, rc); - ptlrpc_req_finished(req); - return rc; -} - static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid, u64 offset, struct page **pages, int npages, struct ptlrpc_request **request) @@ -959,8 +857,10 @@ static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid, req->rq_request_portal = MDS_READPAGE_PORTAL; ptlrpc_at_set_req_timeout(req); - desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK, - MDS_BULK_PORTAL); + desc = ptlrpc_prep_bulk_imp(req, npages, 1, + PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV, + MDS_BULK_PORTAL, + &ptlrpc_bulk_kiov_pin_ops); if (!desc) { ptlrpc_request_free(req); return -ENOMEM; @@ -968,7 +868,7 @@ static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid, /* NB req now owns desc and will free it when it gets freed */ for (i = 0; i < npages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); + desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE); mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid); @@ -1546,7 +1446,7 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf) /* Val is struct getinfo_fid2path result plus path */ vallen = sizeof(*gf) + gf->gf_pathlen; - rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL); + rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf); if (rc != 0 && rc != -EREMOTE) goto out; @@ -1558,8 +1458,11 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf) goto out; } - CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n%s\n", - PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, gf->gf_path); + CDEBUG(D_IOCTL, "path got " DFID " from %llu #%d: %s\n", + PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, + gf->gf_pathlen < 512 ? 
gf->gf_path : + /* only log the last 512 characters of the path */ + gf->gf_path + gf->gf_pathlen - 512); out: kfree(key); @@ -1595,7 +1498,9 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp, ptlrpc_request_set_replen(req); - rc = mdc_queue_wait(req); + mdc_get_mod_rpc_slot(req, NULL); + rc = ptlrpc_queue_wait(req); + mdc_put_mod_rpc_slot(req, NULL); out: ptlrpc_req_finished(req); return rc; @@ -1773,7 +1678,9 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp, ptlrpc_request_set_replen(req); - rc = mdc_queue_wait(req); + mdc_get_mod_rpc_slot(req, NULL); + rc = ptlrpc_queue_wait(req); + mdc_put_mod_rpc_slot(req, NULL); out: ptlrpc_req_finished(req); return rc; @@ -1836,7 +1743,9 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, ptlrpc_request_set_replen(req); - rc = mdc_queue_wait(req); + mdc_get_mod_rpc_slot(req, NULL); + rc = ptlrpc_queue_wait(req); + mdc_put_mod_rpc_slot(req, NULL); out: ptlrpc_req_finished(req); return rc; @@ -1957,10 +1866,8 @@ static int mdc_changelog_send_thread(void *csdata) /* Send EOF no matter what our result */ kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags); - if (kuch) { - kuch->kuc_msgtype = CL_EOF; - libcfs_kkuc_msg_put(cs->cs_fp, kuch); - } + kuch->kuc_msgtype = CL_EOF; + libcfs_kkuc_msg_put(cs->cs_fp, kuch); out: fput(cs->cs_fp); @@ -2015,52 +1922,6 @@ static int mdc_ioc_changelog_send(struct obd_device *obd, static int mdc_ioc_hsm_ct_start(struct obd_export *exp, struct lustre_kernelcomm *lk); -static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - struct ptlrpc_request *req; - struct obd_quotactl *body; - int rc; - - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION, - MDS_QUOTACHECK); - if (!req) - return -ENOMEM; - - body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - *body = *oqctl; - - ptlrpc_request_set_replen(req); - - /* the next poll will find -ENODATA, that means quotacheck is - * going on - */ - cli->cl_qchk_stat = -ENODATA; - rc = ptlrpc_queue_wait(req); - if (rc) - cli->cl_qchk_stat = rc; - ptlrpc_req_finished(req); - return rc; -} - -static int mdc_quota_poll_check(struct obd_export *exp, - struct if_quotacheck *qchk) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - int rc; - - qchk->obd_uuid = cli->cl_target_uuid; - memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME)); - - rc = cli->cl_qchk_stat; - /* the client is not the previous one */ - if (rc == CL_NOT_QUOTACHECKED) - rc = -EINTR; - return rc; -} - static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp, struct obd_quotactl *oqctl) { @@ -2215,9 +2076,6 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, case IOC_OSC_SET_ACTIVE: rc = ptlrpc_set_import_active(imp, data->ioc_offset); goto out; - case OBD_IOC_POLL_QUOTACHECK: - rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg); - goto out; case OBD_IOC_PING_TARGET: rc = ptlrpc_obd_ping(obd); goto out; @@ -2528,8 +2386,7 @@ static int mdc_set_info_async(const struct lu_env *env, } static int mdc_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val, - struct lov_stripe_md *lsm) + __u32 keylen, void *key, __u32 *vallen, void *val) { int rc = -EINVAL; @@ -2733,29 +2590,17 @@ static void mdc_llog_finish(struct obd_device *obd) static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg) { - struct 
client_obd *cli = &obd->u.cli; struct lprocfs_static_vars lvars = { NULL }; int rc; - cli->cl_rpc_lock = kzalloc(sizeof(*cli->cl_rpc_lock), GFP_NOFS); - if (!cli->cl_rpc_lock) - return -ENOMEM; - mdc_init_rpc_lock(cli->cl_rpc_lock); - rc = ptlrpcd_addref(); if (rc < 0) - goto err_rpc_lock; - - cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS); - if (!cli->cl_close_lock) { - rc = -ENOMEM; - goto err_ptlrpcd_decref; - } - mdc_init_rpc_lock(cli->cl_close_lock); + return rc; rc = client_obd_setup(obd, cfg); if (rc) - goto err_close_lock; + goto err_ptlrpcd_decref; + lprocfs_mdc_init_vars(&lvars); lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars); sptlrpc_lprocfs_cliobd_attach(obd); @@ -2769,29 +2614,25 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg) if (rc) { mdc_cleanup(obd); CERROR("failed to setup llogging subsystems\n"); + return rc; } return rc; -err_close_lock: - kfree(cli->cl_close_lock); err_ptlrpcd_decref: ptlrpcd_decref(); -err_rpc_lock: - kfree(cli->cl_rpc_lock); return rc; } -/* Initialize the default and maximum LOV EA and cookie sizes. This allows +/* Initialize the default and maximum LOV EA sizes. This allows * us to make MDS RPCs with large enough reply buffers to hold a default - * sized EA and cookie without having to calculate this (via a call into the + * sized EA without having to calculate this (via a call into the * LOV + OSCs) each time we make an RPC. The maximum size is also tracked * but not used to avoid wastefully vmalloc()'ing large reply buffers when * a large number of stripes is possible. If a larger reply buffer is * required it will be reallocated in the ptlrpc layer due to overflow. */ -static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize, - u32 cookiesize, u32 def_cookiesize) +static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize) { struct obd_device *obd = exp->exp_obd; struct client_obd *cli = &obd->u.cli; @@ -2802,42 +2643,24 @@ static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize, if (cli->cl_default_mds_easize < def_easize) cli->cl_default_mds_easize = def_easize; - if (cli->cl_max_mds_cookiesize < cookiesize) - cli->cl_max_mds_cookiesize = cookiesize; - - if (cli->cl_default_mds_cookiesize < def_cookiesize) - cli->cl_default_mds_cookiesize = def_cookiesize; - return 0; } -static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) +static int mdc_precleanup(struct obd_device *obd) { - switch (stage) { - case OBD_CLEANUP_EARLY: - break; - case OBD_CLEANUP_EXPORTS: - /* Failsafe, ok if racy */ - if (obd->obd_type->typ_refcnt <= 1) - libcfs_kkuc_group_rem(0, KUC_GRP_HSM); + /* Failsafe, ok if racy */ + if (obd->obd_type->typ_refcnt <= 1) + libcfs_kkuc_group_rem(0, KUC_GRP_HSM); - obd_cleanup_client_import(obd); - ptlrpc_lprocfs_unregister_obd(obd); - lprocfs_obd_cleanup(obd); - - mdc_llog_finish(obd); - break; - } + obd_cleanup_client_import(obd); + ptlrpc_lprocfs_unregister_obd(obd); + lprocfs_obd_cleanup(obd); + mdc_llog_finish(obd); return 0; } static int mdc_cleanup(struct obd_device *obd) { - struct client_obd *cli = &obd->u.cli; - - kfree(cli->cl_rpc_lock); - kfree(cli->cl_close_lock); - ptlrpcd_decref(); return client_obd_cleanup(obd); @@ -2881,7 +2704,6 @@ static struct obd_ops mdc_obd_ops = { .process_config = mdc_process_config, .get_uuid = mdc_get_uuid, .quotactl = mdc_quotactl, - .quotacheck = mdc_quotacheck }; static struct md_ops mdc_md_ops = { @@ -2889,7 +2711,6 @@ static struct md_ops 
mdc_md_ops = { .null_inode = mdc_null_inode, .close = mdc_close, .create = mdc_create, - .done_writing = mdc_done_writing, .enqueue = mdc_enqueue, .getattr = mdc_getattr, .getattr_name = mdc_getattr_name, diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c index 23374cae513365161036a328f09b78d9f89b7c78..b9c522a3c7a4b48ddd3890eacc56a51372233534 100644 --- a/drivers/staging/lustre/lustre/mgc/mgc_request.c +++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c @@ -38,11 +38,13 @@ #define D_MGC D_CONFIG /*|D_WARNING*/ #include -#include "../include/obd_class.h" -#include "../include/lustre_dlm.h" + #include "../include/lprocfs_status.h" -#include "../include/lustre_log.h" +#include "../include/lustre_dlm.h" #include "../include/lustre_disk.h" +#include "../include/lustre_log.h" +#include "../include/lustre_swab.h" +#include "../include/obd_class.h" #include "mgc_internal.h" @@ -373,7 +375,7 @@ static int config_log_add(struct obd_device *obd, char *logname, return rc; } -DEFINE_MUTEX(llog_process_lock); +static DEFINE_MUTEX(llog_process_lock); /** Stop watching for updates on this log. */ @@ -684,35 +686,33 @@ static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd) } static atomic_t mgc_count = ATOMIC_INIT(0); -static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) +static int mgc_precleanup(struct obd_device *obd) { int rc = 0; int temp; - switch (stage) { - case OBD_CLEANUP_EARLY: - break; - case OBD_CLEANUP_EXPORTS: - if (atomic_dec_and_test(&mgc_count)) { - LASSERT(rq_state & RQ_RUNNING); - /* stop requeue thread */ - temp = RQ_STOP; - } else { - /* wakeup requeue thread to clean our cld */ - temp = RQ_NOW | RQ_PRECLEANUP; - } - spin_lock(&config_list_lock); - rq_state |= temp; - spin_unlock(&config_list_lock); - wake_up(&rq_waitq); - if (temp & RQ_STOP) - wait_for_completion(&rq_exit); - obd_cleanup_client_import(obd); - rc = mgc_llog_fini(NULL, obd); - if (rc != 0) - CERROR("failed to cleanup llogging subsystems\n"); - break; + if (atomic_dec_and_test(&mgc_count)) { + LASSERT(rq_state & RQ_RUNNING); + /* stop requeue thread */ + temp = RQ_STOP; + } else { + /* wakeup requeue thread to clean our cld */ + temp = RQ_NOW | RQ_PRECLEANUP; } + + spin_lock(&config_list_lock); + rq_state |= temp; + spin_unlock(&config_list_lock); + wake_up(&rq_waitq); + + if (temp & RQ_STOP) + wait_for_completion(&rq_exit); + obd_cleanup_client_import(obd); + + rc = mgc_llog_fini(NULL, obd); + if (rc) + CERROR("failed to cleanup llogging subsystems\n"); + return rc; } @@ -887,8 +887,8 @@ static int mgc_set_mgs_param(struct obd_export *exp, } /* Take a config lock so we can get cancel notifications */ -static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, - __u32 type, ldlm_policy_data_t *policy, __u32 mode, +static int mgc_enqueue(struct obd_export *exp, __u32 type, + union ldlm_policy_data *policy, __u32 mode, __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb, void *data, __u32 lvb_len, void *lvb_swabber, struct lustre_handle *lockh) @@ -1059,8 +1059,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, } static int mgc_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val, - struct lov_stripe_md *unused) + __u32 keylen, void *key, __u32 *vallen, void *val) { int rc = -EINVAL; @@ -1387,15 +1386,17 @@ static int mgc_process_recover_log(struct obd_device *obd, body->mcb_units = nrpages; /* allocate bulk 
transfer descriptor */ - desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK, - MGS_BULK_PORTAL); + desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, + PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV, + MGS_BULK_PORTAL, + &ptlrpc_bulk_kiov_pin_ops); if (!desc) { rc = -ENOMEM; goto out; } for (i = 0; i < nrpages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); + desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE); ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); @@ -1553,14 +1554,52 @@ static int mgc_process_cfg_log(struct obd_device *mgc, return rc; } -/** Get a config log from the MGS and process it. - * This func is called for both clients and servers. - * Copy the log locally before parsing it if appropriate (non-MGS server) +static bool mgc_import_in_recovery(struct obd_import *imp) +{ + bool in_recovery = true; + + spin_lock(&imp->imp_lock); + if (imp->imp_state == LUSTRE_IMP_FULL || + imp->imp_state == LUSTRE_IMP_CLOSED) + in_recovery = false; + spin_unlock(&imp->imp_lock); + + return in_recovery; +} + +/** + * Get a configuration log from the MGS and process it. + * + * This function is called for both clients and servers to process the + * configuration log from the MGS. The MGC enqueues a DLM lock on the + * log from the MGS, and if the lock gets revoked the MGC will be notified + * by the lock cancellation callback that the config log has changed, + * and will enqueue another MGS lock on it, and then continue processing + * the new additions to the end of the log. + * + * Since the MGC import is not replayable, if the import is being evicted + * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry to process + * the log until recovery is finished or the import is closed. + * + * Make a local copy of the log before parsing it if appropriate (non-MGS + * server) so that the server can start even when the MGS is down. + * + * There shouldn't be multiple processes running process_log at once -- + * sounds like badness. It actually might be fine, as long as they're not + * trying to update from the same log simultaneously, in which case we + * should use a per-log semaphore instead of cld_lock. + * + * \param[in] mgc MGC device by which to fetch the configuration log + * \param[in] cld log processing state (stored in lock callback data) + * + * \retval 0 on success + * \retval negative errno on failure */ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) { struct lustre_handle lockh = { 0 }; __u64 flags = LDLM_FL_NO_LRU; + bool retry = false; int rc = 0, rcl; LASSERT(cld); @@ -1570,6 +1609,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) * we're not trying to update from the same log * simultaneously (in which case we should use a per-log sem.) */ +restart: mutex_lock(&cld->cld_lock); if (cld->cld_stopping) { mutex_unlock(&cld->cld_lock); @@ -1582,7 +1622,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1); /* Get the cfg lock on the llog */ - rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL, + rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL, LCK_CR, &flags, NULL, NULL, NULL, cld, 0, NULL, &lockh); if (rcl == 0) { @@ -1593,18 +1633,57 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) } else { CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl); - /* mark cld_lostlock so that it will requeue - * after MGC becomes available. 
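The new mgc_import_in_recovery() helper below treats the import as still recovering unless its state is FULL or CLOSED, and mgc_process_log() waits on that predicate before retrying a failed enqueue. A small single-threaded model of the state check; the state names and locking are simplified stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum imp_state { IMP_DISCON, IMP_CONNECTING, IMP_FULL, IMP_CLOSED };

struct import {
	pthread_mutex_t lock;
	enum imp_state state;
};

/* recovery is only considered finished once the import is either fully
 * connected or closed; any other state keeps the caller waiting */
static bool import_in_recovery(struct import *imp)
{
	bool in_recovery = true;

	pthread_mutex_lock(&imp->lock);
	if (imp->state == IMP_FULL || imp->state == IMP_CLOSED)
		in_recovery = false;
	pthread_mutex_unlock(&imp->lock);

	return in_recovery;
}

int main(void)
{
	struct import imp = { PTHREAD_MUTEX_INITIALIZER, IMP_CONNECTING };

	printf("%d\n", import_in_recovery(&imp));	/* 1: still recovering */
	imp.state = IMP_FULL;
	printf("%d\n", import_in_recovery(&imp));	/* 0: done */
	return 0;
}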
- */ - cld->cld_lostlock = 1; + if (rcl == -ESHUTDOWN && + atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) { + int secs = cfs_time_seconds(obd_timeout); + struct obd_import *imp; + struct l_wait_info lwi; + + mutex_unlock(&cld->cld_lock); + imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp); + + /* + * Let's force the pinger, and wait the import to be + * connected, note: since mgc import is non-replayable, + * and even the import state is disconnected, it does + * not mean the "recovery" is stopped, so we will keep + * waitting until timeout or the import state is + * FULL or closed + */ + ptlrpc_pinger_force(imp); + + lwi = LWI_TIMEOUT(secs, NULL, NULL); + l_wait_event(imp->imp_recovery_waitq, + !mgc_import_in_recovery(imp), &lwi); + + if (imp->imp_state == LUSTRE_IMP_FULL) { + retry = true; + goto restart; + } else { + mutex_lock(&cld->cld_lock); + cld->cld_lostlock = 1; + } + } else { + /* mark cld_lostlock so that it will requeue + * after MGC becomes available. + */ + cld->cld_lostlock = 1; + } /* Get extra reference, it will be put in requeue thread */ config_log_get(cld); } if (cld_is_recover(cld)) { rc = 0; /* this is not a fatal error for recover log */ - if (rcl == 0) + if (!rcl) { rc = mgc_process_recover_log(mgc, cld); + if (rc) { + CERROR("%s: recover log %s failed: rc = %d not fatal.\n", + mgc->obd_name, cld->cld_logname, rc); + rc = 0; + cld->cld_lostlock = 1; + } + } } else { rc = mgc_process_cfg_log(mgc, cld, rcl != 0); } diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile index b42e109b30e02fa79836088a6dc7ae90fe026eaa..af570c0db15b0a70225a9b5780d7848977c92dcd 100644 --- a/drivers/staging/lustre/lustre/obdclass/Makefile +++ b/drivers/staging/lustre/lustre/obdclass/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_LUSTRE_FS) += obdclass.o -obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \ +obdclass-y := linux/linux-module.o linux/linux-sysctl.o \ llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \ genops.o uuid.o lprocfs_status.o lprocfs_counters.o \ lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \ diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h index e866754a42d53868ee3e2c7ad1b3be0ae57ed5b7..7b403fbd5f945c77166bb0563436070678ee848c 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h +++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h @@ -49,25 +49,6 @@ enum clt_nesting_level { CNL_NR }; -/** - * Counters used to check correctness of cl_lock interface usage. - */ -struct cl_thread_counters { - /** - * Number of outstanding calls to cl_lock_mutex_get() made by the - * current thread. For debugging. - */ - int ctc_nr_locks_locked; - /** List of locked locks. */ - struct lu_ref ctc_locks_locked; - /** Number of outstanding holds on locks. */ - int ctc_nr_held; - /** Number of outstanding uses on locks. */ - int ctc_nr_used; - /** Number of held extent locks. */ - int ctc_nr_locks_acquired; -}; - /** * Thread local state internal for generic cl-code. */ @@ -83,10 +64,6 @@ struct cl_thread_info { */ struct cl_lock_descr clt_descr; struct cl_page_list clt_list; - /** - * Counters for every level of lock nesting. 
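A failure while processing the recovery log is now downgraded: the error is logged, the lost lock is remembered so the requeue thread fetches the log again, and the overall call still succeeds. A minimal model of that "record and continue" error path, with illustrative names and a made-up error value:

#include <stdbool.h>
#include <stdio.h>

struct config_log {
	const char *name;
	bool lostlock;	/* ask the requeue thread to retry later */
};

static int process_recover_log(struct config_log *cld)
{
	return -5;	/* pretend the transfer failed (e.g. -EIO) */
}

/* a recover-log failure is not fatal: report it, mark the log so it is
 * fetched again later, and keep the overall result successful */
static int process_log(struct config_log *cld)
{
	int rc = process_recover_log(cld);

	if (rc) {
		fprintf(stderr, "recover log %s failed: rc = %d (not fatal)\n",
			cld->name, rc);
		cld->lostlock = true;
		rc = 0;
	}
	return rc;
}

int main(void)
{
	struct config_log cld = { "params", false };

	return process_log(&cld);
}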
- */ - struct cl_thread_counters clt_counters[CNL_NR]; /** @} debugging */ /* diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c index bc4b7b6b9a209f4fa67417fc3034c669ef0d3695..3f42457b0d7d3a2bc421275203be45b4e7df2592 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_io.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c @@ -126,6 +126,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io) switch (io->ci_type) { case CIT_READ: case CIT_WRITE: + case CIT_DATA_VERSION: break; case CIT_FAULT: break; @@ -411,7 +412,6 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io) scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); } io->ci_state = CIS_UNLOCKED; - LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired); } EXPORT_SYMBOL(cl_io_unlock); @@ -586,67 +586,32 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io) } EXPORT_SYMBOL(cl_io_end); -static const struct cl_page_slice * -cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page) -{ - const struct cl_page_slice *slice; - - slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type); - LINVRNT(slice); - return slice; -} - /** - * Called by read io, when page has to be read from the server. + * Called by read io, to decide the readahead extent * - * \see cl_io_operations::cio_read_page() + * \see cl_io_operations::cio_read_ahead() */ -int cl_io_read_page(const struct lu_env *env, struct cl_io *io, - struct cl_page *page) +int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io, + pgoff_t start, struct cl_read_ahead *ra) { const struct cl_io_slice *scan; - struct cl_2queue *queue; int result = 0; LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT); - LINVRNT(cl_page_is_owned(page, io)); LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED); LINVRNT(cl_io_invariant(io)); - queue = &io->ci_queue; - - cl_2queue_init(queue); - /* - * ->cio_read_page() methods called in the loop below are supposed to - * never block waiting for network (the only subtle point is the - * creation of new pages for read-ahead that might result in cache - * shrinking, but currently only clean pages are shrunk and this - * requires no network io). - * - * Should this ever starts blocking, retry loop would be needed for - * "parallel io" (see CLO_REPEAT loops in cl_lock.c). - */ cl_io_for_each(scan, io) { - if (scan->cis_iop->cio_read_page) { - const struct cl_page_slice *slice; + if (!scan->cis_iop->cio_read_ahead) + continue; - slice = cl_io_slice_page(scan, page); - LINVRNT(slice); - result = scan->cis_iop->cio_read_page(env, scan, slice); - if (result != 0) - break; - } + result = scan->cis_iop->cio_read_ahead(env, scan, start, ra); + if (result) + break; } - if (result == 0 && queue->c2_qin.pl_nr > 0) - result = cl_io_submit_rw(env, io, CRT_READ, queue); - /* - * Unlock unsent pages in case of error. - */ - cl_page_list_disown(env, io, &queue->c2_qin); - cl_2queue_fini(env, queue); - return result; + return result > 0 ? 0 : result; } -EXPORT_SYMBOL(cl_io_read_page); +EXPORT_SYMBOL(cl_io_read_ahead); /** * Commit a list of contiguous pages into writeback cache. @@ -1079,236 +1044,19 @@ struct cl_io *cl_io_top(struct cl_io *io) } EXPORT_SYMBOL(cl_io_top); -/** - * Adds request slice to the compound request. - * - * This is called by cl_device_operations::cdo_req_init() methods to add a - * per-layer state to the request. 
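cl_io_read_ahead(), which replaces cl_io_read_page() in the hunk below, walks the io slices, stops at the first layer that returns non-zero, and folds a positive return into success. A compact model of that dispatch loop; the slice and ops layout is a simplified stand-in for the real cl_io_slice machinery:

#include <stdio.h>

struct slice;
typedef int (*read_ahead_fn)(const struct slice *, unsigned long start);

struct slice {
	read_ahead_fn read_ahead;	/* may be NULL for a layer */
};

/* call each layer in turn; the first non-zero result ends the walk,
 * and a positive value (layer handled it) is reported as success */
static int io_read_ahead(const struct slice *layers, int n, unsigned long start)
{
	int result = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!layers[i].read_ahead)
			continue;
		result = layers[i].read_ahead(&layers[i], start);
		if (result)
			break;
	}
	return result > 0 ? 0 : result;
}

static int top_ra(const struct slice *s, unsigned long start) { return 0; }
static int osc_ra(const struct slice *s, unsigned long start) { return 1; }

int main(void)
{
	struct slice layers[] = { { top_ra }, { osc_ra } };

	printf("%d\n", io_read_ahead(layers, 2, 0));	/* prints 0 */
	return 0;
}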
New state is added at the end of - * cl_req::crq_layers list, that is, it is at the bottom of the stack. - * - * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add() - */ -void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice, - struct cl_device *dev, - const struct cl_req_operations *ops) -{ - list_add_tail(&slice->crs_linkage, &req->crq_layers); - slice->crs_dev = dev; - slice->crs_ops = ops; - slice->crs_req = req; -} -EXPORT_SYMBOL(cl_req_slice_add); - -static void cl_req_free(const struct lu_env *env, struct cl_req *req) -{ - unsigned i; - - LASSERT(list_empty(&req->crq_pages)); - LASSERT(req->crq_nrpages == 0); - LINVRNT(list_empty(&req->crq_layers)); - LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o)); - - if (req->crq_o) { - for (i = 0; i < req->crq_nrobjs; ++i) { - struct cl_object *obj = req->crq_o[i].ro_obj; - - if (obj) { - lu_object_ref_del_at(&obj->co_lu, - &req->crq_o[i].ro_obj_ref, - "cl_req", req); - cl_object_put(env, obj); - } - } - kfree(req->crq_o); - } - kfree(req); -} - -static int cl_req_init(const struct lu_env *env, struct cl_req *req, - struct cl_page *page) -{ - struct cl_device *dev; - struct cl_page_slice *slice; - int result; - - result = 0; - list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { - dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev); - if (dev->cd_ops->cdo_req_init) { - result = dev->cd_ops->cdo_req_init(env, dev, req); - if (result != 0) - break; - } - } - return result; -} - -/** - * Invokes per-request transfer completion call-backs - * (cl_req_operations::cro_completion()) bottom-to-top. - */ -void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc) -{ - struct cl_req_slice *slice; - - /* - * for the lack of list_for_each_entry_reverse_safe()... - */ - while (!list_empty(&req->crq_layers)) { - slice = list_entry(req->crq_layers.prev, - struct cl_req_slice, crs_linkage); - list_del_init(&slice->crs_linkage); - if (slice->crs_ops->cro_completion) - slice->crs_ops->cro_completion(env, slice, rc); - } - cl_req_free(env, req); -} -EXPORT_SYMBOL(cl_req_completion); - -/** - * Allocates new transfer request. - */ -struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, - enum cl_req_type crt, int nr_objects) -{ - struct cl_req *req; - - LINVRNT(nr_objects > 0); - - req = kzalloc(sizeof(*req), GFP_NOFS); - if (req) { - int result; - - req->crq_type = crt; - INIT_LIST_HEAD(&req->crq_pages); - INIT_LIST_HEAD(&req->crq_layers); - - req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]), - GFP_NOFS); - if (req->crq_o) { - req->crq_nrobjs = nr_objects; - result = cl_req_init(env, req, page); - } else { - result = -ENOMEM; - } - if (result != 0) { - cl_req_completion(env, req, result); - req = ERR_PTR(result); - } - } else { - req = ERR_PTR(-ENOMEM); - } - return req; -} -EXPORT_SYMBOL(cl_req_alloc); - -/** - * Adds a page to a request. 
- */ -void cl_req_page_add(const struct lu_env *env, - struct cl_req *req, struct cl_page *page) -{ - struct cl_object *obj; - struct cl_req_obj *rqo; - unsigned int i; - - LASSERT(list_empty(&page->cp_flight)); - LASSERT(!page->cp_req); - - CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n", - req, req->crq_type, req->crq_nrpages); - - list_add_tail(&page->cp_flight, &req->crq_pages); - ++req->crq_nrpages; - page->cp_req = req; - obj = cl_object_top(page->cp_obj); - for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) { - if (!rqo->ro_obj) { - rqo->ro_obj = obj; - cl_object_get(obj); - lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref, - "cl_req", req); - break; - } - } - LASSERT(i < req->crq_nrobjs); -} -EXPORT_SYMBOL(cl_req_page_add); - -/** - * Removes a page from a request. - */ -void cl_req_page_done(const struct lu_env *env, struct cl_page *page) -{ - struct cl_req *req = page->cp_req; - - LASSERT(!list_empty(&page->cp_flight)); - LASSERT(req->crq_nrpages > 0); - - list_del_init(&page->cp_flight); - --req->crq_nrpages; - page->cp_req = NULL; -} -EXPORT_SYMBOL(cl_req_page_done); - -/** - * Notifies layers that request is about to depart by calling - * cl_req_operations::cro_prep() top-to-bottom. - */ -int cl_req_prep(const struct lu_env *env, struct cl_req *req) -{ - unsigned int i; - int result; - const struct cl_req_slice *slice; - - /* - * Check that the caller of cl_req_alloc() didn't lie about the number - * of objects. - */ - for (i = 0; i < req->crq_nrobjs; ++i) - LASSERT(req->crq_o[i].ro_obj); - - result = 0; - list_for_each_entry(slice, &req->crq_layers, crs_linkage) { - if (slice->crs_ops->cro_prep) { - result = slice->crs_ops->cro_prep(env, slice); - if (result != 0) - break; - } - } - return result; -} -EXPORT_SYMBOL(cl_req_prep); - /** * Fills in attributes that are passed to server together with transfer. Only * attributes from \a flags may be touched. This can be called multiple times * for the same request. */ -void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, - struct cl_req_attr *attr, u64 flags) +void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj, + struct cl_req_attr *attr) { - const struct cl_req_slice *slice; - struct cl_page *page; - unsigned int i; - - LASSERT(!list_empty(&req->crq_pages)); - - /* Take any page to use as a model. 
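With the cl_req machinery removed, transfer attributes are instead filled by walking the object's layers and letting each layer that implements the hook update the shared attribute structure, as the rewritten cl_req_attr_set() shows further down. A simplified model of that layer walk; the layer, ops and attribute names are illustrative:

#include <stdio.h>

struct attr { unsigned long long valid; unsigned int checksum; };

struct layer;
struct layer_ops { void (*attr_set)(const struct layer *, struct attr *); };
struct layer { const struct layer_ops *ops; };

/* every layer that provides the hook gets a chance to add its share
 * of the attributes sent along with the transfer */
static void req_attr_set(const struct layer *layers, int n, struct attr *attr)
{
	int i;

	for (i = 0; i < n; i++)
		if (layers[i].ops->attr_set)
			layers[i].ops->attr_set(&layers[i], attr);
}

static void lov_attr_set(const struct layer *l, struct attr *a) { a->valid |= 0x1; }
static void osc_attr_set(const struct layer *l, struct attr *a) { a->checksum = 42; }

static const struct layer_ops lov_ops = { lov_attr_set };
static const struct layer_ops osc_ops = { osc_attr_set };

int main(void)
{
	struct layer layers[] = { { &lov_ops }, { &osc_ops } };
	struct attr attr = { 0, 0 };

	req_attr_set(layers, 2, &attr);
	printf("valid=%#llx checksum=%u\n", attr.valid, attr.checksum);
	return 0;
}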
*/ - page = list_entry(req->crq_pages.next, struct cl_page, cp_flight); - - for (i = 0; i < req->crq_nrobjs; ++i) { - list_for_each_entry(slice, &req->crq_layers, crs_linkage) { - const struct cl_page_slice *scan; - const struct cl_object *obj; - - scan = cl_page_at(page, - slice->crs_dev->cd_lu_dev.ld_type); - obj = scan->cpl_obj; - if (slice->crs_ops->cro_attr_set) - slice->crs_ops->cro_attr_set(env, slice, obj, - attr + i, flags); - } + struct cl_object *scan; + + cl_object_for_each(scan, obj) { + if (scan->co_ops->coo_req_attr_set) + scan->co_ops->coo_req_attr_set(env, scan, attr); } } EXPORT_SYMBOL(cl_req_attr_set); diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c index 3199dd4a3b72fa89fa7f47644b203c4b8af252cf..f5d4e23c64b7e1f945c83faf264f166eb487e97b 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_object.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c @@ -335,13 +335,74 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, if (obj->co_ops->coo_getstripe) { result = obj->co_ops->coo_getstripe(env, obj, uarg); if (result) - break; + break; } } return result; } EXPORT_SYMBOL(cl_object_getstripe); +/** + * Get fiemap extents from file object. + * + * \param env [in] lustre environment + * \param obj [in] file object + * \param key [in] fiemap request argument + * \param fiemap [out] fiemap extents mapping retrived + * \param buflen [in] max buffer length of @fiemap + * + * \retval 0 success + * \retval < 0 error + */ +int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *key, + struct fiemap *fiemap, size_t *buflen) +{ + struct lu_object_header *top; + int result = 0; + + top = obj->co_lu.lo_header; + list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_fiemap) { + result = obj->co_ops->coo_fiemap(env, obj, key, fiemap, + buflen); + if (result) + break; + } + } + return result; +} +EXPORT_SYMBOL(cl_object_fiemap); + +int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj, + struct cl_layout *cl) +{ + struct lu_object_header *top = obj->co_lu.lo_header; + + list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_layout_get) + return obj->co_ops->coo_layout_get(env, obj, cl); + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(cl_object_layout_get); + +loff_t cl_object_maxbytes(struct cl_object *obj) +{ + struct lu_object_header *top = obj->co_lu.lo_header; + loff_t maxbytes = LLONG_MAX; + + list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_maxbytes) + maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj), + maxbytes); + } + + return maxbytes; +} +EXPORT_SYMBOL(cl_object_maxbytes); + /** * Helper function removing all object locks, and marking object for * deletion. All object pages must have been deleted at this point. @@ -483,36 +544,20 @@ EXPORT_SYMBOL(cl_site_stats_print); * bz20044, bz22683. */ -static LIST_HEAD(cl_envs); -static unsigned int cl_envs_cached_nr; -static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit - * for now. - */ -static DEFINE_SPINLOCK(cl_envs_guard); +static unsigned int cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit + * for now. 
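The new cl_object_maxbytes() above folds the per-layer limits into one value by taking the minimum over every layer that reports a limit. A small model of that reduction, with illustrative layer names and sizes:

#include <limits.h>
#include <stdio.h>

struct layer { long long (*maxbytes)(void); };	/* NULL if layer has no limit */

static long long lov_maxbytes(void) { return 1LL << 44; }
static long long osc_maxbytes(void) { return 1LL << 52; }

/* start from "no limit" and clamp by every layer that defines one,
 * mirroring the list walk in cl_object_maxbytes() */
static long long object_maxbytes(const struct layer *layers, int n)
{
	long long maxbytes = LLONG_MAX;
	int i;

	for (i = 0; i < n; i++) {
		if (layers[i].maxbytes) {
			long long m = layers[i].maxbytes();

			if (m < maxbytes)
				maxbytes = m;
		}
	}
	return maxbytes;
}

int main(void)
{
	struct layer layers[] = { { lov_maxbytes }, { osc_maxbytes }, { NULL } };

	printf("%lld\n", object_maxbytes(layers, 3));
	return 0;
}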
+ */ +static struct cl_env_cache { + rwlock_t cec_guard; + unsigned int cec_count; + struct list_head cec_envs; +} *cl_envs = NULL; struct cl_env { void *ce_magic; struct lu_env ce_lu; struct lu_context ce_ses; - /** - * This allows cl_env to be entered into cl_env_hash which implements - * the current thread -> client environment lookup. - */ - struct hlist_node ce_node; - /** - * Owner for the current cl_env. - * - * If LL_TASK_CL_ENV is defined, this point to the owning current, - * only for debugging purpose ; - * Otherwise hash is used, and this is the key for cfs_hash. - * Now current thread pid is stored. Note using thread pointer would - * lead to unbalanced hash because of its specific allocation locality - * and could be varied for different platforms and OSes, even different - * OS versions. - */ - void *ce_owner; - /* * Linkage into global list of all client environments. Used for * garbage collection. @@ -536,122 +581,13 @@ static void cl_env_init0(struct cl_env *cle, void *debug) { LASSERT(cle->ce_ref == 0); LASSERT(cle->ce_magic == &cl_env_init0); - LASSERT(!cle->ce_debug && !cle->ce_owner); + LASSERT(!cle->ce_debug); cle->ce_ref = 1; cle->ce_debug = debug; CL_ENV_INC(busy); } -/* - * The implementation of using hash table to connect cl_env and thread - */ - -static struct cfs_hash *cl_env_hash; - -static unsigned cl_env_hops_hash(struct cfs_hash *lh, - const void *key, unsigned mask) -{ -#if BITS_PER_LONG == 64 - return cfs_hash_u64_hash((__u64)key, mask); -#else - return cfs_hash_u32_hash((__u32)key, mask); -#endif -} - -static void *cl_env_hops_obj(struct hlist_node *hn) -{ - struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node); - - LASSERT(cle->ce_magic == &cl_env_init0); - return (void *)cle; -} - -static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn) -{ - struct cl_env *cle = cl_env_hops_obj(hn); - - LASSERT(cle->ce_owner); - return (key == cle->ce_owner); -} - -static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn) -{ - struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node); - - LASSERT(cle->ce_magic == &cl_env_init0); -} - -static struct cfs_hash_ops cl_env_hops = { - .hs_hash = cl_env_hops_hash, - .hs_key = cl_env_hops_obj, - .hs_keycmp = cl_env_hops_keycmp, - .hs_object = cl_env_hops_obj, - .hs_get = cl_env_hops_noop, - .hs_put_locked = cl_env_hops_noop, -}; - -static inline struct cl_env *cl_env_fetch(void) -{ - struct cl_env *cle; - - cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid); - LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0)); - return cle; -} - -static inline void cl_env_attach(struct cl_env *cle) -{ - if (cle) { - int rc; - - LASSERT(!cle->ce_owner); - cle->ce_owner = (void *)(long)current->pid; - rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner, - &cle->ce_node); - LASSERT(rc == 0); - } -} - -static inline void cl_env_do_detach(struct cl_env *cle) -{ - void *cookie; - - LASSERT(cle->ce_owner == (void *)(long)current->pid); - cookie = cfs_hash_del(cl_env_hash, cle->ce_owner, - &cle->ce_node); - LASSERT(cookie == cle); - cle->ce_owner = NULL; -} - -static int cl_env_store_init(void) -{ - cl_env_hash = cfs_hash_create("cl_env", - HASH_CL_ENV_BITS, HASH_CL_ENV_BITS, - HASH_CL_ENV_BKT_BITS, 0, - CFS_HASH_MIN_THETA, - CFS_HASH_MAX_THETA, - &cl_env_hops, - CFS_HASH_RW_BKTLOCK); - return cl_env_hash ? 
0 : -ENOMEM; -} - -static void cl_env_store_fini(void) -{ - cfs_hash_putref(cl_env_hash); -} - -static inline struct cl_env *cl_env_detach(struct cl_env *cle) -{ - if (!cle) - cle = cl_env_fetch(); - - if (cle && cle->ce_owner) - cl_env_do_detach(cle); - - return cle; -} - static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) { struct lu_env *env; @@ -701,16 +637,20 @@ static struct lu_env *cl_env_obtain(void *debug) { struct cl_env *cle; struct lu_env *env; + int cpu = get_cpu(); - spin_lock(&cl_envs_guard); - LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs))); - if (cl_envs_cached_nr > 0) { + read_lock(&cl_envs[cpu].cec_guard); + LASSERT(equi(cl_envs[cpu].cec_count == 0, + list_empty(&cl_envs[cpu].cec_envs))); + if (cl_envs[cpu].cec_count > 0) { int rc; - cle = container_of(cl_envs.next, struct cl_env, ce_linkage); + cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env, + ce_linkage); list_del_init(&cle->ce_linkage); - cl_envs_cached_nr--; - spin_unlock(&cl_envs_guard); + cl_envs[cpu].cec_count--; + read_unlock(&cl_envs[cpu].cec_guard); + put_cpu(); env = &cle->ce_lu; rc = lu_env_refill(env); @@ -723,7 +663,8 @@ static struct lu_env *cl_env_obtain(void *debug) env = ERR_PTR(rc); } } else { - spin_unlock(&cl_envs_guard); + read_unlock(&cl_envs[cpu].cec_guard); + put_cpu(); env = cl_env_new(lu_context_tags_default, lu_session_tags_default, debug); } @@ -735,27 +676,6 @@ static inline struct cl_env *cl_env_container(struct lu_env *env) return container_of(env, struct cl_env, ce_lu); } -static struct lu_env *cl_env_peek(int *refcheck) -{ - struct lu_env *env; - struct cl_env *cle; - - CL_ENV_INC(lookup); - - /* check that we don't go far from untrusted pointer */ - CLASSERT(offsetof(struct cl_env, ce_magic) == 0); - - env = NULL; - cle = cl_env_fetch(); - if (cle) { - CL_ENV_INC(hit); - env = &cle->ce_lu; - *refcheck = ++cle->ce_ref; - } - CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle); - return env; -} - /** * Returns lu_env: if there already is an environment associated with the * current thread, it is returned, otherwise, new environment is allocated. 
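The hash-based thread-to-environment lookup is replaced by a per-CPU cache: obtaining or returning an environment only touches the local CPU's list under its own guard. A simplified single-threaded model of that cache shape; the field names follow the patch loosely and omit the locking and refill details:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct env { struct env *next; };

/* one small cache per CPU: a count plus a singly linked free list */
struct env_cache {
	unsigned int count;
	struct env *head;
} caches[NR_CPUS];

static struct env *env_obtain(int cpu)
{
	struct env_cache *c = &caches[cpu];
	struct env *e;

	if (c->count) {			/* reuse a cached environment */
		e = c->head;
		c->head = e->next;
		c->count--;
		return e;
	}
	return calloc(1, sizeof(*e));	/* otherwise allocate a fresh one */
}

static void env_put(int cpu, struct env *e)
{
	struct env_cache *c = &caches[cpu];

	e->next = c->head;		/* return it to the local list */
	c->head = e;
	c->count++;
}

int main(void)
{
	struct env *e = env_obtain(0);

	if (e)
		env_put(0, e);
	printf("cpu0 cached: %u\n", caches[0].count);
	return 0;
}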
@@ -773,17 +693,13 @@ struct lu_env *cl_env_get(int *refcheck) { struct lu_env *env; - env = cl_env_peek(refcheck); - if (!env) { - env = cl_env_obtain(__builtin_return_address(0)); - if (!IS_ERR(env)) { - struct cl_env *cle; + env = cl_env_obtain(__builtin_return_address(0)); + if (!IS_ERR(env)) { + struct cl_env *cle; - cle = cl_env_container(env); - cl_env_attach(cle); - *refcheck = cle->ce_ref; - CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); - } + cle = cl_env_container(env); + *refcheck = cle->ce_ref; + CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); } return env; } @@ -798,7 +714,6 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags) { struct lu_env *env; - LASSERT(!cl_env_peek(refcheck)); env = cl_env_new(tags, tags, __builtin_return_address(0)); if (!IS_ERR(env)) { struct cl_env *cle; @@ -813,7 +728,6 @@ EXPORT_SYMBOL(cl_env_alloc); static void cl_env_exit(struct cl_env *cle) { - LASSERT(!cle->ce_owner); lu_context_exit(&cle->ce_lu.le_ctx); lu_context_exit(&cle->ce_ses); } @@ -826,20 +740,25 @@ static void cl_env_exit(struct cl_env *cle) unsigned int cl_env_cache_purge(unsigned int nr) { struct cl_env *cle; + unsigned int i; - spin_lock(&cl_envs_guard); - for (; !list_empty(&cl_envs) && nr > 0; --nr) { - cle = container_of(cl_envs.next, struct cl_env, ce_linkage); - list_del_init(&cle->ce_linkage); - LASSERT(cl_envs_cached_nr > 0); - cl_envs_cached_nr--; - spin_unlock(&cl_envs_guard); + for_each_possible_cpu(i) { + write_lock(&cl_envs[i].cec_guard); + for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) { + cle = container_of(cl_envs[i].cec_envs.next, + struct cl_env, ce_linkage); + list_del_init(&cle->ce_linkage); + LASSERT(cl_envs[i].cec_count > 0); + cl_envs[i].cec_count--; + write_unlock(&cl_envs[i].cec_guard); - cl_env_fini(cle); - spin_lock(&cl_envs_guard); + cl_env_fini(cle); + write_lock(&cl_envs[i].cec_guard); + } + LASSERT(equi(cl_envs[i].cec_count == 0, + list_empty(&cl_envs[i].cec_envs))); + write_unlock(&cl_envs[i].cec_guard); } - LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs))); - spin_unlock(&cl_envs_guard); return nr; } EXPORT_SYMBOL(cl_env_cache_purge); @@ -862,8 +781,9 @@ void cl_env_put(struct lu_env *env, int *refcheck) CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); if (--cle->ce_ref == 0) { + int cpu = get_cpu(); + CL_ENV_DEC(busy); - cl_env_detach(cle); cle->ce_debug = NULL; cl_env_exit(cle); /* @@ -872,106 +792,21 @@ void cl_env_put(struct lu_env *env, int *refcheck) * Return environment to the cache only when it was allocated * with the standard tags. */ - if (cl_envs_cached_nr < cl_envs_cached_max && + if (cl_envs[cpu].cec_count < cl_envs_cached_max && (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD && (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) { - spin_lock(&cl_envs_guard); - list_add(&cle->ce_linkage, &cl_envs); - cl_envs_cached_nr++; - spin_unlock(&cl_envs_guard); + read_lock(&cl_envs[cpu].cec_guard); + list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs); + cl_envs[cpu].cec_count++; + read_unlock(&cl_envs[cpu].cec_guard); } else { cl_env_fini(cle); } + put_cpu(); } } EXPORT_SYMBOL(cl_env_put); -/** - * Declares a point of re-entrancy. - * - * \see cl_env_reexit() - */ -void *cl_env_reenter(void) -{ - return cl_env_detach(NULL); -} -EXPORT_SYMBOL(cl_env_reenter); - -/** - * Exits re-entrancy. - */ -void cl_env_reexit(void *cookie) -{ - cl_env_detach(NULL); - cl_env_attach(cookie); -} -EXPORT_SYMBOL(cl_env_reexit); - -/** - * Setup user-supplied \a env as a current environment. 
This is to be used to - * guaranteed that environment exists even when cl_env_get() fails. It is up - * to user to ensure proper concurrency control. - * - * \see cl_env_unplant() - */ -void cl_env_implant(struct lu_env *env, int *refcheck) -{ - struct cl_env *cle = cl_env_container(env); - - LASSERT(cle->ce_ref > 0); - - cl_env_attach(cle); - cl_env_get(refcheck); - CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); -} -EXPORT_SYMBOL(cl_env_implant); - -/** - * Detach environment installed earlier by cl_env_implant(). - */ -void cl_env_unplant(struct lu_env *env, int *refcheck) -{ - struct cl_env *cle = cl_env_container(env); - - LASSERT(cle->ce_ref > 1); - - CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); - - cl_env_detach(cle); - cl_env_put(env, refcheck); -} -EXPORT_SYMBOL(cl_env_unplant); - -struct lu_env *cl_env_nested_get(struct cl_env_nest *nest) -{ - struct lu_env *env; - - nest->cen_cookie = NULL; - env = cl_env_peek(&nest->cen_refcheck); - if (env) { - if (!cl_io_is_going(env)) - return env; - cl_env_put(env, &nest->cen_refcheck); - nest->cen_cookie = cl_env_reenter(); - } - env = cl_env_get(&nest->cen_refcheck); - if (IS_ERR(env)) { - cl_env_reexit(nest->cen_cookie); - return env; - } - - LASSERT(!cl_io_is_going(env)); - return env; -} -EXPORT_SYMBOL(cl_env_nested_get); - -void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env) -{ - cl_env_put(env, &nest->cen_refcheck); - cl_env_reexit(nest->cen_cookie); -} -EXPORT_SYMBOL(cl_env_nested_put); - /** * Converts struct ost_lvb to struct cl_attr. * @@ -999,6 +834,10 @@ static int cl_env_percpu_init(void) for_each_possible_cpu(i) { struct lu_env *env; + rwlock_init(&cl_envs[i].cec_guard); + INIT_LIST_HEAD(&cl_envs[i].cec_envs); + cl_envs[i].cec_count = 0; + cle = &cl_env_percpu[i]; env = &cle->ce_lu; @@ -1066,7 +905,6 @@ void cl_env_percpu_put(struct lu_env *env) LASSERT(cle->ce_ref == 0); CL_ENV_DEC(busy); - cl_env_detach(cle); cle->ce_debug = NULL; put_cpu(); @@ -1080,7 +918,6 @@ struct lu_env *cl_env_percpu_get(void) cle = &cl_env_percpu[get_cpu()]; cl_env_init0(cle, __builtin_return_address(0)); - cl_env_attach(cle); return &cle->ce_lu; } EXPORT_SYMBOL(cl_env_percpu_get); @@ -1144,51 +981,19 @@ LU_KEY_INIT_FINI(cl0, struct cl_thread_info); static void *cl_key_init(const struct lu_context *ctx, struct lu_context_key *key) { - struct cl_thread_info *info; - - info = cl0_key_init(ctx, key); - if (!IS_ERR(info)) { - size_t i; - - for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) - lu_ref_init(&info->clt_counters[i].ctc_locks_locked); - } - return info; + return cl0_key_init(ctx, key); } static void cl_key_fini(const struct lu_context *ctx, struct lu_context_key *key, void *data) { - struct cl_thread_info *info; - size_t i; - - info = data; - for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) - lu_ref_fini(&info->clt_counters[i].ctc_locks_locked); cl0_key_fini(ctx, key, data); } -static void cl_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct cl_thread_info *info = data; - size_t i; - - for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) { - LASSERT(info->clt_counters[i].ctc_nr_held == 0); - LASSERT(info->clt_counters[i].ctc_nr_used == 0); - LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0); - LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0); - lu_ref_fini(&info->clt_counters[i].ctc_locks_locked); - lu_ref_init(&info->clt_counters[i].ctc_locks_locked); - } -} - static struct lu_context_key cl_key = { .lct_tags = LCT_CL_THREAD, .lct_init = cl_key_init, .lct_fini = 
cl_key_fini, - .lct_exit = cl_key_exit }; static struct lu_kmem_descr cl_object_caches[] = { @@ -1212,13 +1017,15 @@ int cl_global_init(void) { int result; - result = cl_env_store_init(); - if (result) - return result; + cl_envs = kzalloc(sizeof(*cl_envs) * num_possible_cpus(), GFP_KERNEL); + if (!cl_envs) { + result = -ENOMEM; + goto out; + } result = lu_kmem_init(cl_object_caches); if (result) - goto out_store; + goto out_envs; LU_CONTEXT_KEY_INIT(&cl_key); result = lu_context_key_register(&cl_key); @@ -1228,16 +1035,17 @@ int cl_global_init(void) result = cl_env_percpu_init(); if (result) /* no cl_env_percpu_fini on error */ - goto out_context; + goto out_keys; return 0; -out_context: +out_keys: lu_context_key_degister(&cl_key); out_kmem: lu_kmem_fini(cl_object_caches); -out_store: - cl_env_store_fini(); +out_envs: + kfree(cl_envs); +out: return result; } @@ -1249,5 +1057,5 @@ void cl_global_fini(void) cl_env_percpu_fini(); lu_context_key_degister(&cl_key); lu_kmem_fini(cl_object_caches); - cl_env_store_fini(); + kfree(cl_envs); } diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c index 63973ba096dac58eb844559f010df8bd7f49be60..cd9a40ca4448960a6130a7bc8e745ab3276f4a56 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_page.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c @@ -99,7 +99,6 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) PASSERT(env, page, list_empty(&page->cp_batch)); PASSERT(env, page, !page->cp_owner); - PASSERT(env, page, !page->cp_req); PASSERT(env, page, page->cp_state == CPS_FREEING); while (!list_empty(&page->cp_layers)) { @@ -150,7 +149,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env, page->cp_type = type; INIT_LIST_HEAD(&page->cp_layers); INIT_LIST_HEAD(&page->cp_batch); - INIT_LIST_HEAD(&page->cp_flight); lu_ref_init(&page->cp_reference); head = o->co_lu.lo_header; list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) { @@ -390,30 +388,6 @@ EXPORT_SYMBOL(cl_page_at); __result; \ }) -#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \ -({ \ - const struct lu_env *__env = (_env); \ - struct cl_page *__page = (_page); \ - const struct cl_page_slice *__scan; \ - int __result; \ - ptrdiff_t __op = (_op); \ - int (*__method)_proto; \ - \ - __result = 0; \ - list_for_each_entry_reverse(__scan, &__page->cp_layers, \ - cpl_linkage) { \ - __method = *(void **)((char *)__scan->cpl_ops + __op); \ - if (__method) { \ - __result = (*__method)(__env, __scan, ## __VA_ARGS__); \ - if (__result != 0) \ - break; \ - } \ - } \ - if (__result > 0) \ - __result = 0; \ - __result; \ -}) - #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \ do { \ const struct lu_env *__env = (_env); \ @@ -552,7 +526,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io, io, nonblock); if (result == 0) { PASSERT(env, pg, !pg->cp_owner); - PASSERT(env, pg, !pg->cp_req); pg->cp_owner = cl_io_top(io); cl_page_owner_set(pg); if (pg->cp_state != CPS_FREEING) { @@ -694,7 +667,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg) PASSERT(env, pg, pg->cp_state != CPS_FREEING); /* - * Severe all ways to obtain new pointers to @pg. + * Sever all ways to obtain new pointers to @pg. 
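cl_global_init() in the hunk above now allocates the per-CPU cache array first and unwinds with the usual goto ladder if a later step fails, freeing the array again on the error path. A condensed model of that init/teardown ordering; the step names and error value are placeholders:

#include <stdio.h>
#include <stdlib.h>

static void *cl_envs;

static int register_keys(void) { return 0; }	/* pretend success */
static void unregister_keys(void) { }

/* allocate first, then register; on failure undo the earlier steps in
 * reverse order, as cl_global_init() does with its out_* labels */
static int global_init(int ncpus)
{
	int rc;

	cl_envs = calloc(ncpus, 64);
	if (!cl_envs)
		return -12;		/* -ENOMEM */

	rc = register_keys();
	if (rc)
		goto out_envs;

	return 0;

out_envs:
	free(cl_envs);
	cl_envs = NULL;
	return rc;
}

static void global_fini(void)
{
	unregister_keys();
	free(cl_envs);
}

int main(void)
{
	int rc = global_init(4);

	if (!rc)
		global_fini();
	printf("rc=%d\n", rc);
	return 0;
}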
*/ cl_page_owner_clear(pg); @@ -845,8 +818,6 @@ void cl_page_completion(const struct lu_env *env, struct cl_sync_io *anchor = pg->cp_sync_io; PASSERT(env, pg, crt < CRT_NR); - /* cl_page::cp_req already cleared by the caller (osc_completion()) */ - PASSERT(env, pg, !pg->cp_req); PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt)); CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret); @@ -860,16 +831,8 @@ void cl_page_completion(const struct lu_env *env, if (anchor) { LASSERT(pg->cp_sync_io == anchor); pg->cp_sync_io = NULL; - } - /* - * As page->cp_obj is pinned by a reference from page->cp_req, it is - * safe to call cl_page_put() without risking object destruction in a - * non-blocking context. - */ - cl_page_put(env, pg); - - if (anchor) cl_sync_io_note(env, anchor, ioret); + } } EXPORT_SYMBOL(cl_page_completion); @@ -926,29 +889,6 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io, } EXPORT_SYMBOL(cl_page_flush); -/** - * Checks whether page is protected by any extent lock is at least required - * mode. - * - * \return the same as in cl_page_operations::cpo_is_under_lock() method. - * \see cl_page_operations::cpo_is_under_lock() - */ -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, pgoff_t *max_index) -{ - int rc; - - PINVRNT(env, page, cl_page_invariant(page)); - - rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock), - (const struct lu_env *, - const struct cl_page_slice *, - struct cl_io *, pgoff_t *), - io, max_index); - return rc; -} -EXPORT_SYMBOL(cl_page_is_under_lock); - /** * Tells transfer engine that only part of a page is to be transmitted. * @@ -974,10 +914,10 @@ void cl_page_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_page *pg) { (*printer)(env, cookie, - "page@%p[%d %p %d %d %p %p]\n", + "page@%p[%d %p %d %d %p]\n", pg, atomic_read(&pg->cp_ref), pg->cp_obj, pg->cp_state, pg->cp_type, - pg->cp_owner, pg->cp_req); + pg->cp_owner); } EXPORT_SYMBOL(cl_page_header_print); diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c index cf8bb2a2f40b788f2739f7bd1d30d7571025bd08..fa0d38ddccb21aa1d0f9cc8b26ad694e4f88a549 100644 --- a/drivers/staging/lustre/lustre/obdclass/genops.c +++ b/drivers/staging/lustre/lustre/obdclass/genops.c @@ -907,6 +907,8 @@ struct obd_import *class_new_import(struct obd_device *obd) INIT_LIST_HEAD(&imp->imp_sending_list); INIT_LIST_HEAD(&imp->imp_delayed_list); INIT_LIST_HEAD(&imp->imp_committed_list); + INIT_LIST_HEAD(&imp->imp_unreplied_list); + imp->imp_known_replied_xid = 0; imp->imp_replay_cursor = &imp->imp_committed_list; spin_lock_init(&imp->imp_lock); imp->imp_last_success_conn = 0; @@ -1408,13 +1410,33 @@ EXPORT_SYMBOL(obd_get_max_rpcs_in_flight); int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max) { struct obd_request_slot_waiter *orsw; + const char *typ_name; __u32 old; int diff; + int rc; int i; if (max > OBD_MAX_RIF_MAX || max < 1) return -ERANGE; + typ_name = cli->cl_import->imp_obd->obd_type->typ_name; + if (!strcmp(typ_name, LUSTRE_MDC_NAME)) { + /* + * adjust max_mod_rpcs_in_flight to ensure it is always + * strictly lower that max_rpcs_in_flight + */ + if (max < 2) { + CERROR("%s: cannot set max_rpcs_in_flight to 1 because it must be higher than max_mod_rpcs_in_flight value\n", + cli->cl_import->imp_obd->obd_name); + return -ERANGE; + } + if (max <= cli->cl_max_mod_rpcs_in_flight) { + rc = obd_set_max_mod_rpcs_in_flight(cli, max - 1); + if 
(rc) + return rc; + } + } + spin_lock(&cli->cl_loi_list_lock); old = cli->cl_max_rpcs_in_flight; cli->cl_max_rpcs_in_flight = max; @@ -1436,3 +1458,209 @@ int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max) return 0; } EXPORT_SYMBOL(obd_set_max_rpcs_in_flight); + +int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, __u16 max) +{ + struct obd_connect_data *ocd; + u16 maxmodrpcs; + u16 prev; + + if (max > OBD_MAX_RIF_MAX || max < 1) + return -ERANGE; + + /* cannot exceed or equal max_rpcs_in_flight */ + if (max >= cli->cl_max_rpcs_in_flight) { + CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher or equal to max_rpcs_in_flight value (%u)\n", + cli->cl_import->imp_obd->obd_name, + max, cli->cl_max_rpcs_in_flight); + return -ERANGE; + } + + /* cannot exceed max modify RPCs in flight supported by the server */ + ocd = &cli->cl_import->imp_connect_data; + if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) + maxmodrpcs = ocd->ocd_maxmodrpcs; + else + maxmodrpcs = 1; + if (max > maxmodrpcs) { + CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than max_mod_rpcs_per_client value (%hu) returned by the server at connection\n", + cli->cl_import->imp_obd->obd_name, + max, maxmodrpcs); + return -ERANGE; + } + + spin_lock(&cli->cl_mod_rpcs_lock); + + prev = cli->cl_max_mod_rpcs_in_flight; + cli->cl_max_mod_rpcs_in_flight = max; + + /* wakeup waiters if limit has been increased */ + if (cli->cl_max_mod_rpcs_in_flight > prev) + wake_up(&cli->cl_mod_rpcs_waitq); + + spin_unlock(&cli->cl_mod_rpcs_lock); + + return 0; +} +EXPORT_SYMBOL(obd_set_max_mod_rpcs_in_flight); + +#define pct(a, b) (b ? (a * 100) / b : 0) + +int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq) +{ + unsigned long mod_tot = 0, mod_cum; + struct timespec64 now; + int i; + + ktime_get_real_ts64(&now); + + spin_lock(&cli->cl_mod_rpcs_lock); + + seq_printf(seq, "snapshot_time: %llu.%9lu (secs.nsecs)\n", + (s64)now.tv_sec, (unsigned long)now.tv_nsec); + seq_printf(seq, "modify_RPCs_in_flight: %hu\n", + cli->cl_mod_rpcs_in_flight); + + seq_puts(seq, "\n\t\t\tmodify\n"); + seq_puts(seq, "rpcs in flight rpcs %% cum %%\n"); + + mod_tot = lprocfs_oh_sum(&cli->cl_mod_rpcs_hist); + + mod_cum = 0; + for (i = 0; i < OBD_HIST_MAX; i++) { + unsigned long mod = cli->cl_mod_rpcs_hist.oh_buckets[i]; + + mod_cum += mod; + seq_printf(seq, "%d:\t\t%10lu %3lu %3lu\n", + i, mod, pct(mod, mod_tot), + pct(mod_cum, mod_tot)); + if (mod_cum == mod_tot) + break; + } + + spin_unlock(&cli->cl_mod_rpcs_lock); + + return 0; +} +EXPORT_SYMBOL(obd_mod_rpc_stats_seq_show); +#undef pct + +/* + * The number of modify RPCs sent in parallel is limited + * because the server has a finite number of slots per client to + * store request result and ensure reply reconstruction when needed. + * On the client, this limit is stored in cl_max_mod_rpcs_in_flight + * that takes into account server limit and cl_max_rpcs_in_flight + * value. + * On the MDC client, to avoid a potential deadlock (see Bugzilla 3462), + * one close request is allowed above the maximum. 
+ */ +static inline bool obd_mod_rpc_slot_avail_locked(struct client_obd *cli, + bool close_req) +{ + bool avail; + + /* A slot is available if + * - number of modify RPCs in flight is less than the max + * - it's a close RPC and no other close request is in flight + */ + avail = cli->cl_mod_rpcs_in_flight < cli->cl_max_mod_rpcs_in_flight || + (close_req && !cli->cl_close_rpcs_in_flight); + + return avail; +} + +static inline bool obd_mod_rpc_slot_avail(struct client_obd *cli, + bool close_req) +{ + bool avail; + + spin_lock(&cli->cl_mod_rpcs_lock); + avail = obd_mod_rpc_slot_avail_locked(cli, close_req); + spin_unlock(&cli->cl_mod_rpcs_lock); + return avail; +} + +/* Get a modify RPC slot from the obd client @cli according + * to the kind of operation @opc that is going to be sent + * and the intent @it of the operation if it applies. + * If the maximum number of modify RPCs in flight is reached + * the thread is put to sleep. + * Returns the tag to be set in the request message. Tag 0 + * is reserved for non-modifying requests. + */ +u16 obd_get_mod_rpc_slot(struct client_obd *cli, __u32 opc, + struct lookup_intent *it) +{ + struct l_wait_info lwi = LWI_INTR(NULL, NULL); + bool close_req = false; + u16 i, max; + + /* read-only metadata RPCs don't consume a slot on MDT + * for reply reconstruction + */ + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT || it->it_op == IT_READDIR)) + return 0; + + if (opc == MDS_CLOSE) + close_req = true; + + do { + spin_lock(&cli->cl_mod_rpcs_lock); + max = cli->cl_max_mod_rpcs_in_flight; + if (obd_mod_rpc_slot_avail_locked(cli, close_req)) { + /* there is a slot available */ + cli->cl_mod_rpcs_in_flight++; + if (close_req) + cli->cl_close_rpcs_in_flight++; + lprocfs_oh_tally(&cli->cl_mod_rpcs_hist, + cli->cl_mod_rpcs_in_flight); + /* find a free tag */ + i = find_first_zero_bit(cli->cl_mod_tag_bitmap, + max + 1); + LASSERT(i < OBD_MAX_RIF_MAX); + LASSERT(!test_and_set_bit(i, cli->cl_mod_tag_bitmap)); + spin_unlock(&cli->cl_mod_rpcs_lock); + /* tag 0 is reserved for non-modify RPCs */ + return i + 1; + } + spin_unlock(&cli->cl_mod_rpcs_lock); + + CDEBUG(D_RPCTRACE, "%s: sleeping for a modify RPC slot opc %u, max %hu\n", + cli->cl_import->imp_obd->obd_name, opc, max); + + l_wait_event(cli->cl_mod_rpcs_waitq, + obd_mod_rpc_slot_avail(cli, close_req), &lwi); + } while (true); +} +EXPORT_SYMBOL(obd_get_mod_rpc_slot); + +/* + * Put a modify RPC slot from the obd client @cli according + * to the kind of operation @opc that has been sent and the + * intent @it of the operation if it applies. 
+ */ +void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc, + struct lookup_intent *it, u16 tag) +{ + bool close_req = false; + + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT || it->it_op == IT_READDIR)) + return; + + if (opc == MDS_CLOSE) + close_req = true; + + spin_lock(&cli->cl_mod_rpcs_lock); + cli->cl_mod_rpcs_in_flight--; + if (close_req) + cli->cl_close_rpcs_in_flight--; + /* release the tag in the bitmap */ + LASSERT(tag - 1 < OBD_MAX_RIF_MAX); + LASSERT(test_and_clear_bit(tag - 1, cli->cl_mod_tag_bitmap) != 0); + spin_unlock(&cli->cl_mod_rpcs_lock); + wake_up(&cli->cl_mod_rpcs_waitq); +} +EXPORT_SYMBOL(obd_put_mod_rpc_slot); diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c index be09e04b042f43de6ede301d160ebf3818e73532..9f5e8299d7e4b3c22643f599e4ec42504a263cc8 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c @@ -217,8 +217,8 @@ static ssize_t pinger_show(struct kobject *kobj, struct attribute *attr, return sprintf(buf, "%s\n", "on"); } -static ssize_t health_show(struct kobject *kobj, struct attribute *attr, - char *buf) +static ssize_t +health_check_show(struct kobject *kobj, struct attribute *attr, char *buf) { bool healthy = true; int i; @@ -311,14 +311,14 @@ EXPORT_SYMBOL_GPL(debugfs_lustre_root); LUSTRE_RO_ATTR(version); LUSTRE_RO_ATTR(pinger); -LUSTRE_RO_ATTR(health); +LUSTRE_RO_ATTR(health_check); LUSTRE_RW_ATTR(jobid_var); LUSTRE_RW_ATTR(jobid_name); static struct attribute *lustre_attrs[] = { &lustre_attr_version.attr, &lustre_attr_pinger.attr, - &lustre_attr_health.attr, + &lustre_attr_health_check.attr, &lustre_attr_jobid_name.attr, &lustre_attr_jobid_var.attr, NULL, diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c deleted file mode 100644 index 41b77a30feb3cc768ebe8d455c07a5f09060daa0..0000000000000000000000000000000000000000 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/obdclass/linux/linux-obdo.c - * - * Object Devices Class Driver - * These are the only exported functions, they provide some generic - * infrastructure for managing object devices - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include "../../include/obd_class.h" -#include "../../include/lustre/lustre_idl.h" - -#include - -void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid) -{ - valid &= src->o_valid; - - if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME)) - CDEBUG(D_INODE, - "valid %#llx, cur time %lu/%lu, new %llu/%llu\n", - src->o_valid, LTIME_S(dst->i_mtime), - LTIME_S(dst->i_ctime), src->o_mtime, src->o_ctime); - - if (valid & OBD_MD_FLATIME && src->o_atime > LTIME_S(dst->i_atime)) - LTIME_S(dst->i_atime) = src->o_atime; - if (valid & OBD_MD_FLMTIME && src->o_mtime > LTIME_S(dst->i_mtime)) - LTIME_S(dst->i_mtime) = src->o_mtime; - if (valid & OBD_MD_FLCTIME && src->o_ctime > LTIME_S(dst->i_ctime)) - LTIME_S(dst->i_ctime) = src->o_ctime; - if (valid & OBD_MD_FLSIZE) - i_size_write(dst, src->o_size); - /* optimum IO size */ - if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) - dst->i_blkbits = ffs(src->o_blksize) - 1; - - if (dst->i_blkbits < PAGE_SHIFT) - dst->i_blkbits = PAGE_SHIFT; - - /* allocation of space */ - if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks) - /* - * XXX shouldn't overflow be checked here like in - * obdo_to_inode(). - */ - dst->i_blocks = src->o_blocks; -} -EXPORT_SYMBOL(obdo_refresh_inode); diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c index 43797f1067455eef475353765f50d3bf35f1ba50..736ea1067c93fc902c1e99bda4578946899d9ae8 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog.c +++ b/drivers/staging/lustre/lustre/obdclass/llog.c @@ -43,8 +43,9 @@ #define DEBUG_SUBSYSTEM S_LOG -#include "../include/obd_class.h" +#include "../include/llog_swab.h" #include "../include/lustre_log.h" +#include "../include/obd_class.h" #include "llog_internal.h" /* @@ -80,8 +81,7 @@ static void llog_free_handle(struct llog_handle *loghandle) LASSERT(list_empty(&loghandle->u.phd.phd_entry)); else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) LASSERT(list_empty(&loghandle->u.chd.chd_head)); - LASSERT(sizeof(*loghandle->lgh_hdr) == LLOG_CHUNK_SIZE); - kfree(loghandle->lgh_hdr); + kvfree(loghandle->lgh_hdr); out: kfree(loghandle); } @@ -115,20 +115,29 @@ static int llog_read_header(const struct lu_env *env, rc = lop->lop_read_header(env, handle); if (rc == LLOG_EEMPTY) { struct llog_log_hdr *llh = handle->lgh_hdr; + size_t len; + /* lrh_len should be initialized in llog_init_handle */ handle->lgh_last_idx = 0; /* header is record with index 0 */ llh->llh_count = 1; /* for the header record */ llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC; - llh->llh_hdr.lrh_len = LLOG_CHUNK_SIZE; - llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE; + LASSERT(handle->lgh_ctxt->loc_chunk_size >= LLOG_MIN_CHUNK_SIZE); + llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size; llh->llh_hdr.lrh_index = 0; - llh->llh_tail.lrt_index = 0; llh->llh_timestamp = ktime_get_real_seconds(); if (uuid) memcpy(&llh->llh_tgtuuid, uuid, sizeof(llh->llh_tgtuuid)); llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap); - ext2_set_bit(0, llh->llh_bitmap); + /* + * Since update llog header might also call this function, + * let's reset the bitmap to 0 here + */ + len = llh->llh_hdr.lrh_len - llh->llh_bitmap_offset; + memset(LLOG_HDR_BITMAP(llh), 0, len - sizeof(llh->llh_tail)); + ext2_set_bit(0, 
LLOG_HDR_BITMAP(llh)); + LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len; + LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index; rc = 0; } return rc; @@ -137,16 +146,19 @@ static int llog_read_header(const struct lu_env *env, int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, int flags, struct obd_uuid *uuid) { + int chunk_size = handle->lgh_ctxt->loc_chunk_size; enum llog_flag fmt = flags & LLOG_F_EXT_MASK; struct llog_log_hdr *llh; int rc; LASSERT(!handle->lgh_hdr); - llh = kzalloc(sizeof(*llh), GFP_NOFS); + LASSERT(chunk_size >= LLOG_MIN_CHUNK_SIZE); + llh = libcfs_kvzalloc(sizeof(*llh), GFP_NOFS); if (!llh) return -ENOMEM; handle->lgh_hdr = llh; + handle->lgh_hdr_size = chunk_size; /* first assign flags to use llog_client_ops */ llh->llh_flags = flags; rc = llog_read_header(env, handle, uuid); @@ -189,6 +201,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, LASSERT(list_empty(&handle->u.chd.chd_head)); INIT_LIST_HEAD(&handle->u.chd.chd_head); llh->llh_size = sizeof(struct llog_logid_rec); + llh->llh_flags |= LLOG_F_IS_FIXSIZE; } else if (!(flags & LLOG_F_IS_PLAIN)) { CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n", handle->lgh_ctxt->loc_obd->obd_name, @@ -198,7 +211,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, llh->llh_flags |= fmt; out: if (rc) { - kfree(llh); + kvfree(llh); handle->lgh_hdr = NULL; } return rc; @@ -212,15 +225,21 @@ static int llog_process_thread(void *arg) struct llog_log_hdr *llh = loghandle->lgh_hdr; struct llog_process_cat_data *cd = lpi->lpi_catdata; char *buf; - __u64 cur_offset = LLOG_CHUNK_SIZE; - __u64 last_offset; + u64 cur_offset, tmp_offset; + int chunk_size; int rc = 0, index = 1, last_index; int saved_index = 0; int last_called_index = 0; - LASSERT(llh); + if (!llh) + return -EINVAL; + + cur_offset = llh->llh_hdr.lrh_len; + chunk_size = llh->llh_hdr.lrh_len; + /* expect chunk_size to be power of two */ + LASSERT(is_power_of_2(chunk_size)); - buf = kzalloc(LLOG_CHUNK_SIZE, GFP_NOFS); + buf = libcfs_kvzalloc(chunk_size, GFP_NOFS); if (!buf) { lpi->lpi_rc = -ENOMEM; return 0; @@ -233,41 +252,53 @@ static int llog_process_thread(void *arg) if (cd && cd->lpcd_last_idx) last_index = cd->lpcd_last_idx; else - last_index = LLOG_BITMAP_BYTES * 8 - 1; - - /* Record is not in this buffer. */ - if (index > last_index) - goto out; + last_index = LLOG_HDR_BITMAP_SIZE(llh) - 1; while (rc == 0) { + unsigned int buf_offset = 0; struct llog_rec_hdr *rec; + bool partial_chunk; + off_t chunk_offset; /* skip records not set in bitmap */ while (index <= last_index && - !ext2_test_bit(index, llh->llh_bitmap)) + !ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) ++index; - LASSERT(index <= last_index + 1); - if (index == last_index + 1) + if (index > last_index) break; -repeat: + CDEBUG(D_OTHER, "index: %d last_index %d\n", index, last_index); - +repeat: /* get the buf with our target record; avoid old garbage */ - memset(buf, 0, LLOG_CHUNK_SIZE); - last_offset = cur_offset; + memset(buf, 0, chunk_size); rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index, - index, &cur_offset, buf, LLOG_CHUNK_SIZE); + index, &cur_offset, buf, chunk_size); if (rc) goto out; + /* + * NB: after llog_next_block() call the cur_offset is the + * offset of the next block after read one. + * The absolute offset of the current chunk is calculated + * from cur_offset value and stored in chunk_offset variable. 
+ */ + tmp_offset = cur_offset; + if (do_div(tmp_offset, chunk_size)) { + partial_chunk = true; + chunk_offset = cur_offset & ~(chunk_size - 1); + } else { + partial_chunk = false; + chunk_offset = cur_offset - chunk_size; + } + /* NB: when rec->lrh_len is accessed it is already swabbed * since it is used at the "end" of the loop and the rec * swabbing is done at the beginning of the loop. */ - for (rec = (struct llog_rec_hdr *)buf; - (char *)rec < buf + LLOG_CHUNK_SIZE; + for (rec = (struct llog_rec_hdr *)(buf + buf_offset); + (char *)rec < buf + chunk_size; rec = llog_rec_hdr_next(rec)) { CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n", rec, rec->lrh_type); @@ -278,15 +309,29 @@ static int llog_process_thread(void *arg) CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n", rec->lrh_type, rec->lrh_index); - if (rec->lrh_index == 0) { - /* probably another rec just got added? */ - rc = 0; - if (index <= loghandle->lgh_last_idx) - goto repeat; - goto out; /* no more records */ + /* + * for partial chunk the end of it is zeroed, check + * for index 0 to distinguish it. + */ + if (partial_chunk && !rec->lrh_index) { + /* concurrent llog_add() might add new records + * while llog_processing, check this is not + * the case and re-read the current chunk + * otherwise. + */ + if (index > loghandle->lgh_last_idx) { + rc = 0; + goto out; + } + CDEBUG(D_OTHER, "Re-read last llog buffer for new records, index %u, last %u\n", + index, loghandle->lgh_last_idx); + /* save offset inside buffer for the re-read */ + buf_offset = (char *)rec - (char *)buf; + cur_offset = chunk_offset; + goto repeat; } - if (rec->lrh_len == 0 || - rec->lrh_len > LLOG_CHUNK_SIZE) { + + if (!rec->lrh_len || rec->lrh_len > chunk_size) { CWARN("invalid length %d in llog record for index %d/%d\n", rec->lrh_len, rec->lrh_index, index); @@ -300,32 +345,38 @@ static int llog_process_thread(void *arg) continue; } + if (rec->lrh_index != index) { + CERROR("%s: Invalid record: index %u but expected %u\n", + loghandle->lgh_ctxt->loc_obd->obd_name, + rec->lrh_index, index); + rc = -ERANGE; + goto out; + } + CDEBUG(D_OTHER, "lrh_index: %d lrh_len: %d (%d remains)\n", rec->lrh_index, rec->lrh_len, - (int)(buf + LLOG_CHUNK_SIZE - (char *)rec)); + (int)(buf + chunk_size - (char *)rec)); loghandle->lgh_cur_idx = rec->lrh_index; loghandle->lgh_cur_offset = (char *)rec - (char *)buf + - last_offset; + chunk_offset; /* if set, process the callback on this record */ - if (ext2_test_bit(index, llh->llh_bitmap)) { + if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) { rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec, lpi->lpi_cbdata); last_called_index = index; if (rc) goto out; - } else { - CDEBUG(D_OTHER, "Skipped index %d\n", index); } - /* next record, still in buffer? 
*/ - ++index; - if (index > last_index) { + /* exit if the last index is reached */ + if (index >= last_index) { rc = 0; goto out; } + index++; } } diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c index a4277d684614f53d3e0e402621bca7f52e8ec1ea..8574ad401f6655aaf0ddd222ebd9ad85d85c3867 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c @@ -158,6 +158,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd, mutex_init(&ctxt->loc_mutex); ctxt->loc_exp = class_export_get(disk_obd->obd_self_export); ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED; + ctxt->loc_chunk_size = LLOG_MIN_CHUNK_SIZE; rc = llog_group_set_ctxt(olg, ctxt, index); if (rc) { diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c index 8c4c1b3f1b452b1fa206fd2c576d520d56492fd2..723c212c6747bd394082d8f96f62e995f637cdb0 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c @@ -38,6 +38,7 @@ #define DEBUG_SUBSYSTEM S_LOG +#include "../include/llog_swab.h" #include "../include/lustre_log.h" static void print_llogd_body(struct llogd_body *d) @@ -244,7 +245,7 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec) __swab32s(&llh->llh_flags); __swab32s(&llh->llh_size); __swab32s(&llh->llh_cat_idx); - tail = &llh->llh_tail; + tail = LLOG_HDR_TAIL(llh); break; } case LLOG_LOGID_MAGIC: @@ -290,8 +291,10 @@ static void print_llog_hdr(struct llog_log_hdr *h) CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags); CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size); CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx); - CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", h->llh_tail.lrt_index); - CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len); + CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", + LLOG_HDR_TAIL(h)->lrt_index); + CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", + LLOG_HDR_TAIL(h)->lrt_len); } void lustre_swab_llog_hdr(struct llog_log_hdr *h) diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c index 852a5acfefabd30994a31bb537a43d15966ed310..2c99717b0abaf241eb2e93d01e683c32e4f05f7f 100644 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c +++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c @@ -100,9 +100,13 @@ static const char * const obd_connect_names[] = { "lfsck", "unknown", "unlink_close", - "unknown", + "multi_mod_rpcs", "dir_stripe", - "unknown", + "subtree", + "lock_ahead", + "bulk_mbits", + "compact_obdo", + "second_flags", NULL }; @@ -127,7 +131,7 @@ EXPORT_SYMBOL(obd_connect_flags2str); static void obd_connect_data_seqprint(struct seq_file *m, struct obd_connect_data *ocd) { - int flags; + u64 flags; LASSERT(ocd); flags = ocd->ocd_connect_flags; @@ -172,6 +176,9 @@ static void obd_connect_data_seqprint(struct seq_file *m, if (flags & OBD_CONNECT_MAXBYTES) seq_printf(m, " max_object_bytes: %llx\n", ocd->ocd_maxbytes); + if (flags & OBD_CONNECT_MULTIMODRPCS) + seq_printf(m, " max_mod_rpcs: %hu\n", + ocd->ocd_maxmodrpcs); } int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val, @@ -396,10 +403,17 @@ int lprocfs_wr_uint(struct file *file, const char __user *buffer, char dummy[MAX_STRING_SIZE + 1], *end; unsigned long tmp; - dummy[MAX_STRING_SIZE] = '\0'; - if (copy_from_user(dummy, buffer, MAX_STRING_SIZE)) + if (count >= 
sizeof(dummy)) + return -EINVAL; + + if (count == 0) + return 0; + + if (copy_from_user(dummy, buffer, count)) return -EFAULT; + dummy[count] = '\0'; + tmp = simple_strtoul(dummy, &end, 0); if (dummy == end) return -EINVAL; @@ -1275,7 +1289,8 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name, EXPORT_SYMBOL_GPL(ldebugfs_register_stats); void lprocfs_counter_init(struct lprocfs_stats *stats, int index, - unsigned conf, const char *name, const char *units) + unsigned int conf, const char *name, + const char *units) { struct lprocfs_counter_header *header; struct lprocfs_counter *percpu_cntr; diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c index 054e567e6c8d452c04159b772b6ce8ab82b7e21e..7971562a3efd40371fa676160463c6dbfac38a4c 100644 --- a/drivers/staging/lustre/lustre/obdclass/lu_object.c +++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c @@ -68,6 +68,7 @@ enum { #define LU_SITE_BITS_MIN 12 #define LU_SITE_BITS_MAX 24 +#define LU_SITE_BITS_MAX_CL 19 /** * total 256 buckets, we don't want too many buckets because: * - consume too much memory @@ -338,7 +339,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) struct cfs_hash_bd bd2; struct list_head dispose; int did_sth; - unsigned int start; + unsigned int start = 0; int count; int bnr; unsigned int i; @@ -351,7 +352,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) * Under LRU list lock, scan LRU list and move unreferenced objects to * the dispose list, removing them from LRU and hash table. */ - start = s->ls_purge_start; + if (nr != ~0) + start = s->ls_purge_start; bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1; again: /* @@ -877,6 +879,9 @@ static unsigned long lu_htable_order(struct lu_device *top) unsigned long cache_size; unsigned long bits; + if (!strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME)) + bits_max = LU_SITE_BITS_MAX_CL; + /* * Calculate hash table size, assuming that we want reasonable * performance when 20% of total memory is occupied by cache of @@ -909,8 +914,8 @@ static unsigned long lu_htable_order(struct lu_device *top) return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max); } -static unsigned lu_obj_hop_hash(struct cfs_hash *hs, - const void *key, unsigned mask) +static unsigned int lu_obj_hop_hash(struct cfs_hash *hs, + const void *key, unsigned int mask) { struct lu_fid *fid = (struct lu_fid *)key; __u32 hash; @@ -1311,6 +1316,7 @@ enum { static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, }; static DEFINE_SPINLOCK(lu_keys_guard); +static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0); /** * Global counter incremented whenever key is registered, unregistered, @@ -1318,7 +1324,7 @@ static DEFINE_SPINLOCK(lu_keys_guard); * lu_context_refill(). No locking is provided, as initialization and shutdown * are supposed to be externally serialized. */ -static unsigned key_set_version; +static unsigned int key_set_version; /** * Register new key. @@ -1385,6 +1391,19 @@ void lu_context_key_degister(struct lu_context_key *key) ++key_set_version; spin_lock(&lu_keys_guard); key_fini(&lu_shrink_env.le_ctx, key->lct_index); + + /** + * Wait until all transient contexts referencing this key have + * run lu_context_key::lct_fini() method. + */ + while (atomic_read(&key->lct_used) > 1) { + spin_unlock(&lu_keys_guard); + CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n", + key->lct_owner ? 
key->lct_owner->name : "", key, + atomic_read(&key->lct_used)); + schedule(); + spin_lock(&lu_keys_guard); + } if (lu_keys[key->lct_index]) { lu_keys[key->lct_index] = NULL; lu_ref_fini(&key->lct_reference); @@ -1506,15 +1525,26 @@ void lu_context_key_quiesce(struct lu_context_key *key) struct lu_context *ctx; if (!(key->lct_tags & LCT_QUIESCENT)) { - /* - * XXX layering violation. - */ - cl_env_cache_purge(~0); - key->lct_tags |= LCT_QUIESCENT; /* * XXX memory barrier has to go here. */ spin_lock(&lu_keys_guard); + key->lct_tags |= LCT_QUIESCENT; + + /** + * Wait until all lu_context_key::lct_init() methods + * have completed. + */ + while (atomic_read(&lu_key_initing_cnt) > 0) { + spin_unlock(&lu_keys_guard); + CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\" %p, %d (%d)\n", + key->lct_owner ? key->lct_owner->name : "", + key, atomic_read(&key->lct_used), + atomic_read(&lu_key_initing_cnt)); + schedule(); + spin_lock(&lu_keys_guard); + } + list_for_each_entry(ctx, &lu_context_remembered, lc_remember) key_fini(ctx, key->lct_index); spin_unlock(&lu_keys_guard); @@ -1546,6 +1576,19 @@ static int keys_fill(struct lu_context *ctx) { unsigned int i; + /* + * A serialisation with lu_context_key_quiesce() is needed, but some + * "key->lct_init()" are calling kernel memory allocation routine and + * can't be called while holding a spin_lock. + * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt" + * to ensure the start of the serialisation. + * An atomic_t variable is still used, in order not to reacquire the + * lock when decrementing the counter. + */ + spin_lock(&lu_keys_guard); + atomic_inc(&lu_key_initing_cnt); + spin_unlock(&lu_keys_guard); + LINVRNT(ctx->lc_value); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; @@ -1563,12 +1606,19 @@ static int keys_fill(struct lu_context *ctx) LINVRNT(key->lct_init); LINVRNT(key->lct_index == i); + LASSERT(key->lct_owner); + if (!(ctx->lc_tags & LCT_NOREF) && + !try_module_get(key->lct_owner)) { + /* module is unloading, skip this key */ + continue; + } + value = key->lct_init(ctx, key); - if (IS_ERR(value)) + if (unlikely(IS_ERR(value))) { + atomic_dec(&lu_key_initing_cnt); return PTR_ERR(value); + } - if (!(ctx->lc_tags & LCT_NOREF)) - try_module_get(key->lct_owner); lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); atomic_inc(&key->lct_used); /* @@ -1582,6 +1632,7 @@ static int keys_fill(struct lu_context *ctx) } ctx->lc_version = key_set_version; } + atomic_dec(&lu_key_initing_cnt); return 0; } @@ -1663,6 +1714,9 @@ void lu_context_exit(struct lu_context *ctx) ctx->lc_state = LCS_LEFT; if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) { for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { + /* could race with key quiescency */ + if (ctx->lc_tags & LCT_REMEMBER) + spin_lock(&lu_keys_guard); if (ctx->lc_value[i]) { struct lu_context_key *key; @@ -1671,6 +1725,8 @@ void lu_context_exit(struct lu_context *ctx) key->lct_exit(ctx, key, ctx->lc_value[i]); } + if (ctx->lc_tags & LCT_REMEMBER) + spin_unlock(&lu_keys_guard); } } } @@ -1930,7 +1986,7 @@ int lu_site_stats_print(const struct lu_site *s, struct seq_file *m) memset(&stats, 0, sizeof(stats)); lu_site_stats_get(s->ls_obj_hash, &stats, 1); - seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n", + seq_printf(m, "%d/%d %d/%ld %d %d %d %d %d %d %d %d\n", stats.lss_busy, stats.lss_total, stats.lss_populated, diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c index 
bbed1b72d52e022f22ace6ce27f3712ad378cb72..9ca84c7d49dea090d28d6bda356cc029dadd9882 100644 --- a/drivers/staging/lustre/lustre/obdclass/obd_config.c +++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c @@ -35,12 +35,15 @@ */ #define DEBUG_SUBSYSTEM S_CLASS -#include "../include/obd_class.h" + #include + #include "../include/lustre/lustre_ioctl.h" -#include "../include/lustre_log.h" +#include "../include/llog_swab.h" #include "../include/lprocfs_status.h" +#include "../include/lustre_log.h" #include "../include/lustre_param.h" +#include "../include/obd_class.h" #include "llog_internal.h" @@ -446,7 +449,7 @@ static int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg) LASSERT(obd->obd_self_export); /* Precleanup, we must make sure all exports get destroyed. */ - err = obd_precleanup(obd, OBD_CLEANUP_EXPORTS); + err = obd_precleanup(obd); if (err) CERROR("Precleanup %s returned %d\n", obd->obd_name, err); @@ -585,16 +588,21 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg) } static LIST_HEAD(lustre_profile_list); +static DEFINE_SPINLOCK(lustre_profile_list_lock); struct lustre_profile *class_get_profile(const char *prof) { struct lustre_profile *lprof; + spin_lock(&lustre_profile_list_lock); list_for_each_entry(lprof, &lustre_profile_list, lp_list) { if (!strcmp(lprof->lp_profile, prof)) { + lprof->lp_refs++; + spin_unlock(&lustre_profile_list_lock); return lprof; } } + spin_unlock(&lustre_profile_list_lock); return NULL; } EXPORT_SYMBOL(class_get_profile); @@ -639,7 +647,11 @@ static int class_add_profile(int proflen, char *prof, int osclen, char *osc, } } + spin_lock(&lustre_profile_list_lock); + lprof->lp_refs = 1; + lprof->lp_list_deleted = false; list_add(&lprof->lp_list, &lustre_profile_list); + spin_unlock(&lustre_profile_list_lock); return err; free_lp_dt: @@ -659,27 +671,59 @@ void class_del_profile(const char *prof) lprof = class_get_profile(prof); if (lprof) { + spin_lock(&lustre_profile_list_lock); + /* because get profile increments the ref counter */ + lprof->lp_refs--; list_del(&lprof->lp_list); - kfree(lprof->lp_profile); - kfree(lprof->lp_dt); - kfree(lprof->lp_md); - kfree(lprof); + lprof->lp_list_deleted = true; + spin_unlock(&lustre_profile_list_lock); + + class_put_profile(lprof); } } EXPORT_SYMBOL(class_del_profile); +void class_put_profile(struct lustre_profile *lprof) +{ + spin_lock(&lustre_profile_list_lock); + if (--lprof->lp_refs > 0) { + LASSERT(lprof->lp_refs > 0); + spin_unlock(&lustre_profile_list_lock); + return; + } + spin_unlock(&lustre_profile_list_lock); + + /* confirm not a negative number */ + LASSERT(!lprof->lp_refs); + + /* + * At least one class_del_profile/profiles must be called + * on the target profile or lustre_profile_list will corrupt + */ + LASSERT(lprof->lp_list_deleted); + kfree(lprof->lp_profile); + kfree(lprof->lp_dt); + kfree(lprof->lp_md); + kfree(lprof); +} +EXPORT_SYMBOL(class_put_profile); + /* COMPAT_146 */ void class_del_profiles(void) { struct lustre_profile *lprof, *n; + spin_lock(&lustre_profile_list_lock); list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) { list_del(&lprof->lp_list); - kfree(lprof->lp_profile); - kfree(lprof->lp_dt); - kfree(lprof->lp_md); - kfree(lprof); + lprof->lp_list_deleted = true; + spin_unlock(&lustre_profile_list_lock); + + class_put_profile(lprof); + + spin_lock(&lustre_profile_list_lock); } + spin_unlock(&lustre_profile_list_lock); } EXPORT_SYMBOL(class_del_profiles); @@ -1406,8 +1450,8 @@ EXPORT_SYMBOL(class_manual_cleanup); * uuid<->export 
lustre hash operations */ -static unsigned -uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask) +static unsigned int +uuid_hash(struct cfs_hash *hs, const void *key, unsigned int mask) { return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid, sizeof(((struct obd_uuid *)key)->uuid), mask); diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c index 0d3a3b05a6373843e8c03975ba9fb252d6e5b2e1..2283e920d839ef0712f192068ff94ea671d2c779 100644 --- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c +++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c @@ -261,7 +261,7 @@ int lustre_start_mgc(struct super_block *sb) rc = obd_get_info(NULL, obd->obd_self_export, strlen(KEY_CONN_DATA), KEY_CONN_DATA, - &vallen, data, NULL); + &vallen, data); LASSERT(rc == 0); has_ir = OCD_HAS_FLAG(data, IMP_RECOV); if (has_ir ^ !(*flags & LMD_FLG_NOIR)) { @@ -382,7 +382,7 @@ int lustre_start_mgc(struct super_block *sb) /* We connect to the MGS at setup, and don't disconnect until cleanup */ data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT | OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | - OBD_CONNECT_LVB_TYPE; + OBD_CONNECT_LVB_TYPE | OBD_CONNECT_BULK_MBITS; #if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB; @@ -1216,8 +1216,7 @@ static struct file_system_type lustre_fs_type = { .name = "lustre", .mount = lustre_mount, .kill_sb = lustre_kill_super, - .fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV | - FS_RENAME_DOES_D_MOVE, + .fs_flags = FS_REQUIRES_DEV | FS_RENAME_DOES_D_MOVE, }; MODULE_ALIAS_FS("lustre"); diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c index 79104a66da96b68da15fc1e9474871eeed965852..c52b9e07d7ddc673591c1434b989f7c61b24567d 100644 --- a/drivers/staging/lustre/lustre/obdclass/obdo.c +++ b/drivers/staging/lustre/lustre/obdclass/obdo.c @@ -124,68 +124,3 @@ void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj) ioobj->ioo_max_brw = 0; } EXPORT_SYMBOL(obdo_to_ioobj); - -static void iattr_from_obdo(struct iattr *attr, const struct obdo *oa, - u32 valid) -{ - valid &= oa->o_valid; - - if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME)) - CDEBUG(D_INODE, "valid %#llx, new time %llu/%llu\n", - oa->o_valid, oa->o_mtime, oa->o_ctime); - - attr->ia_valid = 0; - if (valid & OBD_MD_FLATIME) { - LTIME_S(attr->ia_atime) = oa->o_atime; - attr->ia_valid |= ATTR_ATIME; - } - if (valid & OBD_MD_FLMTIME) { - LTIME_S(attr->ia_mtime) = oa->o_mtime; - attr->ia_valid |= ATTR_MTIME; - } - if (valid & OBD_MD_FLCTIME) { - LTIME_S(attr->ia_ctime) = oa->o_ctime; - attr->ia_valid |= ATTR_CTIME; - } - if (valid & OBD_MD_FLSIZE) { - attr->ia_size = oa->o_size; - attr->ia_valid |= ATTR_SIZE; - } -#if 0 /* you shouldn't be able to change a file's type with setattr */ - if (valid & OBD_MD_FLTYPE) { - attr->ia_mode = (attr->ia_mode & ~S_IFMT) | - (oa->o_mode & S_IFMT); - attr->ia_valid |= ATTR_MODE; - } -#endif - if (valid & OBD_MD_FLMODE) { - attr->ia_mode = (attr->ia_mode & S_IFMT) | - (oa->o_mode & ~S_IFMT); - attr->ia_valid |= ATTR_MODE; - if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) && - !capable(CFS_CAP_FSETID)) - attr->ia_mode &= ~S_ISGID; - } - if (valid & OBD_MD_FLUID) { - attr->ia_uid = make_kuid(&init_user_ns, oa->o_uid); - attr->ia_valid |= ATTR_UID; - } - if (valid & OBD_MD_FLGID) { - attr->ia_gid = make_kgid(&init_user_ns, oa->o_gid); - attr->ia_valid |= ATTR_GID; - } -} - -void md_from_obdo(struct 
md_op_data *op_data, const struct obdo *oa, u32 valid) -{ - iattr_from_obdo(&op_data->op_attr, oa, valid); - if (valid & OBD_MD_FLBLOCKS) { - op_data->op_attr_blocks = oa->o_blocks; - op_data->op_attr.ia_valid |= ATTR_BLOCKS; - } - if (valid & OBD_MD_FLFLAGS) { - op_data->op_attr_flags = oa->o_flags; - op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG; - } -} -EXPORT_SYMBOL(md_from_obdo); diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c index 505582ff4d1e6c034ffb9c7e11dda46ce5e59bd5..549076193bde4fa054a4fb9bd98ed22fa1cffe4f 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c +++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c @@ -55,7 +55,7 @@ struct echo_device { struct echo_client_obd *ed_ec; struct cl_site ed_site_myself; - struct cl_site *ed_site; + struct lu_site *ed_site; struct lu_device *ed_next; }; @@ -505,9 +505,6 @@ static const struct lu_device_operations echo_device_lu_ops = { /** @} echo_lu_dev_ops */ -static const struct cl_device_operations echo_device_cl_ops = { -}; - /** \defgroup echo_init Setup and teardown * * Init and fini functions for echo client. @@ -527,17 +524,19 @@ static int echo_site_init(const struct lu_env *env, struct echo_device *ed) } rc = lu_site_init_finish(&site->cs_lu); - if (rc) + if (rc) { + cl_site_fini(site); return rc; + } - ed->ed_site = site; + ed->ed_site = &site->cs_lu; return 0; } static void echo_site_fini(const struct lu_env *env, struct echo_device *ed) { if (ed->ed_site) { - cl_site_fini(ed->ed_site); + lu_site_fini(ed->ed_site); ed->ed_site = NULL; } } @@ -561,16 +560,10 @@ static void echo_thread_key_fini(const struct lu_context *ctx, kmem_cache_free(echo_thread_kmem, info); } -static void echo_thread_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ -} - static struct lu_context_key echo_thread_key = { .lct_tags = LCT_CL_THREAD, .lct_init = echo_thread_key_init, .lct_fini = echo_thread_key_fini, - .lct_exit = echo_thread_key_exit }; static void *echo_session_key_init(const struct lu_context *ctx, @@ -592,16 +585,10 @@ static void echo_session_key_fini(const struct lu_context *ctx, kmem_cache_free(echo_session_kmem, session); } -static void echo_session_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ -} - static struct lu_context_key echo_session_key = { .lct_tags = LCT_SESSION, .lct_init = echo_session_key_init, .lct_fini = echo_session_key_fini, - .lct_exit = echo_session_key_exit }; LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key); @@ -630,7 +617,6 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, goto out_free; cd->cd_lu_dev.ld_ops = &echo_device_lu_ops; - cd->cd_ops = &echo_device_cl_ops; obd = class_name2obd(lustre_cfg_string(cfg, 0)); LASSERT(obd); @@ -674,7 +660,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, goto out_cleanup; } - next->ld_site = &ed->ed_site->cs_lu; + next->ld_site = ed->ed_site; rc = next->ld_type->ldt_ops->ldto_device_init(env, next, next->ld_type->ldt_name, NULL); @@ -741,7 +727,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env, CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n", ed, next); - lu_site_purge(env, &ed->ed_site->cs_lu, -1); + lu_site_purge(env, ed->ed_site, -1); /* check if there are objects still alive. 
* It shouldn't have any object because lu_site_purge would cleanup @@ -754,7 +740,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env, spin_unlock(&ec->ec_lock); /* purge again */ - lu_site_purge(env, &ed->ed_site->cs_lu, -1); + lu_site_purge(env, ed->ed_site, -1); CDEBUG(D_INFO, "Waiting for the reference of echo object to be dropped\n"); @@ -766,7 +752,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env, CERROR("echo_client still has objects at cleanup time, wait for 1 second\n"); set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(1)); - lu_site_purge(env, &ed->ed_site->cs_lu, -1); + lu_site_purge(env, ed->ed_site, -1); spin_lock(&ec->ec_lock); } spin_unlock(&ec->ec_lock); @@ -780,11 +766,13 @@ static struct lu_device *echo_device_free(const struct lu_env *env, while (next) next = next->ld_type->ldt_ops->ldto_device_free(env, next); - LASSERT(ed->ed_site == lu2cl_site(d->ld_site)); + LASSERT(ed->ed_site == d->ld_site); echo_site_fini(env, ed); cl_device_fini(&ed->ed_cl); kfree(ed); + cl_env_cache_purge(~0); + return NULL; } @@ -1100,7 +1088,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, static u64 last_object_id; static int echo_create_object(const struct lu_env *env, struct echo_device *ed, - struct obdo *oa, struct obd_trans_info *oti) + struct obdo *oa) { struct echo_object *eco; struct echo_client_obd *ec = ed->ed_ec; @@ -1117,7 +1105,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, if (!ostid_id(&oa->o_oi)) ostid_set_id(&oa->o_oi, ++last_object_id); - rc = obd_create(env, ec->ec_exp, oa, oti); + rc = obd_create(env, ec->ec_exp, oa); if (rc != 0) { CERROR("Cannot create objects: rc = %d\n", rc); goto failed; @@ -1137,7 +1125,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, failed: if (created && rc) - obd_destroy(env, ec->ec_exp, oa, oti); + obd_destroy(env, ec->ec_exp, oa); if (rc) CERROR("create object failed with: rc = %d\n", rc); return rc; @@ -1237,8 +1225,7 @@ static int echo_client_page_debug_check(struct page *page, u64 id, static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, struct echo_object *eco, u64 offset, - u64 count, int async, - struct obd_trans_info *oti) + u64 count, int async) { u32 npages; struct brw_page *pga; @@ -1332,12 +1319,11 @@ static int echo_client_prep_commit(const struct lu_env *env, struct obd_export *exp, int rw, struct obdo *oa, struct echo_object *eco, u64 offset, u64 count, - u64 batch, struct obd_trans_info *oti, - int async) + u64 batch, int async) { struct obd_ioobj ioo; struct niobuf_local *lnb; - struct niobuf_remote *rnb; + struct niobuf_remote rnb; u64 off; u64 npages, tot_pages; int i, ret = 0, brw_flags = 0; @@ -1349,9 +1335,7 @@ static int echo_client_prep_commit(const struct lu_env *env, tot_pages = count >> PAGE_SHIFT; lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); - rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS); - - if (!lnb || !rnb) { + if (!lnb) { ret = -ENOMEM; goto out; } @@ -1363,26 +1347,22 @@ static int echo_client_prep_commit(const struct lu_env *env, off = offset; - for (; tot_pages; tot_pages -= npages) { + for (; tot_pages > 0; tot_pages -= npages) { int lpages; if (tot_pages < npages) npages = tot_pages; - for (i = 0; i < npages; i++, off += PAGE_SIZE) { - rnb[i].rnb_offset = off; - rnb[i].rnb_len = PAGE_SIZE; - rnb[i].rnb_flags = brw_flags; - } - - ioo.ioo_bufcnt = npages; + rnb.rnb_offset = off; + 
rnb.rnb_len = npages * PAGE_SIZE; + rnb.rnb_flags = brw_flags; + ioo.ioo_bufcnt = 1; + off += npages * PAGE_SIZE; lpages = npages; - ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages, - lnb, oti); + ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb); if (ret != 0) goto out; - LASSERT(lpages == npages); for (i = 0; i < lpages; i++) { struct page *page = lnb[i].lnb_page; @@ -1401,24 +1381,21 @@ static int echo_client_prep_commit(const struct lu_env *env, if (rw == OBD_BRW_WRITE) echo_client_page_debug_setup(page, rw, - ostid_id(&oa->o_oi), - rnb[i].rnb_offset, - rnb[i].rnb_len); + ostid_id(&oa->o_oi), + lnb[i].lnb_file_offset, + lnb[i].lnb_len); else echo_client_page_debug_check(page, - ostid_id(&oa->o_oi), - rnb[i].rnb_offset, - rnb[i].rnb_len); + ostid_id(&oa->o_oi), + lnb[i].lnb_file_offset, + lnb[i].lnb_len); } - ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, - rnb, npages, lnb, oti, ret); + ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb, + ret); if (ret != 0) goto out; - /* Reset oti otherwise it would confuse ldiskfs. */ - memset(oti, 0, sizeof(*oti)); - /* Reuse env context. */ lu_context_exit((struct lu_context *)&env->le_ctx); lu_context_enter((struct lu_context *)&env->le_ctx); @@ -1426,14 +1403,12 @@ static int echo_client_prep_commit(const struct lu_env *env, out: kfree(lnb); - kfree(rnb); return ret; } static int echo_client_brw_ioctl(const struct lu_env *env, int rw, struct obd_export *exp, - struct obd_ioctl_data *data, - struct obd_trans_info *dummy_oti) + struct obd_ioctl_data *data) { struct obd_device *obd = class_exp2obd(exp); struct echo_device *ed = obd2echo_dev(obd); @@ -1470,15 +1445,13 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw, case 1: /* fall through */ case 2: - rc = echo_client_kbrw(ed, rw, oa, - eco, data->ioc_offset, - data->ioc_count, async, dummy_oti); + rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset, + data->ioc_count, async); break; case 3: - rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, - eco, data->ioc_offset, - data->ioc_count, data->ioc_plen1, - dummy_oti, async); + rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco, + data->ioc_offset, data->ioc_count, + data->ioc_plen1, async); break; default: rc = -EINVAL; @@ -1496,16 +1469,11 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, struct echo_client_obd *ec = ed->ed_ec; struct echo_object *eco; struct obd_ioctl_data *data = karg; - struct obd_trans_info dummy_oti; struct lu_env *env; - struct oti_req_ack_lock *ack_lock; struct obdo *oa; struct lu_fid fid; int rw = OBD_BRW_READ; int rc = 0; - int i; - - memset(&dummy_oti, 0, sizeof(dummy_oti)); oa = &data->ioc_obdo1; if (!(oa->o_valid & OBD_MD_FLGROUP)) { @@ -1535,7 +1503,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, goto out; } - rc = echo_create_object(env, ed, oa, &dummy_oti); + rc = echo_create_object(env, ed, oa); goto out; case OBD_IOC_DESTROY: @@ -1546,7 +1514,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - rc = obd_destroy(env, ec->ec_exp, oa, &dummy_oti); + rc = obd_destroy(env, ec->ec_exp, oa); if (rc == 0) eco->eo_deleted = 1; echo_put_object(eco); @@ -1556,11 +1524,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, case OBD_IOC_GETATTR: rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - struct obd_info oinfo = { - .oi_oa = oa, - }; - - rc = obd_getattr(env, ec->ec_exp, &oinfo); + rc = 
obd_getattr(env, ec->ec_exp, oa); echo_put_object(eco); } goto out; @@ -1573,11 +1537,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - struct obd_info oinfo = { - .oi_oa = oa, - }; - - rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL); + rc = obd_setattr(env, ec->ec_exp, oa); echo_put_object(eco); } goto out; @@ -1591,7 +1551,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rw = OBD_BRW_WRITE; /* fall through */ case OBD_IOC_BRW_READ: - rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti); + rc = echo_client_brw_ioctl(env, rw, exp, data); goto out; default: @@ -1604,14 +1564,6 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, lu_env_fini(env); kfree(env); - /* XXX this should be in a helper also called by target_send_reply */ - for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4; - i++, ack_lock++) { - if (!ack_lock->mode) - break; - ldlm_lock_decref(&ack_lock->lock, ack_lock->mode); - } - return rc; } diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c index f0062d44ee031ef3f5d4f81611f9392ceefdd6d1..575b2969ad83325908fde5e5d3d277ff465040df 100644 --- a/drivers/staging/lustre/lustre/osc/lproc_osc.c +++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c @@ -162,7 +162,7 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ if (pages_number <= 0 || - pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) || + pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) || pages_number > totalram_pages / 4) /* 1/4 of RAM */ return -ERANGE; @@ -183,10 +183,12 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v) seq_printf(m, "used_mb: %ld\n" - "busy_cnt: %ld\n", + "busy_cnt: %ld\n" + "reclaim: %llu\n", (atomic_long_read(&cli->cl_lru_in_list) + atomic_long_read(&cli->cl_lru_busy)) >> shift, - atomic_long_read(&cli->cl_lru_busy)); + atomic_long_read(&cli->cl_lru_busy), + cli->cl_lru_reclaim); return 0; } @@ -585,7 +587,8 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj, chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1); /* max_pages_per_rpc must be chunk aligned */ val = (val + ~chunk_mask) & chunk_mask; - if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) { + if (!val || (ocd->ocd_brw_size && + val > ocd->ocd_brw_size >> PAGE_SHIFT)) { return -ERANGE; } spin_lock(&cli->cl_loi_list_lock); diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index 4bbe219add981393989d3d51b192b20b46276a58..b0f030c6c9c93d838760a7255951734e7c13da42 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -360,6 +360,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj) RB_CLEAR_NODE(&ext->oe_node); ext->oe_obj = obj; + cl_object_get(osc2cl(obj)); atomic_set(&ext->oe_refc, 1); atomic_set(&ext->oe_users, 0); INIT_LIST_HEAD(&ext->oe_link); @@ -398,6 +399,7 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext) LDLM_LOCK_PUT(ext->oe_dlmlock); ext->oe_dlmlock = NULL; } + cl_object_put(env, osc2cl(ext->oe_obj)); osc_extent_free(ext); } } @@ -959,7 +961,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext, if (rc == -ETIMEDOUT) { OSC_EXTENT_DUMP(D_ERROR, ext, "%s: wait ext to %u timedout, recovery in progress?\n", - osc_export(obj)->exp_obd->obd_name, 
state); + cli_name(osc_cli(obj)), state); lwi = LWI_INTR(NULL, NULL); rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), @@ -977,7 +979,6 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext, static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, bool partial) { - struct cl_env_nest nest; struct lu_env *env; struct cl_io *io; struct osc_object *obj = ext->oe_obj; @@ -990,6 +991,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, int grants = 0; int nr_pages = 0; int rc = 0; + int refcheck; LASSERT(sanity_check(ext) == 0); EASSERT(ext->oe_state == OES_TRUNC, ext); @@ -999,7 +1001,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, * We can't use that env from osc_cache_truncate_start() because * it's from lov_io_sub and not fully initialized. */ - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); io = &osc_env_info(env)->oti_io; io->ci_obj = cl_object_top(osc2cl(obj)); rc = cl_io_init(env, io, CIT_MISC, io->ci_obj); @@ -1085,7 +1087,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, out: cl_io_fini(env, io); - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); return rc; } @@ -1327,7 +1329,6 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, { struct osc_page *opg = oap2osc_page(oap); struct cl_page *page = oap2cl_page(oap); - struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); enum cl_req_type crt; int srvlock; @@ -1338,25 +1339,10 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, "cp_state:%u, cmd:%d\n", page->cp_state, cmd); LASSERT(opg->ops_transfer_pinned); - /* - * page->cp_req can be NULL if io submission failed before - * cl_req was allocated. - */ - if (page->cp_req) - cl_req_page_done(env, page); - LASSERT(!page->cp_req); - crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE; /* Clear opg->ops_transfer_pinned before VM lock is released. */ opg->ops_transfer_pinned = 0; - spin_lock(&obj->oo_seatbelt); - LASSERT(opg->ops_submitter); - LASSERT(!list_empty(&opg->ops_inflight)); - list_del_init(&opg->ops_inflight); - opg->ops_submitter = NULL; - spin_unlock(&obj->oo_seatbelt); - opg->ops_submit_time = 0; srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK; @@ -1380,16 +1366,17 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, lu_ref_del(&page->cp_reference, "transfer", page); cl_page_completion(env, page, crt, rc); + cl_page_put(env, page); return 0; } #define OSC_DUMP_GRANT(lvl, cli, fmt, args...) 
do { \ struct client_obd *__tmp = (cli); \ - CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \ + CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu " \ "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \ "lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \ - __tmp->cl_import->imp_obd->obd_name, \ + cli_name(__tmp), \ __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \ atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \ __tmp->cl_lost_grant, __tmp->cl_avail_grant, \ @@ -1627,7 +1614,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, osc_io_unplug_async(env, cli, NULL); CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n", - cli->cl_import->imp_obd->obd_name, &ocw, oap); + cli_name(cli), &ocw, oap); rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi); @@ -1671,7 +1658,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, break; default: CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n", - cli->cl_import->imp_obd->obd_name, &ocw, rc); + cli_name(cli), &ocw, rc); break; } out: @@ -1931,7 +1918,8 @@ static int try_to_add_extent_for_io(struct client_obd *cli, } if (tmp->oe_srvlock != ext->oe_srvlock || - !tmp->oe_grants != !ext->oe_grants) + !tmp->oe_grants != !ext->oe_grants || + tmp->oe_no_merge || ext->oe_no_merge) return 0; /* remove break for strict check */ @@ -2250,14 +2238,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, return 0; if (!async) { - /* disable osc_lru_shrink() temporarily to avoid - * potential stack overrun problem. LU-2859 - */ - atomic_inc(&cli->cl_lru_shrinkers); spin_lock(&cli->cl_loi_list_lock); osc_check_rpcs(env, cli); spin_unlock(&cli->cl_loi_list_lock); - atomic_dec(&cli->cl_lru_shrinkers); } else { CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); LASSERT(cli->cl_writeback_work); @@ -2479,7 +2462,6 @@ int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj, struct osc_page *ops) { struct osc_async_page *oap = &ops->ops_oap; - struct osc_extent *ext = NULL; int rc = 0; LASSERT(oap->oap_magic == OAP_MAGIC); @@ -2487,12 +2469,15 @@ int osc_teardown_async_page(const struct lu_env *env, CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n", oap, ops, osc_index(oap2osc(oap))); - osc_object_lock(obj); if (!list_empty(&oap->oap_rpc_item)) { CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap); rc = -EBUSY; } else if (!list_empty(&oap->oap_pending_item)) { + struct osc_extent *ext = NULL; + + osc_object_lock(obj); ext = osc_extent_lookup(obj, osc_index(oap2osc(oap))); + osc_object_unlock(obj); /* only truncated pages are allowed to be taken out. * See osc_extent_truncate() and osc_cache_truncate_start() * for details. 
@@ -2502,10 +2487,9 @@ int osc_teardown_async_page(const struct lu_env *env, osc_index(oap2osc(oap))); rc = -EBUSY; } + if (ext) + osc_extent_put(env, ext); } - osc_object_unlock(obj); - if (ext) - osc_extent_put(env, ext); return rc; } @@ -2666,11 +2650,13 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, struct osc_async_page *oap, *tmp; int page_count = 0; int mppr = cli->cl_max_pages_per_rpc; + bool can_merge = true; pgoff_t start = CL_PAGE_EOF; pgoff_t end = 0; list_for_each_entry(oap, list, oap_pending_item) { - pgoff_t index = osc_index(oap2osc(oap)); + struct osc_page *opg = oap2osc_page(oap); + pgoff_t index = osc_index(opg); if (index > end) end = index; @@ -2678,6 +2664,9 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, start = index; ++page_count; mppr <<= (page_count > mppr); + + if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE)) + can_merge = false; } ext = osc_extent_alloc(obj); @@ -2691,6 +2680,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, ext->oe_rw = !!(cmd & OBD_BRW_READ); ext->oe_sync = 1; + ext->oe_no_merge = !can_merge; ext->oe_urgent = 1; ext->oe_start = start; ext->oe_end = end; @@ -3158,7 +3148,8 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io, struct cl_page *page = ops->ops_cl.cpl_page; /* refresh non-overlapped index */ - tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0); + tmp = osc_dlmlock_at_pgoff(env, osc, index, + OSC_DAP_FL_TEST_LOCK); if (tmp) { __u64 end = tmp->l_policy_data.l_extent.end; /* Cache the first-non-overlapped index so as to skip diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h index 9c8de15c309c8060e48ae68df7bfc9abd0ff4f4c..cce55a9689f041a5e282ae7d195f71280a5cc620 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h @@ -77,7 +77,6 @@ struct osc_io { /** write osc_lock for this IO, used by osc_extent_find(). */ struct osc_lock *oi_write_osclock; - struct obd_info oi_info; struct obdo oi_oa; struct osc_async_cbargs { bool opc_rpc_sent; @@ -86,13 +85,6 @@ struct osc_io { } oi_cbarg; }; -/** - * State of transfer for osc. - */ -struct osc_req { - struct cl_req_slice or_cl; -}; - /** * State maintained by osc layer for the duration of a system call. */ @@ -103,7 +95,7 @@ struct osc_session { #define OTI_PVEC_SIZE 256 struct osc_thread_info { struct ldlm_res_id oti_resname; - ldlm_policy_data_t oti_policy; + union ldlm_policy_data oti_policy; struct cl_lock_descr oti_descr; struct cl_attr oti_attr; struct lustre_handle oti_handle; @@ -116,6 +108,7 @@ struct osc_thread_info { pgoff_t oti_next_index; pgoff_t oti_fn_index; /* first non-overlapped index */ struct cl_sync_io oti_anchor; + struct cl_req_attr oti_req_attr; }; struct osc_object { @@ -126,16 +119,6 @@ struct osc_object { */ int oo_contended; unsigned long oo_contention_time; - /** - * List of pages in transfer. - */ - struct list_head oo_inflight[CRT_NR]; - /** - * Lock, protecting osc_page::ops_inflight, because a seat-belt is - * locked during take-off and landing. - */ - spinlock_t oo_seatbelt; - /** * used by the osc to keep track of what objects to build into rpcs. * Protected by client_obd->cli_loi_list_lock. @@ -363,15 +346,6 @@ struct osc_page { * lru page list. See osc_lru_{del|use}() in osc_page.c for usage. */ struct list_head ops_lru; - /** - * Linkage into a per-osc_object list of pages in flight. 
For - * debugging. - */ - struct list_head ops_inflight; - /** - * Thread that submitted this page for transfer. For debugging. - */ - struct task_struct *ops_submitter; /** * Submit time - the time when the page is starting RPC. For debugging. */ @@ -382,7 +356,6 @@ extern struct kmem_cache *osc_lock_kmem; extern struct kmem_cache *osc_object_kmem; extern struct kmem_cache *osc_thread_kmem; extern struct kmem_cache *osc_session_kmem; -extern struct kmem_cache *osc_req_kmem; extern struct kmem_cache *osc_extent_kmem; extern struct lu_device_type osc_device_type; @@ -396,15 +369,14 @@ int osc_lock_init(const struct lu_env *env, const struct cl_io *io); int osc_io_init(const struct lu_env *env, struct cl_object *obj, struct cl_io *io); -int osc_req_init(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req); struct lu_object *osc_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); int osc_page_init(const struct lu_env *env, struct cl_object *obj, struct cl_page *page, pgoff_t ind); -void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj, +void osc_index2policy(union ldlm_policy_data *policy, + const struct cl_object *obj, pgoff_t start, pgoff_t end); int osc_lvb_print(const struct lu_env *env, void *cookie, lu_printer_t p, const struct ost_lvb *lvb); @@ -554,6 +526,16 @@ static inline struct osc_page *oap2osc_page(struct osc_async_page *oap) return (struct osc_page *)container_of(oap, struct osc_page, ops_oap); } +static inline struct osc_page * +osc_cl_page_osc(struct cl_page *page, struct osc_object *osc) +{ + const struct cl_page_slice *slice; + + LASSERT(osc); + slice = cl_object_page_slice(&osc->oo_cl, page); + return cl2osc_page(slice); +} + static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice) { LINVRNT(osc_is_object(&slice->cls_obj->co_lu)); @@ -615,6 +597,10 @@ struct osc_extent { oe_rw:1, /** sync extent, queued by osc_queue_sync_pages() */ oe_sync:1, + /** set if this extent has partial, sync pages. + * Extents with partial page(s) can't merge with others in RPC + */ + oe_no_merge:1, oe_srvlock:1, oe_memalloc:1, /** an ACTIVE extent is going to be truncated, so when this extent diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c index 83d30c135ba4bcaf5c0b14aff3e28b836fb0c3da..c5d62aeaeab5b64101ddb3b705ab649cd7a44a62 100644 --- a/drivers/staging/lustre/lustre/osc/osc_dev.c +++ b/drivers/staging/lustre/lustre/osc/osc_dev.c @@ -29,7 +29,7 @@ * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. * - * Implementation of cl_device, cl_req for OSC layer. + * Implementation of cl_device, for OSC layer. 
* * Author: Nikita Danilov */ @@ -49,7 +49,6 @@ struct kmem_cache *osc_lock_kmem; struct kmem_cache *osc_object_kmem; struct kmem_cache *osc_thread_kmem; struct kmem_cache *osc_session_kmem; -struct kmem_cache *osc_req_kmem; struct kmem_cache *osc_extent_kmem; struct kmem_cache *osc_quota_kmem; @@ -74,11 +73,6 @@ struct lu_kmem_descr osc_caches[] = { .ckd_name = "osc_session_kmem", .ckd_size = sizeof(struct osc_session) }, - { - .ckd_cache = &osc_req_kmem, - .ckd_name = "osc_req_kmem", - .ckd_size = sizeof(struct osc_req) - }, { .ckd_cache = &osc_extent_kmem, .ckd_name = "osc_extent_kmem", @@ -94,8 +88,6 @@ struct lu_kmem_descr osc_caches[] = { } }; -struct lock_class_key osc_ast_guard_class; - /***************************************************************************** * * Type conversions. @@ -178,10 +170,6 @@ static const struct lu_device_operations osc_lu_ops = { .ldo_recovery_complete = NULL }; -static const struct cl_device_operations osc_cl_ops = { - .cdo_req_init = osc_req_init -}; - static int osc_device_init(const struct lu_env *env, struct lu_device *d, const char *name, struct lu_device *next) { @@ -220,7 +208,6 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env, cl_device_init(&od->od_cl, t); d = osc2lu_dev(od); d->ld_ops = &osc_lu_ops; - od->od_cl.cd_ops = &osc_cl_ops; /* Setup OSC OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h index 67fe0a2549915d7e93d085a53f893ca1c252f008..688783dcc1e4d7991f861ca623bcce98eee13409 100644 --- a/drivers/staging/lustre/lustre/osc/osc_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_internal.h @@ -107,26 +107,24 @@ typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh, int rc); int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, - __u64 *flags, ldlm_policy_data_t *policy, + __u64 *flags, union ldlm_policy_data *policy, struct ost_lvb *lvb, int kms_valid, osc_enqueue_upcall_f upcall, void *cookie, struct ldlm_enqueue_info *einfo, struct ptlrpc_request_set *rqset, int async, int agl); -int osc_cancel_base(struct lustre_handle *lockh, __u32 mode); int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, - __u32 type, ldlm_policy_data_t *policy, __u32 mode, + __u32 type, union ldlm_policy_data *policy, __u32 mode, __u64 *flags, void *data, struct lustre_handle *lockh, int unref); -int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, - struct obd_trans_info *oti, - obd_enqueue_update_f upcall, void *cookie, - struct ptlrpc_request_set *rqset); -int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, +int osc_setattr_async(struct obd_export *exp, struct obdo *oa, + obd_enqueue_update_f upcall, void *cookie, + struct ptlrpc_request_set *rqset); +int osc_punch_base(struct obd_export *exp, struct obdo *oa, obd_enqueue_update_f upcall, void *cookie, struct ptlrpc_request_set *rqset); -int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, +int osc_sync_base(struct osc_object *exp, struct obdo *oa, obd_enqueue_update_f upcall, void *cookie, struct ptlrpc_request_set *rqset); @@ -135,7 +133,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, struct list_head *ext_list, int cmd); long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, long target, bool force); -long osc_lru_reclaim(struct client_obd *cli); +long osc_lru_reclaim(struct client_obd *cli, unsigned long npages); 
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock); @@ -157,6 +155,11 @@ static inline unsigned long rpcs_in_flight(struct client_obd *cli) return cli->cl_r_in_flight + cli->cl_w_in_flight; } +static inline char *cli_name(struct client_obd *cli) +{ + return cli->cl_import->imp_obd->obd_name; +} + struct osc_device { struct cl_device od_cl; struct obd_export *od_exp; @@ -192,15 +195,27 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]); int osc_quotactl(struct obd_device *unused, struct obd_export *exp, struct obd_quotactl *oqctl); -int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl); -int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk); void osc_inc_unstable_pages(struct ptlrpc_request *req); void osc_dec_unstable_pages(struct ptlrpc_request *req); bool osc_over_unstable_soft_limit(struct client_obd *cli); +/** + * Bit flags for osc_dlm_lock_at_pageoff(). + */ +enum osc_dap_flags { + /** + * Just check if the desired lock exists, it won't hold reference + * count on lock. + */ + OSC_DAP_FL_TEST_LOCK = BIT(0), + /** + * Return the lock even if it is being canceled. + */ + OSC_DAP_FL_CANCELING = BIT(1), +}; + struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, struct osc_object *obj, pgoff_t index, - int pending, int canceling); + enum osc_dap_flags flags); #endif /* OSC_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c index 8a559cbcdd0c9f5bf264ca776b6bc4661713a3fe..228a97c098feb50803d8d5a7df56d6f5a7ca1969 100644 --- a/drivers/staging/lustre/lustre/osc/osc_io.c +++ b/drivers/staging/lustre/lustre/osc/osc_io.c @@ -49,12 +49,6 @@ * */ -static struct osc_req *cl2osc_req(const struct cl_req_slice *slice) -{ - LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type); - return container_of0(slice, struct osc_req, or_cl); -} - static struct osc_io *cl2osc_io(const struct lu_env *env, const struct cl_io_slice *slice) { @@ -64,20 +58,6 @@ static struct osc_io *cl2osc_io(const struct lu_env *env, return oio; } -static struct osc_page *osc_cl_page_osc(struct cl_page *page, - struct osc_object *osc) -{ - const struct cl_page_slice *slice; - - if (osc) - slice = cl_object_page_slice(&osc->oo_cl, page); - else - slice = cl_page_at(page, &osc_device_type); - LASSERT(slice); - - return cl2osc_page(slice); -} - /***************************************************************************** * * io operations. 
@@ -88,6 +68,45 @@ static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io) { } +static void osc_read_ahead_release(const struct lu_env *env, void *cbdata) +{ + struct ldlm_lock *dlmlock = cbdata; + struct lustre_handle lockh; + + ldlm_lock2handle(dlmlock, &lockh); + ldlm_lock_decref(&lockh, LCK_PR); + LDLM_LOCK_PUT(dlmlock); +} + +static int osc_io_read_ahead(const struct lu_env *env, + const struct cl_io_slice *ios, + pgoff_t start, struct cl_read_ahead *ra) +{ + struct osc_object *osc = cl2osc(ios->cis_obj); + struct ldlm_lock *dlmlock; + int result = -ENODATA; + + dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0); + if (dlmlock) { + LASSERT(dlmlock->l_ast_data == osc); + if (dlmlock->l_req_mode != LCK_PR) { + struct lustre_handle lockh; + + ldlm_lock2handle(dlmlock, &lockh); + ldlm_lock_addref(&lockh, LCK_PR); + ldlm_lock_decref(&lockh, dlmlock->l_req_mode); + } + + ra->cra_end = cl_index(osc2cl(osc), + dlmlock->l_policy_data.l_extent.end); + ra->cra_release = osc_read_ahead_release; + ra->cra_cbdata = dlmlock; + result = 0; + } + + return result; +} + /** * An implementation of cl_io_operations::cio_io_submit() method for osc * layer. Iterates over pages in the in-queue, prepares each for io by calling @@ -334,7 +353,7 @@ static int osc_io_rw_iter_init(const struct lu_env *env, npages = max_pages; c = atomic_long_read(cli->cl_lru_left); - if (c < npages && osc_lru_reclaim(cli) > 0) + if (c < npages && osc_lru_reclaim(cli, npages) > 0) c = atomic_long_read(cli->cl_lru_left); while (c >= npages) { if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) { @@ -343,6 +362,17 @@ static int osc_io_rw_iter_init(const struct lu_env *env, } c = atomic_long_read(cli->cl_lru_left); } + if (atomic_long_read(cli->cl_lru_left) < max_pages) { + /* + * If there aren't enough pages in the per-OSC LRU then + * wake up the LRU thread to try and clear out space, so + * we don't block if pages are being dirtied quickly. 
+ */ + CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n", + cli_name(cli), atomic_long_read(cli->cl_lru_left), + max_pages); + (void)ptlrpcd_queue_work(cli->cl_lru_work); + } return 0; } @@ -446,7 +476,6 @@ static int osc_io_setattr_start(const struct lu_env *env, __u64 size = io->u.ci_setattr.sa_attr.lvb_size; unsigned int ia_valid = io->u.ci_setattr.sa_valid; int result = 0; - struct obd_info oinfo = { }; /* truncate cache dirty pages first */ if (cl_io_is_trunc(io)) @@ -486,11 +515,19 @@ static int osc_io_setattr_start(const struct lu_env *env, oa->o_oi = loi->loi_oi; obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid); oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index; - oa->o_mtime = attr->cat_mtime; - oa->o_atime = attr->cat_atime; - oa->o_ctime = attr->cat_ctime; - oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME | - OBD_MD_FLCTIME | OBD_MD_FLMTIME; + oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP; + if (ia_valid & ATTR_CTIME) { + oa->o_valid |= OBD_MD_FLCTIME; + oa->o_ctime = attr->cat_ctime; + } + if (ia_valid & ATTR_ATIME) { + oa->o_valid |= OBD_MD_FLATIME; + oa->o_atime = attr->cat_atime; + } + if (ia_valid & ATTR_MTIME) { + oa->o_valid |= OBD_MD_FLMTIME; + oa->o_mtime = attr->cat_mtime; + } if (ia_valid & ATTR_SIZE) { oa->o_size = size; oa->o_blocks = OBD_OBJECT_EOF; @@ -503,19 +540,21 @@ static int osc_io_setattr_start(const struct lu_env *env, } else { LASSERT(oio->oi_lockless == 0); } + if (ia_valid & ATTR_ATTR_FLAG) { + oa->o_flags = io->u.ci_setattr.sa_attr_flags; + oa->o_valid |= OBD_MD_FLFLAGS; + } - oinfo.oi_oa = oa; init_completion(&cbargs->opc_sync); if (ia_valid & ATTR_SIZE) result = osc_punch_base(osc_export(cl2osc(obj)), - &oinfo, osc_async_upcall, + oa, osc_async_upcall, cbargs, PTLRPCD_SET); else - result = osc_setattr_async_base(osc_export(cl2osc(obj)), - &oinfo, NULL, - osc_async_upcall, - cbargs, PTLRPCD_SET); + result = osc_setattr_async(osc_export(cl2osc(obj)), + oa, osc_async_upcall, + cbargs, PTLRPCD_SET); cbargs->opc_rpc_sent = result == 0; } return result; @@ -557,6 +596,107 @@ static void osc_io_setattr_end(const struct lu_env *env, } } +struct osc_data_version_args { + struct osc_io *dva_oio; +}; + +static int +osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req, + void *arg, int rc) +{ + struct osc_data_version_args *dva = arg; + struct osc_io *oio = dva->dva_oio; + const struct ost_body *body; + + if (rc < 0) + goto out; + + body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); + if (!body) { + rc = -EPROTO; + goto out; + } + + lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa, + &body->oa); +out: + oio->oi_cbarg.opc_rc = rc; + complete(&oio->oi_cbarg.opc_sync); + + return 0; +} + +static int osc_io_data_version_start(const struct lu_env *env, + const struct cl_io_slice *slice) +{ + struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version; + struct osc_io *oio = cl2osc_io(env, slice); + struct osc_async_cbargs *cbargs = &oio->oi_cbarg; + struct osc_object *obj = cl2osc(slice->cis_obj); + struct obd_export *exp = osc_export(obj); + struct lov_oinfo *loi = obj->oo_oinfo; + struct osc_data_version_args *dva; + struct obdo *oa = &oio->oi_oa; + struct ptlrpc_request *req; + struct ost_body *body; + int rc; + + memset(oa, 0, sizeof(*oa)); + oa->o_oi = loi->loi_oi; + oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP; + + if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) { + oa->o_valid |= OBD_MD_FLFLAGS; + oa->o_flags |= OBD_FL_SRVLOCK; + if (dv->dv_flags & LL_DV_WR_FLUSH) + 
oa->o_flags |= OBD_FL_FLUSH; + } + + init_completion(&cbargs->opc_sync); + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); + if (!req) + return -ENOMEM; + + rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); + if (rc < 0) { + ptlrpc_request_free(req); + return rc; + } + + body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); + lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); + + ptlrpc_request_set_replen(req); + req->rq_interpret_reply = osc_data_version_interpret; + CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args)); + dva = ptlrpc_req_async_args(req); + dva->dva_oio = oio; + + ptlrpcd_add_req(req); + + return 0; +} + +static void osc_io_data_version_end(const struct lu_env *env, + const struct cl_io_slice *slice) +{ + struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version; + struct osc_io *oio = cl2osc_io(env, slice); + struct osc_async_cbargs *cbargs = &oio->oi_cbarg; + + wait_for_completion(&cbargs->opc_sync); + + if (cbargs->opc_rc) { + slice->cis_io->ci_result = cbargs->opc_rc; + } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) { + slice->cis_io->ci_result = -EOPNOTSUPP; + } else { + dv->dv_data_version = oio->oi_oa.o_data_version; + slice->cis_io->ci_result = 0; + } +} + static int osc_io_read_start(const struct lu_env *env, const struct cl_io_slice *slice) { @@ -595,7 +735,6 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj, { struct osc_io *oio = osc_env_io(env); struct obdo *oa = &oio->oi_oa; - struct obd_info *oinfo = &oio->oi_info; struct lov_oinfo *loi = obj->oo_oinfo; struct osc_async_cbargs *cbargs = &oio->oi_cbarg; int rc = 0; @@ -611,12 +750,9 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj, obdo_set_parent_fid(oa, fio->fi_fid); - memset(oinfo, 0, sizeof(*oinfo)); - oinfo->oi_oa = oa; init_completion(&cbargs->opc_sync); - rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs, - PTLRPCD_SET); + rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET); return rc; } @@ -710,6 +846,10 @@ static const struct cl_io_operations osc_io_ops = { .cio_start = osc_io_setattr_start, .cio_end = osc_io_setattr_end }, + [CIT_DATA_VERSION] = { + .cio_start = osc_io_data_version_start, + .cio_end = osc_io_data_version_end, + }, [CIT_FAULT] = { .cio_start = osc_io_fault_start, .cio_end = osc_io_end, @@ -724,6 +864,7 @@ static const struct cl_io_operations osc_io_ops = { .cio_fini = osc_io_fini } }, + .cio_read_ahead = osc_io_read_ahead, .cio_submit = osc_io_submit, .cio_commit_async = osc_io_commit_async }; @@ -734,103 +875,6 @@ static const struct cl_io_operations osc_io_ops = { * */ -static int osc_req_prep(const struct lu_env *env, - const struct cl_req_slice *slice) -{ - return 0; -} - -static void osc_req_completion(const struct lu_env *env, - const struct cl_req_slice *slice, int ioret) -{ - struct osc_req *or; - - or = cl2osc_req(slice); - kmem_cache_free(osc_req_kmem, or); -} - -/** - * Implementation of struct cl_req_operations::cro_attr_set() for osc - * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq - * fields. 
- */ -static void osc_req_attr_set(const struct lu_env *env, - const struct cl_req_slice *slice, - const struct cl_object *obj, - struct cl_req_attr *attr, u64 flags) -{ - struct lov_oinfo *oinfo; - struct cl_req *clerq; - struct cl_page *apage; /* _some_ page in @clerq */ - struct ldlm_lock *lock; /* _some_ lock protecting @apage */ - struct osc_page *opg; - struct obdo *oa; - struct ost_lvb *lvb; - - oinfo = cl2osc(obj)->oo_oinfo; - lvb = &oinfo->loi_lvb; - oa = attr->cra_oa; - - if ((flags & OBD_MD_FLMTIME) != 0) { - oa->o_mtime = lvb->lvb_mtime; - oa->o_valid |= OBD_MD_FLMTIME; - } - if ((flags & OBD_MD_FLATIME) != 0) { - oa->o_atime = lvb->lvb_atime; - oa->o_valid |= OBD_MD_FLATIME; - } - if ((flags & OBD_MD_FLCTIME) != 0) { - oa->o_ctime = lvb->lvb_ctime; - oa->o_valid |= OBD_MD_FLCTIME; - } - if (flags & OBD_MD_FLGROUP) { - ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi)); - oa->o_valid |= OBD_MD_FLGROUP; - } - if (flags & OBD_MD_FLID) { - ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi)); - oa->o_valid |= OBD_MD_FLID; - } - if (flags & OBD_MD_FLHANDLE) { - clerq = slice->crs_req; - LASSERT(!list_empty(&clerq->crq_pages)); - apage = container_of(clerq->crq_pages.next, - struct cl_page, cp_flight); - opg = osc_cl_page_osc(apage, NULL); - lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg), - 1, 1); - if (!lock && !opg->ops_srvlock) { - struct ldlm_resource *res; - struct ldlm_res_id *resname; - - CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n"); - - resname = &osc_env_info(env)->oti_resname; - ostid_build_res_name(&oinfo->loi_oi, resname); - res = ldlm_resource_get( - osc_export(cl2osc(obj))->exp_obd->obd_namespace, - NULL, resname, LDLM_EXTENT, 0); - ldlm_resource_dump(D_ERROR, res); - - dump_stack(); - LBUG(); - } - - /* check for lockless io. 
*/ - if (lock) { - oa->o_handle = lock->l_remote_handle; - oa->o_valid |= OBD_MD_FLHANDLE; - LDLM_LOCK_PUT(lock); - } - } -} - -static const struct cl_req_operations osc_req_ops = { - .cro_prep = osc_req_prep, - .cro_attr_set = osc_req_attr_set, - .cro_completion = osc_req_completion -}; - int osc_io_init(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) { @@ -841,20 +885,4 @@ int osc_io_init(const struct lu_env *env, return 0; } -int osc_req_init(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req) -{ - struct osc_req *or; - int result; - - or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS); - if (or) { - cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); - result = 0; - } else { - result = -ENOMEM; - } - return result; -} - /** @} osc */ diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c index 39a8a58516038fb564ff581efc452382abd83cd0..5f799a4c78f9656614dcbc3a627dd5cf9b55b813 100644 --- a/drivers/staging/lustre/lustre/osc/osc_lock.c +++ b/drivers/staging/lustre/lustre/osc/osc_lock.c @@ -145,7 +145,7 @@ static void osc_lock_fini(const struct lu_env *env, static void osc_lock_build_policy(const struct lu_env *env, const struct cl_lock *lock, - ldlm_policy_data_t *policy) + union ldlm_policy_data *policy) { const struct cl_lock_descr *d = &lock->cll_descr; @@ -188,7 +188,7 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct cl_object *obj = osc2cl(osc); struct lov_oinfo *oinfo = osc->oo_oinfo; struct cl_attr *attr = &osc_env_info(env)->oti_attr; - unsigned valid; + unsigned int valid; valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE; if (!lvb) @@ -294,10 +294,10 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh, struct osc_lock *oscl = cookie; struct cl_lock_slice *slice = &oscl->ols_cl; struct lu_env *env; - struct cl_env_nest nest; int rc; + int refcheck; - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); /* should never happen, similar to osc_ldlm_blocking_ast(). 
*/ LASSERT(!IS_ERR(env)); @@ -336,7 +336,7 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh, if (oscl->ols_owner) cl_sync_io_note(env, oscl->ols_owner, rc); - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); return rc; } @@ -347,9 +347,9 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh, struct osc_object *osc = cookie; struct ldlm_lock *dlmlock; struct lu_env *env; - struct cl_env_nest nest; + int refcheck; - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); LASSERT(!IS_ERR(env)); if (errcode == ELDLM_LOCK_MATCHED) { @@ -374,7 +374,7 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh, out: cl_object_put(env, osc2cl(osc)); - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); return ldlm_error2errno(errcode); } @@ -382,11 +382,11 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end, enum cl_lock_mode mode, int discard) { struct lu_env *env; - struct cl_env_nest nest; + int refcheck; int rc = 0; int rc2 = 0; - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); @@ -404,7 +404,7 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end, if (rc == 0 && rc2 < 0) rc = rc2; - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); return rc; } @@ -536,7 +536,7 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock, } case LDLM_CB_CANCELING: { struct lu_env *env; - struct cl_env_nest nest; + int refcheck; /* * This can be called in the context of outer IO, e.g., @@ -549,14 +549,14 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock, * new environment has to be created to not corrupt outer * context. */ - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) { result = PTR_ERR(env); break; } result = osc_dlm_blocking_ast0(env, dlmlock, data, flag); - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); break; } default: @@ -568,61 +568,63 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock, static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) { struct ptlrpc_request *req = data; - struct cl_env_nest nest; struct lu_env *env; struct ost_lvb *lvb; struct req_capsule *cap; + struct cl_object *obj = NULL; int result; + int refcheck; LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK); - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - struct cl_object *obj = NULL; + env = cl_env_get(&refcheck); + if (IS_ERR(env)) { + result = PTR_ERR(env); + goto out; + } - lock_res_and_lock(dlmlock); - if (dlmlock->l_ast_data) { - obj = osc2cl(dlmlock->l_ast_data); - cl_object_get(obj); - } - unlock_res_and_lock(dlmlock); + lock_res_and_lock(dlmlock); + if (dlmlock->l_ast_data) { + obj = osc2cl(dlmlock->l_ast_data); + cl_object_get(obj); + } + unlock_res_and_lock(dlmlock); - if (obj) { - /* Do not grab the mutex of cl_lock for glimpse. - * See LU-1274 for details. - * BTW, it's okay for cl_lock to be cancelled during - * this period because server can handle this race. - * See ldlm_server_glimpse_ast() for details. 
- * cl_lock_mutex_get(env, lock); - */ - cap = &req->rq_pill; - req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); - req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, - sizeof(*lvb)); - result = req_capsule_server_pack(cap); - if (result == 0) { - lvb = req_capsule_server_get(cap, &RMF_DLM_LVB); - result = cl_object_glimpse(env, obj, lvb); - } - if (!exp_connect_lvb_type(req->rq_export)) - req_capsule_shrink(&req->rq_pill, - &RMF_DLM_LVB, - sizeof(struct ost_lvb_v1), - RCL_SERVER); - cl_object_put(env, obj); - } else { - /* - * These errors are normal races, so we don't want to - * fill the console with messages by calling - * ptlrpc_error() - */ - lustre_pack_reply(req, 1, NULL, NULL); - result = -ELDLM_NO_LOCK_DATA; + if (obj) { + /* Do not grab the mutex of cl_lock for glimpse. + * See LU-1274 for details. + * BTW, it's okay for cl_lock to be cancelled during + * this period because server can handle this race. + * See ldlm_server_glimpse_ast() for details. + * cl_lock_mutex_get(env, lock); + */ + cap = &req->rq_pill; + req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); + req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, + sizeof(*lvb)); + result = req_capsule_server_pack(cap); + if (result == 0) { + lvb = req_capsule_server_get(cap, &RMF_DLM_LVB); + result = cl_object_glimpse(env, obj, lvb); + } + if (!exp_connect_lvb_type(req->rq_export)) { + req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, + sizeof(struct ost_lvb_v1), + RCL_SERVER); } - cl_env_nested_put(&nest, env); + cl_object_put(env, obj); } else { - result = PTR_ERR(env); + /* + * These errors are normal races, so we don't want to + * fill the console with messages by calling + * ptlrpc_error() + */ + lustre_pack_reply(req, 1, NULL, NULL); + result = -ELDLM_NO_LOCK_DATA; } + cl_env_put(env, &refcheck); + +out: req->rq_status = result; return result; } @@ -677,12 +679,12 @@ static unsigned long osc_lock_weight(const struct lu_env *env, */ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) { - struct cl_env_nest nest; struct lu_env *env; struct osc_object *obj; struct osc_lock *oscl; unsigned long weight; bool found = false; + int refcheck; might_sleep(); /* @@ -692,7 +694,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) * the upper context because cl_lock_put don't modify environment * variables. But just in case .. 
*/ - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) /* Mostly because lack of memory, do not eliminate this lock */ return 1; @@ -722,7 +724,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent); out: - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); return weight; } @@ -912,7 +914,7 @@ static int osc_lock_enqueue(const struct lu_env *env, struct osc_lock *oscl = cl2osc_lock(slice); struct cl_lock *lock = slice->cls_lock; struct ldlm_res_id *resname = &info->oti_resname; - ldlm_policy_data_t *policy = &info->oti_policy; + union ldlm_policy_data *policy = &info->oti_policy; osc_enqueue_upcall_f upcall = osc_lock_upcall; void *cookie = oscl; bool async = false; @@ -1009,7 +1011,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) if (olck->ols_hold) { olck->ols_hold = 0; - osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode); + ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode); olck->ols_handle.cookie = 0ULL; } @@ -1180,11 +1182,11 @@ int osc_lock_init(const struct lu_env *env, */ struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, struct osc_object *obj, pgoff_t index, - int pending, int canceling) + enum osc_dap_flags dap_flags) { struct osc_thread_info *info = osc_env_info(env); struct ldlm_res_id *resname = &info->oti_resname; - ldlm_policy_data_t *policy = &info->oti_policy; + union ldlm_policy_data *policy = &info->oti_policy; struct lustre_handle lockh; struct ldlm_lock *lock = NULL; enum ldlm_mode mode; @@ -1194,17 +1196,18 @@ struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, osc_index2policy(policy, osc2cl(obj), index, index); policy->l_extent.gid = LDLM_GID_ANY; - flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK; - if (pending) - flags |= LDLM_FL_CBPENDING; + flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING; + if (dap_flags & OSC_DAP_FL_TEST_LOCK) + flags |= LDLM_FL_TEST_LOCK; + /* * It is fine to match any group lock since there could be only one * with a uniq gid and it conflicts with all other lock modes too */ again: - mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace, - flags, resname, LDLM_EXTENT, policy, - LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling); + mode = osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy, + LCK_PR | LCK_PW | LCK_GROUP, &flags, obj, &lockh, + dap_flags & OSC_DAP_FL_CANCELING); if (mode != 0) { lock = ldlm_handle2lock(&lockh); /* RACE: the lock is cancelled so let's try again */ diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c index aae3a2d4243f7b858dff84d121fc59003ae11226..e0c3324857dd3f6a42b491d13475fe510ae9a99f 100644 --- a/drivers/staging/lustre/lustre/osc/osc_object.c +++ b/drivers/staging/lustre/lustre/osc/osc_object.c @@ -71,13 +71,8 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj, { struct osc_object *osc = lu2osc(obj); const struct cl_object_conf *cconf = lu2cl_conf(conf); - int i; osc->oo_oinfo = cconf->u.coc_oinfo; - spin_lock_init(&osc->oo_seatbelt); - for (i = 0; i < CRT_NR; ++i) - INIT_LIST_HEAD(&osc->oo_inflight[i]); - INIT_LIST_HEAD(&osc->oo_ready_item); INIT_LIST_HEAD(&osc->oo_hp_ready_item); INIT_LIST_HEAD(&osc->oo_write_item); @@ -103,10 +98,6 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj, static void osc_object_free(const struct lu_env *env, struct lu_object *obj) { struct osc_object *osc = 
lu2osc(obj); - int i; - - for (i = 0; i < CRT_NR; ++i) - LASSERT(list_empty(&osc->oo_inflight[i])); LASSERT(list_empty(&osc->oo_ready_item)); LASSERT(list_empty(&osc->oo_hp_ready_item)); @@ -218,6 +209,94 @@ static int osc_object_prune(const struct lu_env *env, struct cl_object *obj) return 0; } +static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, + struct fiemap *fiemap, size_t *buflen) +{ + struct obd_export *exp = osc_export(cl2osc(obj)); + union ldlm_policy_data policy; + struct ptlrpc_request *req; + struct lustre_handle lockh; + struct ldlm_res_id resid; + enum ldlm_mode mode = 0; + struct fiemap *reply; + char *tmp; + int rc; + + fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi; + if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC)) + goto skip_locking; + + policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK; + + if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <= + fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1) + policy.l_extent.end = OBD_OBJECT_EOF; + else + policy.l_extent.end = (fmkey->lfik_fiemap.fm_start + + fmkey->lfik_fiemap.fm_length + + PAGE_SIZE - 1) & PAGE_MASK; + + ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid); + mode = ldlm_lock_match(exp->exp_obd->obd_namespace, + LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY, + &resid, LDLM_EXTENT, &policy, + LCK_PR | LCK_PW, &lockh, 0); + if (mode) { /* lock is cached on client */ + if (mode != LCK_PR) { + ldlm_lock_addref(&lockh, LCK_PR); + ldlm_lock_decref(&lockh, LCK_PW); + } + } else { /* no cached lock, needs acquire lock on server side */ + fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS; + fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK; + } + +skip_locking: + req = ptlrpc_request_alloc(class_exp2cliimp(exp), + &RQF_OST_GET_INFO_FIEMAP); + if (!req) { + rc = -ENOMEM; + goto drop_lock; + } + + req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT, + sizeof(*fmkey)); + req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT, + *buflen); + req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER, + *buflen); + + rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO); + if (rc) { + ptlrpc_request_free(req); + goto drop_lock; + } + tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY); + memcpy(tmp, fmkey, sizeof(*fmkey)); + tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL); + memcpy(tmp, fiemap, *buflen); + ptlrpc_request_set_replen(req); + + rc = ptlrpc_queue_wait(req); + if (rc) + goto fini_req; + + reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL); + if (!reply) { + rc = -EPROTO; + goto fini_req; + } + + memcpy(fiemap, reply, *buflen); +fini_req: + ptlrpc_req_finished(req); +drop_lock: + if (mode) + ldlm_lock_decref(&lockh, LCK_PR); + return rc; +} + void osc_object_set_contended(struct osc_object *obj) { obj->oo_contention_time = cfs_time_current(); @@ -256,6 +335,76 @@ int osc_object_is_contended(struct osc_object *obj) return 1; } +/** + * Implementation of struct cl_object_operations::coo_req_attr_set() for osc + * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq + * fields. 
+ */ +static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj, + struct cl_req_attr *attr) +{ + u64 flags = attr->cra_flags; + struct lov_oinfo *oinfo; + struct ost_lvb *lvb; + struct obdo *oa; + + oinfo = cl2osc(obj)->oo_oinfo; + lvb = &oinfo->loi_lvb; + oa = attr->cra_oa; + + if (flags & OBD_MD_FLMTIME) { + oa->o_mtime = lvb->lvb_mtime; + oa->o_valid |= OBD_MD_FLMTIME; + } + if (flags & OBD_MD_FLATIME) { + oa->o_atime = lvb->lvb_atime; + oa->o_valid |= OBD_MD_FLATIME; + } + if (flags & OBD_MD_FLCTIME) { + oa->o_ctime = lvb->lvb_ctime; + oa->o_valid |= OBD_MD_FLCTIME; + } + if (flags & OBD_MD_FLGROUP) { + ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi)); + oa->o_valid |= OBD_MD_FLGROUP; + } + if (flags & OBD_MD_FLID) { + ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi)); + oa->o_valid |= OBD_MD_FLID; + } + if (flags & OBD_MD_FLHANDLE) { + struct ldlm_lock *lock; + struct osc_page *opg; + + opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj)); + lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg), + OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING); + if (!lock && !opg->ops_srvlock) { + struct ldlm_resource *res; + struct ldlm_res_id *resname; + + CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page, + "uncovered page!\n"); + + resname = &osc_env_info(env)->oti_resname; + ostid_build_res_name(&oinfo->loi_oi, resname); + res = ldlm_resource_get( + osc_export(cl2osc(obj))->exp_obd->obd_namespace, + NULL, resname, LDLM_EXTENT, 0); + ldlm_resource_dump(D_ERROR, res); + + LBUG(); + } + + /* check for lockless io. */ + if (lock) { + oa->o_handle = lock->l_remote_handle; + oa->o_valid |= OBD_MD_FLHANDLE; + LDLM_LOCK_PUT(lock); + } + } +} + static const struct cl_object_operations osc_ops = { .coo_page_init = osc_page_init, .coo_lock_init = osc_lock_init, @@ -263,7 +412,9 @@ static const struct cl_object_operations osc_ops = { .coo_attr_get = osc_attr_get, .coo_attr_update = osc_attr_update, .coo_glimpse = osc_object_glimpse, - .coo_prune = osc_object_prune + .coo_prune = osc_object_prune, + .coo_fiemap = osc_object_fiemap, + .coo_req_attr_set = osc_req_attr_set }; static const struct lu_object_operations osc_lu_obj_ops = { diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c index 2a7a70aa9e802dd38d31b2d6198e13107ba183c3..e356e4af08e1b780272e0b3b0351a9bfeed38235 100644 --- a/drivers/staging/lustre/lustre/osc/osc_page.c +++ b/drivers/staging/lustre/lustre/osc/osc_page.c @@ -37,6 +37,7 @@ #define DEBUG_SUBSYSTEM S_OSC +#include #include "osc_cl_internal.h" static void osc_lru_del(struct client_obd *cli, struct osc_page *opg); @@ -86,11 +87,6 @@ static void osc_page_transfer_add(const struct lu_env *env, struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); osc_lru_use(osc_cli(obj), opg); - - spin_lock(&obj->oo_seatbelt); - list_add(&opg->ops_inflight, &obj->oo_inflight[crt]); - opg->ops_submitter = current; - spin_unlock(&obj->oo_seatbelt); } int osc_page_cache_add(const struct lu_env *env, @@ -109,7 +105,8 @@ int osc_page_cache_add(const struct lu_env *env, return result; } -void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj, +void osc_index2policy(union ldlm_policy_data *policy, + const struct cl_object *obj, pgoff_t start, pgoff_t end) { memset(policy, 0, sizeof(*policy)); @@ -117,25 +114,6 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj, policy->l_extent.end = cl_offset(obj, end + 1) - 1; } -static int osc_page_is_under_lock(const struct lu_env *env, - const struct 
cl_page_slice *slice, - struct cl_io *unused, pgoff_t *max_index) -{ - struct osc_page *opg = cl2osc_page(slice); - struct ldlm_lock *dlmlock; - int result = -ENODATA; - - dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj), - osc_index(opg), 1, 0); - if (dlmlock) { - *max_index = cl_index(slice->cpl_obj, - dlmlock->l_policy_data.l_extent.end); - LDLM_LOCK_PUT(dlmlock); - result = 0; - } - return result; -} - static const char *osc_list(struct list_head *head) { return list_empty(head) ? "-" : "+"; @@ -158,7 +136,7 @@ static int osc_page_print(const struct lu_env *env, struct osc_object *obj = cl2osc(slice->cpl_obj); struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli; - return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n", + return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n", opg, osc_index(opg), /* 1 */ oap->oap_magic, oap->oap_cmd, @@ -170,8 +148,7 @@ static int osc_page_print(const struct lu_env *env, oap->oap_async_flags, oap->oap_brw_flags, oap->oap_request, oap->oap_cli, obj, /* 3 */ - osc_list(&opg->ops_inflight), - opg->ops_submitter, opg->ops_transfer_pinned, + opg->ops_transfer_pinned, osc_submit_duration(opg), opg->ops_srvlock, /* 4 */ cli->cl_r_in_flight, cli->cl_w_in_flight, @@ -210,14 +187,6 @@ static void osc_page_delete(const struct lu_env *env, LASSERT(0); } - spin_lock(&obj->oo_seatbelt); - if (opg->ops_submitter) { - LASSERT(!list_empty(&opg->ops_inflight)); - list_del_init(&opg->ops_inflight); - opg->ops_submitter = NULL; - } - spin_unlock(&obj->oo_seatbelt); - osc_lru_del(osc_cli(obj), opg); if (slice->cpl_page->cp_type == CPT_CACHEABLE) { @@ -276,7 +245,6 @@ static int osc_page_flush(const struct lu_env *env, static const struct cl_page_operations osc_page_ops = { .cpo_print = osc_page_print, .cpo_delete = osc_page_delete, - .cpo_is_under_lock = osc_page_is_under_lock, .cpo_clip = osc_page_clip, .cpo_cancel = osc_page_cancel, .cpo_flush = osc_page_flush @@ -301,10 +269,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj, cl_page_slice_add(page, &opg->ops_cl, obj, index, &osc_page_ops); } - /* ops_inflight and ops_lru are the same field, but it doesn't - * hurt to initialize it twice :-) - */ - INIT_LIST_HEAD(&opg->ops_inflight); INIT_LIST_HEAD(&opg->ops_lru); /* reserve an LRU space for this page */ @@ -362,16 +326,27 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, * OSC to free slots voluntarily to maintain a reasonable number of free slots * at any time. */ - static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq); -/* LRU pages are freed in batch mode. OSC should at least free this - * number of pages to avoid running out of LRU budget, and.. + +/** + * LRU pages are freed in batch mode. OSC should at least free this + * number of pages to avoid running out of LRU slots. + */ +static inline int lru_shrink_min(struct client_obd *cli) +{ + return cli->cl_max_pages_per_rpc * 2; +} + +/** + * free this number at most otherwise it will take too long time to finish. */ -static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */ -/* free this number at most otherwise it will take too long time to finish. 
*/ -static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */ +static inline int lru_shrink_max(struct client_obd *cli) +{ + return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight; +} -/* Check if we can free LRU slots from this OSC. If there exists LRU waiters, +/** + * Check if we can free LRU slots from this OSC. If there exists LRU waiters, * we should free slots aggressively. In this way, slots are freed in a steady * step to maintain fairness among OSCs. * @@ -388,13 +363,20 @@ static int osc_cache_too_much(struct client_obd *cli) /* if it's going to run out LRU slots, we should free some, but not * too much to maintain fairness among OSCs. */ - if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) { + if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) { if (pages >= budget) - return lru_shrink_max; + return lru_shrink_max(cli); else if (pages >= budget / 2) - return lru_shrink_min; - } else if (pages >= budget * 2) { - return lru_shrink_min; + return lru_shrink_min(cli); + } else { + time64_t duration = ktime_get_real_seconds(); + + /* knock out pages by duration of no IO activity */ + duration -= cli->cl_lru_last_used; + duration >>= 6; /* approximately 1 minute */ + if (duration > 0 && + pages >= div64_s64((s64)budget, duration)) + return lru_shrink_min(cli); } return 0; } @@ -402,11 +384,21 @@ static int osc_cache_too_much(struct client_obd *cli) int lru_queue_work(const struct lu_env *env, void *data) { struct client_obd *cli = data; + int count; - CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli); + CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli)); - if (osc_cache_too_much(cli)) - osc_lru_shrink(env, cli, lru_shrink_max, true); + count = osc_cache_too_much(cli); + if (count > 0) { + int rc = osc_lru_shrink(env, cli, count, false); + + CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n", + cli_name(cli), rc, count); + if (rc >= count) { + CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli)); + ptlrpcd_queue_work(cli->cl_lru_work); + } + } return 0; } @@ -433,10 +425,10 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist) list_splice_tail(&lru, &cli->cl_lru_list); atomic_long_sub(npages, &cli->cl_lru_busy); atomic_long_add(npages, &cli->cl_lru_in_list); + cli->cl_lru_last_used = ktime_get_real_seconds(); spin_unlock(&cli->cl_lru_list_lock); - /* XXX: May set force to be true for better performance */ - if (osc_cache_too_much(cli)) + if (waitqueue_active(&osc_lru_waitq)) (void)ptlrpcd_queue_work(cli->cl_lru_work); } } @@ -469,8 +461,10 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg) * this osc occupies too many LRU pages and kernel is * stealing one of them. 
*/ - if (!memory_pressure_get()) + if (osc_cache_too_much(cli)) { + CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli)); (void)ptlrpcd_queue_work(cli->cl_lru_work); + } wake_up(&osc_lru_waitq); } else { LASSERT(list_empty(&opg->ops_lru)); @@ -502,6 +496,7 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io, struct cl_page *page = pvec[i]; LASSERT(cl_page_is_owned(page, io)); + cl_page_delete(env, page); cl_page_discard(env, io, page); cl_page_disown(env, io, page); cl_page_put(env, page); @@ -542,7 +537,6 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, struct cl_object *clobj = NULL; struct cl_page **pvec; struct osc_page *opg; - struct osc_page *temp; int maxscan = 0; long count = 0; int index = 0; @@ -552,6 +546,8 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0) return 0; + CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n", + cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force); if (!force) { if (atomic_read(&cli->cl_lru_shrinkers) > 0) return -EBUSY; @@ -568,14 +564,21 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, io = &osc_env_info(env)->oti_io; spin_lock(&cli->cl_lru_list_lock); + if (force) + cli->cl_lru_reclaim++; maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list)); - list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) { + while (!list_empty(&cli->cl_lru_list)) { struct cl_page *page; bool will_free = false; + if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1) + break; + if (--maxscan < 0) break; + opg = list_entry(cli->cl_lru_list.next, struct osc_page, + ops_lru); page = opg->ops_cl.cpl_page; if (lru_page_busy(cli, page)) { list_move_tail(&opg->ops_lru, &cli->cl_lru_list); @@ -662,34 +665,43 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, return count > 0 ? count : rc; } -long osc_lru_reclaim(struct client_obd *cli) +/** + * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least + * \@npages of LRU slots. For performance consideration, it's better to drop + * LRU pages in batch. Therefore, the actual number is adjusted at least + * max_pages_per_rpc. 
+ */ +long osc_lru_reclaim(struct client_obd *cli, unsigned long npages) { - struct cl_env_nest nest; struct lu_env *env; struct cl_client_cache *cache = cli->cl_cache; int max_scans; + int refcheck; long rc = 0; LASSERT(cache); - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) return 0; - rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false); - if (rc != 0) { - if (rc == -EBUSY) - rc = 0; - - CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n", - cli->cl_import->imp_obd->obd_name, rc, cli); + npages = max_t(int, npages, cli->cl_max_pages_per_rpc); + CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n", + cli_name(cli), npages); + rc = osc_lru_shrink(env, cli, npages, true); + if (rc >= npages) { + CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n", + cli_name(cli), rc, npages); + if (osc_cache_too_much(cli) > 0) + ptlrpcd_queue_work(cli->cl_lru_work); goto out; + } else if (rc > 0) { + npages -= rc; } - CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_long_read(&cli->cl_lru_in_list), - atomic_long_read(&cli->cl_lru_busy)); + CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n", + cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list), + atomic_long_read(&cli->cl_lru_busy), npages); /* Reclaim LRU slots from other client_obd as it can't free enough * from its own. This should rarely happen. @@ -706,7 +718,7 @@ long osc_lru_reclaim(struct client_obd *cli) cl_lru_osc); CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n", - cli->cl_import->imp_obd->obd_name, cli, + cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list), atomic_long_read(&cli->cl_lru_busy)); @@ -714,19 +726,20 @@ long osc_lru_reclaim(struct client_obd *cli) if (osc_cache_too_much(cli) > 0) { spin_unlock(&cache->ccc_lru_lock); - rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), - true); + rc = osc_lru_shrink(env, cli, npages, true); spin_lock(&cache->ccc_lru_lock); - if (rc != 0) + if (rc >= npages) break; + if (rc > 0) + npages -= rc; } } spin_unlock(&cache->ccc_lru_lock); out: - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n", - cli->cl_import->imp_obd->obd_name, cli, rc); + cli_name(cli), cli, rc); return rc; } @@ -756,7 +769,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, LASSERT(atomic_long_read(cli->cl_lru_left) >= 0); while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) { /* run out of LRU spaces, try to drop some by itself */ - rc = osc_lru_reclaim(cli); + rc = osc_lru_reclaim(cli, 1); if (rc < 0) break; if (rc > 0) @@ -796,8 +809,10 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc, int count = 0; int i; + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); + for (i = 0; i < page_count; i++) { - pg_data_t *pgdat = page_pgdat(desc->bd_iov[i].bv_page); + pg_data_t *pgdat = page_pgdat(BD_GET_KIOV(desc, i).bv_page); if (likely(pgdat == last)) { ++count; @@ -857,7 +872,7 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req) if (!unstable_count) wake_up_all(&cli->cl_cache->ccc_unstable_waitq); - if (osc_cache_too_much(cli)) + if (waitqueue_active(&osc_lru_waitq)) (void)ptlrpcd_queue_work(cli->cl_lru_work); } @@ -913,8 +928,7 @@ bool osc_over_unstable_soft_limit(struct client_obd *cli) CDEBUG(D_CACHE, "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n", - cli->cl_import->imp_obd->obd_name, cli, - unstable_nr, 
osc_unstable_count); + cli_name(cli), cli, unstable_nr, osc_unstable_count); /* * If the LRU slots are in shortage - 25% remaining AND this OSC diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c index 194d8ede40a2f852dd7f9b7556340ea640298d72..fed4da63ee45fc87bf5462ab38f1dd51ad6457c5 100644 --- a/drivers/staging/lustre/lustre/osc/osc_quota.c +++ b/drivers/staging/lustre/lustre/osc/osc_quota.c @@ -106,7 +106,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], } CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n", - cli->cl_import->imp_obd->obd_name, + cli_name(cli), type == USRQUOTA ? "user" : "group", qid[type], rc); } else { @@ -122,7 +122,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], kmem_cache_free(osc_quota_kmem, oqi); CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n", - cli->cl_import->imp_obd->obd_name, + cli_name(cli), type == USRQUOTA ? "user" : "group", qid[type], oqi); } @@ -134,8 +134,8 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], /* * Hash operations for uid/gid <-> osc_quota_info */ -static unsigned -oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask) +static unsigned int +oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask) { return cfs_hash_u32_hash(*((__u32 *)key), mask); } @@ -281,47 +281,3 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp, return rc; } - -int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - struct ptlrpc_request *req; - struct obd_quotactl *body; - int rc; - - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION, - OST_QUOTACHECK); - if (!req) - return -ENOMEM; - - body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - *body = *oqctl; - - ptlrpc_request_set_replen(req); - - /* the next poll will find -ENODATA, that means quotacheck is going on - */ - cli->cl_qchk_stat = -ENODATA; - rc = ptlrpc_queue_wait(req); - if (rc) - cli->cl_qchk_stat = rc; - ptlrpc_req_finished(req); - return rc; -} - -int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - int rc; - - qchk->obd_uuid = cli->cl_target_uuid; - memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME)); - - rc = cli->cl_qchk_stat; - /* the client is not the previous one */ - if (rc == CL_NOT_QUOTACHECKED) - rc = -EINTR; - return rc; -} diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c index 749781f022e2ee0f1db41f81c8010ea34c9863d3..7143564ae7e721545fceaa3bc65b7d79508083dc 100644 --- a/drivers/staging/lustre/lustre/osc/osc_request.c +++ b/drivers/staging/lustre/lustre/osc/osc_request.c @@ -68,7 +68,6 @@ struct osc_brw_async_args { struct client_obd *aa_cli; struct list_head aa_oaps; struct list_head aa_exts; - struct cl_req *aa_clerq; }; struct osc_async_args { @@ -82,7 +81,8 @@ struct osc_setattr_args { }; struct osc_fsync_args { - struct obd_info *fa_oi; + struct osc_object *fa_obj; + struct obdo *fa_oa; obd_enqueue_update_f fa_upcall; void *fa_cookie; }; @@ -103,140 +103,19 @@ static void osc_release_ppga(struct brw_page **ppga, u32 count); static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc); -/* Unpack OSC object metadata from disk storage (LE byte order). 
*/ -static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, - struct lov_mds_md *lmm, int lmm_bytes) -{ - int lsm_size; - struct obd_import *imp = class_exp2cliimp(exp); - - if (lmm) { - if (lmm_bytes < sizeof(*lmm)) { - CERROR("%s: lov_mds_md too small: %d, need %d\n", - exp->exp_obd->obd_name, lmm_bytes, - (int)sizeof(*lmm)); - return -EINVAL; - } - /* XXX LOV_MAGIC etc check? */ - - if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) { - CERROR("%s: zero lmm_object_id: rc = %d\n", - exp->exp_obd->obd_name, -EINVAL); - return -EINVAL; - } - } - - lsm_size = lov_stripe_md_size(1); - if (!lsmp) - return lsm_size; - - if (*lsmp && !lmm) { - kfree((*lsmp)->lsm_oinfo[0]); - kfree(*lsmp); - *lsmp = NULL; - return 0; - } - - if (!*lsmp) { - *lsmp = kzalloc(lsm_size, GFP_NOFS); - if (unlikely(!*lsmp)) - return -ENOMEM; - (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), - GFP_NOFS); - if (unlikely(!(*lsmp)->lsm_oinfo[0])) { - kfree(*lsmp); - return -ENOMEM; - } - loi_init((*lsmp)->lsm_oinfo[0]); - } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) { - return -EBADF; - } - - if (lmm) - /* XXX zero *lsmp? */ - ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi); - - if (imp && - (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES)) - (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes; - else - (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; - - return lsm_size; -} - static inline void osc_pack_req_body(struct ptlrpc_request *req, - struct obd_info *oinfo) + struct obdo *oa) { struct ost_body *body; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); - lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, - oinfo->oi_oa); -} - -static int osc_getattr_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - struct osc_async_args *aa, int rc) -{ - struct ost_body *body; - - if (rc != 0) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body) { - CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode); - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, - aa->aa_oi->oi_oa, &body->oa); - - /* This should really be sent by the OST */ - aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE; - aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ; - } else { - CDEBUG(D_INFO, "can't unpack ost_body\n"); - rc = -EPROTO; - aa->aa_oi->oi_oa->o_valid = 0; - } -out: - rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc); - return rc; -} - -static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo, - struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req; - struct osc_async_args *aa; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - osc_pack_req_body(req, oinfo); - - ptlrpc_request_set_replen(req); - req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret; - - CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - aa->aa_oi = oinfo; - - ptlrpc_set_add_req(set, req); - return 0; + lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); } static int osc_getattr(const struct lu_env *env, struct obd_export *exp, - struct obd_info *oinfo) + struct obdo *oa) { struct ptlrpc_request *req; struct ost_body *body; @@ -252,7 +131,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, return rc; } - osc_pack_req_body(req, oinfo); + 
osc_pack_req_body(req, oa); ptlrpc_request_set_replen(req); @@ -267,11 +146,11 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, } CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode); - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa, + lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa); - oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd); - oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ; + oa->o_blksize = cli_brw_size(exp->exp_obd); + oa->o_valid |= OBD_MD_FLBLKSZ; out: ptlrpc_req_finished(req); @@ -279,13 +158,13 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, } static int osc_setattr(const struct lu_env *env, struct obd_export *exp, - struct obd_info *oinfo, struct obd_trans_info *oti) + struct obdo *oa) { struct ptlrpc_request *req; struct ost_body *body; int rc; - LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP); + LASSERT(oa->o_valid & OBD_MD_FLGROUP); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); if (!req) @@ -297,7 +176,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, return rc; } - osc_pack_req_body(req, oinfo); + osc_pack_req_body(req, oa); ptlrpc_request_set_replen(req); @@ -311,7 +190,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, goto out; } - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa, + lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa); out: @@ -341,10 +220,9 @@ static int osc_setattr_interpret(const struct lu_env *env, return rc; } -int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, - struct obd_trans_info *oti, - obd_enqueue_update_f upcall, void *cookie, - struct ptlrpc_request_set *rqset) +int osc_setattr_async(struct obd_export *exp, struct obdo *oa, + obd_enqueue_update_f upcall, void *cookie, + struct ptlrpc_request_set *rqset) { struct ptlrpc_request *req; struct osc_setattr_args *sa; @@ -360,10 +238,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, return rc; } - if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) - oinfo->oi_oa->o_lcookie = *oti->oti_logcookies; - - osc_pack_req_body(req, oinfo); + osc_pack_req_body(req, oa); ptlrpc_request_set_replen(req); @@ -377,7 +252,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args)); sa = ptlrpc_req_async_args(req); - sa->sa_oa = oinfo->oi_oa; + sa->sa_oa = oa; sa->sa_upcall = upcall; sa->sa_cookie = cookie; @@ -390,16 +265,8 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, return 0; } -static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo, - struct obd_trans_info *oti, - struct ptlrpc_request_set *rqset) -{ - return osc_setattr_async_base(exp, oinfo, oti, - oinfo->oi_cb_up, oinfo, rqset); -} - static int osc_create(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa, struct obd_trans_info *oti) + struct obdo *oa) { struct ptlrpc_request *req; struct ost_body *body; @@ -428,15 +295,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp, ptlrpc_request_set_replen(req); - if ((oa->o_valid & OBD_MD_FLFLAGS) && - oa->o_flags == OBD_FL_DELORPHAN) { - DEBUG_REQ(D_HA, req, - "delorphan from OST integration"); - /* Don't resend the delorphan req */ - req->rq_no_resend = 1; - req->rq_no_delay = 1; - } - rc = ptlrpc_queue_wait(req); if (rc) goto out_req; @@ -453,12 +311,6 @@ static int osc_create(const 
struct lu_env *env, struct obd_export *exp, oa->o_blksize = cli_brw_size(exp->exp_obd); oa->o_valid |= OBD_MD_FLBLKSZ; - if (oti && oa->o_valid & OBD_MD_FLCOOKIE) { - if (!oti->oti_logcookies) - oti->oti_logcookies = &oti->oti_onecookie; - *oti->oti_logcookies = oa->o_lcookie; - } - CDEBUG(D_HA, "transno: %lld\n", lustre_msg_get_transno(req->rq_repmsg)); out_req: @@ -467,7 +319,7 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp, return rc; } -int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, +int osc_punch_base(struct obd_export *exp, struct obdo *oa, obd_enqueue_update_f upcall, void *cookie, struct ptlrpc_request_set *rqset) { @@ -491,14 +343,14 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, - oinfo->oi_oa); + oa); ptlrpc_request_set_replen(req); req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret; CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args)); sa = ptlrpc_req_async_args(req); - sa->sa_oa = oinfo->oi_oa; + sa->sa_oa = oa; sa->sa_upcall = upcall; sa->sa_cookie = cookie; if (rqset == PTLRPCD_SET) @@ -513,8 +365,11 @@ static int osc_sync_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *arg, int rc) { + struct cl_attr *attr = &osc_env_info(env)->oti_attr; struct osc_fsync_args *fa = arg; + unsigned long valid = 0; struct ost_body *body; + struct cl_object *obj; if (rc) goto out; @@ -526,16 +381,30 @@ static int osc_sync_interpret(const struct lu_env *env, goto out; } - *fa->fa_oi->oi_oa = body->oa; + *fa->fa_oa = body->oa; + obj = osc2cl(fa->fa_obj); + + /* Update osc object's blocks attribute */ + cl_object_attr_lock(obj); + if (body->oa.o_valid & OBD_MD_FLBLOCKS) { + attr->cat_blocks = body->oa.o_blocks; + valid |= CAT_BLOCKS; + } + + if (valid) + cl_object_attr_update(env, obj, attr, valid); + cl_object_attr_unlock(obj); + out: rc = fa->fa_upcall(fa->fa_cookie, rc); return rc; } -int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, +int osc_sync_base(struct osc_object *obj, struct obdo *oa, obd_enqueue_update_f upcall, void *cookie, struct ptlrpc_request_set *rqset) { + struct obd_export *exp = osc_export(obj); struct ptlrpc_request *req; struct ost_body *body; struct osc_fsync_args *fa; @@ -555,14 +424,15 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, - oinfo->oi_oa); + oa); ptlrpc_request_set_replen(req); req->rq_interpret_reply = osc_sync_interpret; CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args)); fa = ptlrpc_req_async_args(req); - fa->fa_oi = oinfo; + fa->fa_obj = obj; + fa->fa_oa = oa; fa->fa_upcall = upcall; fa->fa_cookie = cookie; @@ -639,19 +509,8 @@ static int osc_can_send_destroy(struct client_obd *cli) return 0; } -/* Destroy requests can be async always on the client, and we don't even really - * care about the return code since the client cannot do anything at all about - * a destroy failure. - * When the MDS is unlinking a filename, it saves the file objects into a - * recovery llog, and these object records are cancelled when the OST reports - * they were destroyed and sync'd to disk (i.e. transaction committed). 
- * If the client dies, or the OST is down when the object should be destroyed, - * the records are not cancelled, and when the OST reconnects to the MDS next, - * it will retrieve the llog unlink logs and then sends the log cancellation - * cookies to the MDS after committing destroy transactions. - */ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa, struct obd_trans_info *oti) + struct obdo *oa) { struct client_obd *cli = &exp->exp_obd->u.cli; struct ptlrpc_request *req; @@ -683,32 +542,22 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ ptlrpc_at_set_req_timeout(req); - if (oti && oa->o_valid & OBD_MD_FLCOOKIE) - oa->o_lcookie = *oti->oti_logcookies; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); ptlrpc_request_set_replen(req); - /* If osc_destroy is for destroying the unlink orphan, - * sent from MDT to OST, which should not be blocked here, - * because the process might be triggered by ptlrpcd, and - * it is not good to block ptlrpcd thread (b=16006 - **/ - if (!(oa->o_flags & OBD_FL_DELORPHAN)) { - req->rq_interpret_reply = osc_destroy_interpret; - if (!osc_can_send_destroy(cli)) { - struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, - NULL); - - /* - * Wait until the number of on-going destroy RPCs drops - * under max_rpc_in_flight - */ - l_wait_event_exclusive(cli->cl_destroy_waitq, - osc_can_send_destroy(cli), &lwi); - } + req->rq_interpret_reply = osc_destroy_interpret; + if (!osc_can_send_destroy(cli)) { + struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); + + /* + * Wait until the number of on-going destroy RPCs drops + * under max_rpc_in_flight + */ + l_wait_event_exclusive(cli->cl_destroy_waitq, + osc_can_send_destroy(cli), &lwi); } /* Do not wait for response */ @@ -734,14 +583,13 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, oa->o_undirty = 0; } else if (unlikely(atomic_long_read(&obd_dirty_pages) - atomic_long_read(&obd_dirty_transit_pages) > - (obd_max_dirty_pages + 1))) { + (long)(obd_max_dirty_pages + 1))) { /* The atomic_read() allowing the atomic_inc() are * not covered by a lock thus they may safely race and trip * this CERROR() unless we add in a small fudge factor (+1). 
*/ - CERROR("%s: dirty %ld + %ld > system dirty_max %lu\n", - cli->cl_import->imp_obd->obd_name, - atomic_long_read(&obd_dirty_pages), + CERROR("%s: dirty %ld + %ld > system dirty_max %ld\n", + cli_name(cli), atomic_long_read(&obd_dirty_pages), atomic_long_read(&obd_dirty_transit_pages), obd_max_dirty_pages); oa->o_undirty = 0; @@ -936,12 +784,10 @@ static int osc_add_shrink_grant(struct client_obd *client) osc_grant_shrink_grant_cb, NULL, &client->cl_grant_shrink_list); if (rc) { - CERROR("add grant client %s error %d\n", - client->cl_import->imp_obd->obd_name, rc); + CERROR("add grant client %s error %d\n", cli_name(client), rc); return rc; } - CDEBUG(D_CACHE, "add grant client %s\n", - client->cl_import->imp_obd->obd_name); + CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client)); osc_update_next_shrink(client); return 0; } @@ -970,23 +816,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) cli->cl_avail_grant = ocd->ocd_grant - (cli->cl_dirty_pages << PAGE_SHIFT); - if (cli->cl_avail_grant < 0) { - CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n", - cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant, - ocd->ocd_grant, cli->cl_dirty_pages << PAGE_SHIFT); - /* workaround for servers which do not have the patch from - * LU-2679 - */ - cli->cl_avail_grant = ocd->ocd_grant; - } - /* determine the appropriate chunk size used by osc_extent. */ cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize); spin_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", - cli->cl_import->imp_obd->obd_name, - cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits); + cli_name(cli), cli->cl_avail_grant, cli->cl_lost_grant, + cli->cl_chunkbits); if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK && list_empty(&cli->cl_grant_shrink_list)) @@ -1072,9 +908,9 @@ static int check_write_rcs(struct ptlrpc_request *req, static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) { if (p1->flag != p2->flag) { - unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE | - OBD_BRW_SYNC | OBD_BRW_ASYNC | - OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC); + unsigned int mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE | + OBD_BRW_SYNC | OBD_BRW_ASYNC | + OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC); /* warn if we try to combine flags that we don't know to be * safe to combine @@ -1097,7 +933,6 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, int i = 0; struct cfs_crypto_hash_desc *hdesc; unsigned int bufsize; - int err; unsigned char cfs_alg = cksum_obd2cfs(cksum_type); LASSERT(pg_count > 0); @@ -1139,7 +974,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, } bufsize = sizeof(cksum); - err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize); + cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize); /* For sending we only compute the wrong checksum instead * of corrupting the data so it is still correct on a redo @@ -1151,8 +986,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, } static int osc_brw_prep_request(int cmd, struct client_obd *cli, - struct obdo *oa, - struct lov_stripe_md *lsm, u32 page_count, + struct obdo *oa, u32 page_count, struct brw_page **pga, struct ptlrpc_request **reqp, int reserve, @@ -1210,8 +1044,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, desc = ptlrpc_prep_bulk_imp(req, page_count, cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS, - opc == OST_WRITE ? 
BULK_GET_SOURCE : BULK_PUT_SINK, - OST_BULK_PORTAL); + (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE : + PTLRPC_BULK_PUT_SINK) | PTLRPC_BULK_BUF_KIOV, OST_BULK_PORTAL, + &ptlrpc_bulk_kiov_pin_ops); if (!desc) { rc = -ENOMEM; @@ -1259,7 +1094,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) == (pg->flag & OBD_BRW_SRVLOCK)); - ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count); + desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff, pg->count); requested_nob += pg->count; if (i > 0 && can_merge_pages(pg_prev, pg)) { @@ -1569,7 +1404,6 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) == OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ, aa->aa_cli, aa->aa_oa, - NULL /* lsm unused by osc currently */, aa->aa_page_count, aa->aa_ppga, &new_req, 0, 1); if (rc) @@ -1764,8 +1598,6 @@ static int brw_interpret(const struct lu_env *env, LASSERT(list_empty(&aa->aa_exts)); LASSERT(list_empty(&aa->aa_oaps)); - cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc : - req->rq_bulk->bd_nob_transferred); osc_release_ppga(aa->aa_ppga, aa->aa_page_count); ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred); @@ -1818,9 +1650,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, struct osc_brw_async_args *aa = NULL; struct obdo *oa = NULL; struct osc_async_page *oap; - struct osc_async_page *tmp; - struct cl_req *clerq = NULL; - enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ; + struct osc_object *obj = NULL; struct cl_req_attr *crattr = NULL; u64 starting_offset = OBD_OBJECT_EOF; u64 ending_offset = 0; @@ -1828,6 +1658,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, int mem_tight = 0; int page_count = 0; bool soft_sync = false; + bool interrupted = false; int i; int rc; struct ost_body *body; @@ -1839,32 +1670,15 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, list_for_each_entry(ext, ext_list, oe_link) { LASSERT(ext->oe_state == OES_RPC); mem_tight |= ext->oe_memalloc; - list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { - ++page_count; - list_add_tail(&oap->oap_rpc_item, &rpc_list); - if (starting_offset > oap->oap_obj_off) - starting_offset = oap->oap_obj_off; - else - LASSERT(oap->oap_page_off == 0); - if (ending_offset < oap->oap_obj_off + oap->oap_count) - ending_offset = oap->oap_obj_off + - oap->oap_count; - else - LASSERT(oap->oap_page_off + oap->oap_count == - PAGE_SIZE); - } + page_count += ext->oe_nr_pages; + if (!obj) + obj = ext->oe_obj; } soft_sync = osc_over_unstable_soft_limit(cli); if (mem_tight) mpflag = cfs_memory_pressure_get_and_set(); - crattr = kzalloc(sizeof(*crattr), GFP_NOFS); - if (!crattr) { - rc = -ENOMEM; - goto out; - } - pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS); if (!pga) { rc = -ENOMEM; @@ -1878,44 +1692,46 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } i = 0; - list_for_each_entry(oap, &rpc_list, oap_rpc_item) { - struct cl_page *page = oap2cl_page(oap); - - if (!clerq) { - clerq = cl_req_alloc(env, page, crt, - 1 /* only 1-object rpcs for now */); - if (IS_ERR(clerq)) { - rc = PTR_ERR(clerq); - goto out; - } + list_for_each_entry(ext, ext_list, oe_link) { + list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { + if (mem_tight) + oap->oap_brw_flags |= OBD_BRW_MEMALLOC; + if (soft_sync) + oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC; + pga[i] = &oap->oap_brw_page; + pga[i]->off = oap->oap_obj_off + 
oap->oap_page_off; + i++; + + list_add_tail(&oap->oap_rpc_item, &rpc_list); + if (starting_offset == OBD_OBJECT_EOF || + starting_offset > oap->oap_obj_off) + starting_offset = oap->oap_obj_off; + else + LASSERT(!oap->oap_page_off); + if (ending_offset < oap->oap_obj_off + oap->oap_count) + ending_offset = oap->oap_obj_off + + oap->oap_count; + else + LASSERT(oap->oap_page_off + oap->oap_count == + PAGE_SIZE); + if (oap->oap_interrupted) + interrupted = true; } - if (mem_tight) - oap->oap_brw_flags |= OBD_BRW_MEMALLOC; - if (soft_sync) - oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC; - pga[i] = &oap->oap_brw_page; - pga[i]->off = oap->oap_obj_off + oap->oap_page_off; - CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n", - pga[i]->pg, oap->oap_page->index, oap, - pga[i]->flag); - i++; - cl_req_page_add(env, clerq, page); } - /* always get the data for the obdo for the rpc */ - LASSERT(clerq); - crattr->cra_oa = oa; - cl_req_attr_set(env, clerq, crattr, ~0ULL); + /* first page in the list */ + oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item); - rc = cl_req_prep(env, clerq); - if (rc != 0) { - CERROR("cl_req_prep failed: %d\n", rc); - goto out; - } + crattr = &osc_env_info(env)->oti_req_attr; + memset(crattr, 0, sizeof(*crattr)); + crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ; + crattr->cra_flags = ~0ULL; + crattr->cra_page = oap2cl_page(oap); + crattr->cra_oa = oa; + cl_req_attr_set(env, osc2cl(obj), crattr); sort_brw_pages(pga, page_count); - rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count, - pga, &req, 1, 0); + rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 1, 0); if (rc != 0) { CERROR("prep_req failed: %d\n", rc); goto out; @@ -1924,8 +1740,10 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, req->rq_commit_cb = brw_commit; req->rq_interpret_reply = brw_interpret; - if (mem_tight != 0) - req->rq_memalloc = 1; + req->rq_memalloc = mem_tight != 0; + oap->oap_request = ptlrpc_request_addref(req); + if (interrupted && !req->rq_intr) + ptlrpc_mark_interrupted(req); /* Need to update the timestamps after the request is built in case * we race with setattr (locally or in queue at OST). 
If OST gets @@ -1935,9 +1753,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, */ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); crattr->cra_oa = &body->oa; - cl_req_attr_set(env, clerq, crattr, - OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME); - + crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME; + cl_req_attr_set(env, osc2cl(obj), crattr); lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid); CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); @@ -1946,24 +1763,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, list_splice_init(&rpc_list, &aa->aa_oaps); INIT_LIST_HEAD(&aa->aa_exts); list_splice_init(ext_list, &aa->aa_exts); - aa->aa_clerq = clerq; - - /* queued sync pages can be torn down while the pages - * were between the pending list and the rpc - */ - tmp = NULL; - list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { - /* only one oap gets a request reference */ - if (!tmp) - tmp = oap; - if (oap->oap_interrupted && !req->rq_intr) { - CDEBUG(D_INODE, "oap %p in req %p interrupted\n", - oap, req); - ptlrpc_mark_interrupted(req); - } - } - if (tmp) - tmp->oap_request = ptlrpc_request_addref(req); spin_lock(&cli->cl_loi_list_lock); starting_offset >>= PAGE_SHIFT; @@ -1985,6 +1784,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight", page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight); + OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val); ptlrpcd_add_req(req); rc = 0; @@ -1993,8 +1793,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, if (mem_tight != 0) cfs_memory_pressure_restore(mpflag); - kfree(crattr); - if (rc != 0) { LASSERT(!req); @@ -2010,22 +1808,15 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, list_del_init(&ext->oe_link); osc_extent_finish(env, ext, 0, rc); } - if (clerq && !IS_ERR(clerq)) - cl_req_completion(env, clerq, rc); } return rc; } -static int osc_set_lock_data_with_check(struct ldlm_lock *lock, - struct ldlm_enqueue_info *einfo) +static int osc_set_lock_data(struct ldlm_lock *lock, void *data) { - void *data = einfo->ei_cbdata; int set = 0; - LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl); - LASSERT(lock->l_resource->lr_type == einfo->ei_type); - LASSERT(lock->l_completion_ast == einfo->ei_cb_cp); - LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl); + LASSERT(lock); lock_res_and_lock(lock); @@ -2039,21 +1830,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, return set; } -static int osc_set_data_with_check(struct lustre_handle *lockh, - struct ldlm_enqueue_info *einfo) -{ - struct ldlm_lock *lock = ldlm_handle2lock(lockh); - int set = 0; - - if (lock) { - set = osc_set_lock_data_with_check(lock, einfo); - LDLM_LOCK_PUT(lock); - } else - CERROR("lockh %p, data %p - client evicted?\n", - lockh, einfo->ei_cbdata); - return set; -} - static int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall, void *cookie, struct lustre_handle *lockh, enum ldlm_mode mode, @@ -2153,7 +1929,7 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1; * release locks just after they are obtained. 
*/ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, - __u64 *flags, ldlm_policy_data_t *policy, + __u64 *flags, union ldlm_policy_data *policy, struct ost_lvb *lvb, int kms_valid, osc_enqueue_upcall_f upcall, void *cookie, struct ldlm_enqueue_info *einfo, @@ -2219,7 +1995,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, ldlm_lock_decref(&lockh, mode); LDLM_LOCK_PUT(matched); return -ECANCELED; - } else if (osc_set_lock_data_with_check(matched, einfo)) { + } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) { *flags |= LDLM_FL_LVB_READY; /* We already have a lock, and it's referenced. */ (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED); @@ -2304,7 +2080,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, } int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, - __u32 type, ldlm_policy_data_t *policy, __u32 mode, + __u32 type, union ldlm_policy_data *policy, __u32 mode, __u64 *flags, void *data, struct lustre_handle *lockh, int unref) { @@ -2331,31 +2107,20 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, rc |= LCK_PW; rc = ldlm_lock_match(obd->obd_namespace, lflags, res_id, type, policy, rc, lockh, unref); - if (rc) { - if (data) { - if (!osc_set_data_with_check(lockh, data)) { - if (!(lflags & LDLM_FL_TEST_LOCK)) - ldlm_lock_decref(lockh, rc); - return 0; - } - } - if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) { - ldlm_lock_addref(lockh, LCK_PR); - ldlm_lock_decref(lockh, LCK_PW); - } + if (!rc || lflags & LDLM_FL_TEST_LOCK) return rc; - } - return rc; -} -int osc_cancel_base(struct lustre_handle *lockh, __u32 mode) -{ - if (unlikely(mode == LCK_GROUP)) - ldlm_lock_decref_and_cancel(lockh, mode); - else - ldlm_lock_decref(lockh, mode); + if (data) { + struct ldlm_lock *lock = ldlm_handle2lock(lockh); - return 0; + LASSERT(lock); + if (!osc_set_lock_data(lock, data)) { + ldlm_lock_decref(lockh, rc); + rc = 0; + } + LDLM_LOCK_PUT(lock); + } + return rc; } static int osc_statfs_interpret(const struct lu_env *env, @@ -2526,9 +2291,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, err = ptlrpc_set_import_active(obd->u.cli.cl_import, data->ioc_offset); goto out; - case OBD_IOC_POLL_QUOTACHECK: - err = osc_quota_poll_check(exp, karg); - goto out; case OBD_IOC_PING_TARGET: err = ptlrpc_obd_ping(obd); goto out; @@ -2543,103 +2305,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, return err; } -static int osc_get_info(const struct lu_env *env, struct obd_export *exp, - u32 keylen, void *key, __u32 *vallen, void *val, - struct lov_stripe_md *lsm) -{ - if (!vallen || !val) - return -EFAULT; - - if (KEY_IS(KEY_FIEMAP)) { - struct ll_fiemap_info_key *fm_key = key; - struct ldlm_res_id res_id; - ldlm_policy_data_t policy; - struct lustre_handle lockh; - enum ldlm_mode mode = 0; - struct ptlrpc_request *req; - struct ll_user_fiemap *reply; - char *tmp; - int rc; - - if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC)) - goto skip_locking; - - policy.l_extent.start = fm_key->fiemap.fm_start & - PAGE_MASK; - - if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= - fm_key->fiemap.fm_start + PAGE_SIZE - 1) - policy.l_extent.end = OBD_OBJECT_EOF; - else - policy.l_extent.end = (fm_key->fiemap.fm_start + - fm_key->fiemap.fm_length + - PAGE_SIZE - 1) & PAGE_MASK; - - ostid_build_res_name(&fm_key->oa.o_oi, &res_id); - mode = ldlm_lock_match(exp->exp_obd->obd_namespace, - LDLM_FL_BLOCK_GRANTED | - LDLM_FL_LVB_READY, - &res_id, 
LDLM_EXTENT, &policy, - LCK_PR | LCK_PW, &lockh, 0); - if (mode) { /* lock is cached on client */ - if (mode != LCK_PR) { - ldlm_lock_addref(&lockh, LCK_PR); - ldlm_lock_decref(&lockh, LCK_PW); - } - } else { /* no cached lock, needs acquire lock on server side */ - fm_key->oa.o_valid |= OBD_MD_FLFLAGS; - fm_key->oa.o_flags |= OBD_FL_SRVLOCK; - } - -skip_locking: - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_OST_GET_INFO_FIEMAP); - if (!req) { - rc = -ENOMEM; - goto drop_lock; - } - - req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, - RCL_CLIENT, keylen); - req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, - RCL_CLIENT, *vallen); - req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, - RCL_SERVER, *vallen); - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO); - if (rc) { - ptlrpc_request_free(req); - goto drop_lock; - } - - tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY); - memcpy(tmp, key, keylen); - tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL); - memcpy(tmp, val, *vallen); - - ptlrpc_request_set_replen(req); - rc = ptlrpc_queue_wait(req); - if (rc) - goto fini_req; - - reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL); - if (!reply) { - rc = -EPROTO; - goto fini_req; - } - - memcpy(val, reply, *vallen); -fini_req: - ptlrpc_req_finished(req); -drop_lock: - if (mode) - ldlm_lock_decref(&lockh, LCK_PR); - return rc; - } - - return -EINVAL; -} - static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, u32 keylen, void *key, u32 vallen, void *val, struct ptlrpc_request_set *set) @@ -2999,47 +2664,33 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) return rc; } -static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) +static int osc_precleanup(struct obd_device *obd) { - switch (stage) { - case OBD_CLEANUP_EARLY: { - struct obd_import *imp; - - imp = obd->u.cli.cl_import; - CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name); - /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */ - ptlrpc_deactivate_import(imp); - spin_lock(&imp->imp_lock); - imp->imp_pingable = 0; - spin_unlock(&imp->imp_lock); - break; + struct client_obd *cli = &obd->u.cli; + + /* LU-464 + * for echo client, export may be on zombie list, wait for + * zombie thread to cull it, because cli.cl_import will be + * cleared in client_disconnect_export(): + * class_export_destroy() -> obd_cleanup() -> + * echo_device_free() -> echo_client_cleanup() -> + * obd_disconnect() -> osc_disconnect() -> + * client_disconnect_export() + */ + obd_zombie_barrier(); + if (cli->cl_writeback_work) { + ptlrpcd_destroy_work(cli->cl_writeback_work); + cli->cl_writeback_work = NULL; } - case OBD_CLEANUP_EXPORTS: { - struct client_obd *cli = &obd->u.cli; - /* LU-464 - * for echo client, export may be on zombie list, wait for - * zombie thread to cull it, because cli.cl_import will be - * cleared in client_disconnect_export(): - * class_export_destroy() -> obd_cleanup() -> - * echo_device_free() -> echo_client_cleanup() -> - * obd_disconnect() -> osc_disconnect() -> - * client_disconnect_export() - */ - obd_zombie_barrier(); - if (cli->cl_writeback_work) { - ptlrpcd_destroy_work(cli->cl_writeback_work); - cli->cl_writeback_work = NULL; - } - if (cli->cl_lru_work) { - ptlrpcd_destroy_work(cli->cl_lru_work); - cli->cl_lru_work = NULL; - } - obd_cleanup_client_import(obd); - ptlrpc_lprocfs_unregister_obd(obd); - lprocfs_obd_cleanup(obd); - break; - } + + if (cli->cl_lru_work) { + 
ptlrpcd_destroy_work(cli->cl_lru_work); + cli->cl_lru_work = NULL; } + + obd_cleanup_client_import(obd); + ptlrpc_lprocfs_unregister_obd(obd); + lprocfs_obd_cleanup(obd); return 0; } @@ -3104,24 +2755,18 @@ static struct obd_ops osc_obd_ops = { .disconnect = osc_disconnect, .statfs = osc_statfs, .statfs_async = osc_statfs_async, - .unpackmd = osc_unpackmd, .create = osc_create, .destroy = osc_destroy, .getattr = osc_getattr, - .getattr_async = osc_getattr_async, .setattr = osc_setattr, - .setattr_async = osc_setattr_async, .iocontrol = osc_iocontrol, - .get_info = osc_get_info, .set_info_async = osc_set_info_async, .import_event = osc_import_event, .process_config = osc_process_config, .quotactl = osc_quotactl, - .quotacheck = osc_quotacheck, }; extern struct lu_kmem_descr osc_caches[]; -extern struct lock_class_key osc_ast_guard_class; static int __init osc_init(void) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c index 8c51d51a678b2d41ff661b0a369de144c3f63b13..804741362bc029b24899663852e3138a64a48500 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/client.c @@ -43,6 +43,18 @@ #include "ptlrpc_internal.h" +const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = { + .add_kiov_frag = ptlrpc_prep_bulk_page_pin, + .release_frags = ptlrpc_release_bulk_page_pin, +}; +EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops); + +const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = { + .add_kiov_frag = ptlrpc_prep_bulk_page_nopin, + .release_frags = NULL, +}; +EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops); + static int ptlrpc_send_new_req(struct ptlrpc_request *req); static int ptlrpcd_check_work(struct ptlrpc_request *req); static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async); @@ -95,24 +107,43 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid) * Allocate and initialize new bulk descriptor on the sender. * Returns pointer to the descriptor or NULL on error. 
*/ -struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, - unsigned type, unsigned portal) +struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags, + unsigned int max_brw, + enum ptlrpc_bulk_op_type type, + unsigned int portal, + const struct ptlrpc_bulk_frag_ops *ops) { struct ptlrpc_bulk_desc *desc; int i; - desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]), - GFP_NOFS); + /* ensure that only one of KIOV or IOVEC is set but not both */ + LASSERT((ptlrpc_is_bulk_desc_kiov(type) && ops->add_kiov_frag) || + (ptlrpc_is_bulk_desc_kvec(type) && ops->add_iov_frag)); + + desc = kzalloc(sizeof(*desc), GFP_NOFS); if (!desc) return NULL; + if (type & PTLRPC_BULK_BUF_KIOV) { + GET_KIOV(desc) = kcalloc(nfrags, sizeof(*GET_KIOV(desc)), + GFP_NOFS); + if (!GET_KIOV(desc)) + goto free_desc; + } else { + GET_KVEC(desc) = kcalloc(nfrags, sizeof(*GET_KVEC(desc)), + GFP_NOFS); + if (!GET_KVEC(desc)) + goto free_desc; + } + spin_lock_init(&desc->bd_lock); init_waitqueue_head(&desc->bd_waitq); - desc->bd_max_iov = npages; + desc->bd_max_iov = nfrags; desc->bd_iov_count = 0; desc->bd_portal = portal; desc->bd_type = type; desc->bd_md_count = 0; + desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *)ops; LASSERT(max_brw > 0); desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT); /* @@ -123,24 +154,31 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, LNetInvalidateHandle(&desc->bd_mds[i]); return desc; +free_desc: + kfree(desc); + return NULL; } /** * Prepare bulk descriptor for specified outgoing request \a req that - * can fit \a npages * pages. \a type is bulk type. \a portal is where + * can fit \a nfrags * pages. \a type is bulk type. \a portal is where * the bulk to be sent. Used on client-side. * Returns pointer to newly allocated initialized bulk descriptor or NULL on * error. */ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - unsigned npages, unsigned max_brw, - unsigned type, unsigned portal) + unsigned int nfrags, + unsigned int max_brw, + unsigned int type, + unsigned int portal, + const struct ptlrpc_bulk_frag_ops *ops) { struct obd_import *imp = req->rq_import; struct ptlrpc_bulk_desc *desc; - LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE); - desc = ptlrpc_new_bulk(npages, max_brw, type, portal); + LASSERT(ptlrpc_is_bulk_op_passive(type)); + + desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops); if (!desc) return NULL; @@ -158,56 +196,82 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, } EXPORT_SYMBOL(ptlrpc_prep_bulk_imp); -/** - * Add a page \a page to the bulk descriptor \a desc. - * Data to transfer in the page starts at offset \a pageoffset and - * amount of data to transfer from the page is \a len - */ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, int pageoffset, int len, int pin) { + struct bio_vec *kiov; + LASSERT(desc->bd_iov_count < desc->bd_max_iov); LASSERT(page); LASSERT(pageoffset >= 0); LASSERT(len > 0); LASSERT(pageoffset + len <= PAGE_SIZE); + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); + + kiov = &BD_GET_KIOV(desc, desc->bd_iov_count); desc->bd_nob += len; if (pin) get_page(page); - ptlrpc_add_bulk_page(desc, page, pageoffset, len); + kiov->bv_page = page; + kiov->bv_offset = pageoffset; + kiov->bv_len = len; + + desc->bd_iov_count++; } EXPORT_SYMBOL(__ptlrpc_prep_bulk_page); -/** - * Uninitialize and free bulk descriptor \a desc. - * Works on bulk descriptors both from server and client side. 
- */ -void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) +int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, + void *frag, int len) { - int i; + struct kvec *iovec; + + LASSERT(desc->bd_iov_count < desc->bd_max_iov); + LASSERT(frag); + LASSERT(len > 0); + LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type)); + iovec = &BD_GET_KVEC(desc, desc->bd_iov_count); + + desc->bd_nob += len; + + iovec->iov_base = frag; + iovec->iov_len = len; + + desc->bd_iov_count++; + + return desc->bd_nob; +} +EXPORT_SYMBOL(ptlrpc_prep_bulk_frag); + +void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc) +{ LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ LASSERT(desc->bd_md_count == 0); /* network hands off */ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); + LASSERT(desc->bd_frag_ops); - sptlrpc_enc_pool_put_pages(desc); + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + sptlrpc_enc_pool_put_pages(desc); if (desc->bd_export) class_export_put(desc->bd_export); else class_import_put(desc->bd_import); - if (unpin) { - for (i = 0; i < desc->bd_iov_count; i++) - put_page(desc->bd_iov[i].bv_page); - } + if (desc->bd_frag_ops->release_frags) + desc->bd_frag_ops->release_frags(desc); + + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + kfree(GET_KIOV(desc)); + else + kfree(GET_KVEC(desc)); kfree(desc); } -EXPORT_SYMBOL(__ptlrpc_free_bulk); +EXPORT_SYMBOL(ptlrpc_free_bulk); /** * Set server timelimit for this req, i.e. how long are we willing to wait @@ -589,6 +653,42 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request) spin_unlock(&pool->prp_lock); } +void ptlrpc_add_unreplied(struct ptlrpc_request *req) +{ + struct obd_import *imp = req->rq_import; + struct list_head *tmp; + struct ptlrpc_request *iter; + + assert_spin_locked(&imp->imp_lock); + LASSERT(list_empty(&req->rq_unreplied_list)); + + /* unreplied list is sorted by xid in ascending order */ + list_for_each_prev(tmp, &imp->imp_unreplied_list) { + iter = list_entry(tmp, struct ptlrpc_request, + rq_unreplied_list); + + LASSERT(req->rq_xid != iter->rq_xid); + if (req->rq_xid < iter->rq_xid) + continue; + list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list); + return; + } + list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list); +} + +void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req) +{ + req->rq_xid = ptlrpc_next_xid(); + ptlrpc_add_unreplied(req); +} + +static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req) +{ + spin_lock(&req->rq_import->imp_lock); + ptlrpc_assign_next_xid_nolock(req); + spin_unlock(&req->rq_import->imp_lock); +} + int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, __u32 version, int opcode, char **bufs, struct ptlrpc_cli_ctx *ctx) @@ -637,8 +737,8 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, ptlrpc_at_set_req_timeout(request); - request->rq_xid = ptlrpc_next_xid(); lustre_msg_set_opc(request->rq_reqmsg, opcode); + ptlrpc_assign_next_xid(request); /* Let's setup deadline for req/reply/bulk unlink for opcode. 
*/ if (cfs_fail_val == opcode) { @@ -1129,7 +1229,9 @@ static int ptlrpc_check_status(struct ptlrpc_request *req) lnet_nid_t nid = imp->imp_connection->c_peer.nid; __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); - if (ptlrpc_console_allow(req)) + /* -EAGAIN is normal when using POSIX flocks */ + if (ptlrpc_console_allow(req) && + !(opc == LDLM_ENQUEUE && err == -EAGAIN)) LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n", imp->imp_obd->obd_name, ll_opcode2str(opc), @@ -1166,6 +1268,24 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req) versions[0], versions[1]); } +__u64 ptlrpc_known_replied_xid(struct obd_import *imp) +{ + struct ptlrpc_request *req; + + assert_spin_locked(&imp->imp_lock); + if (list_empty(&imp->imp_unreplied_list)) + return 0; + + req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request, + rq_unreplied_list); + LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid); + + if (imp->imp_known_replied_xid < req->rq_xid - 1) + imp->imp_known_replied_xid = req->rq_xid - 1; + + return req->rq_xid - 1; +} + /** * Callback function called when client receives RPC reply for \a req. * Returns 0 on success or error code. @@ -1180,6 +1300,7 @@ static int after_reply(struct ptlrpc_request *req) int rc; struct timespec64 work_start; long timediff; + u64 committed; LASSERT(obd); /* repbuf must be unlinked */ @@ -1206,6 +1327,10 @@ static int after_reply(struct ptlrpc_request *req) return 0; } + ktime_get_real_ts64(&work_start); + timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC + + (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) / + NSEC_PER_USEC; /* * NB Until this point, the whole of the incoming message, * including buflens, status etc is in the sender's byte order. @@ -1235,13 +1360,6 @@ static int after_reply(struct ptlrpc_request *req) spin_unlock(&req->rq_lock); req->rq_nr_resend++; - /* allocate new xid to avoid reply reconstruction */ - if (!req->rq_bulk) { - /* new xid is already allocated for bulk in ptlrpc_check_set() */ - req->rq_xid = ptlrpc_next_xid(); - DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS"); - } - /* Readjust the timeout for current conditions */ ptlrpc_at_set_req_timeout(req); /* @@ -1255,13 +1373,14 @@ static int after_reply(struct ptlrpc_request *req) else req->rq_sent = now + req->rq_nr_resend; + /* Resend for EINPROGRESS will use a new XID */ + spin_lock(&imp->imp_lock); + list_del_init(&req->rq_unreplied_list); + spin_unlock(&imp->imp_lock); + return 0; } - ktime_get_real_ts64(&work_start); - timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC + - (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) / - NSEC_PER_USEC; if (obd->obd_svc_stats) { lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, timediff); @@ -1338,10 +1457,9 @@ static int after_reply(struct ptlrpc_request *req) } /* Replay-enabled imports return commit-status information. 
*/ - if (lustre_msg_get_last_committed(req->rq_repmsg)) { - imp->imp_peer_committed_transno = - lustre_msg_get_last_committed(req->rq_repmsg); - } + committed = lustre_msg_get_last_committed(req->rq_repmsg); + if (likely(committed > imp->imp_peer_committed_transno)) + imp->imp_peer_committed_transno = committed; ptlrpc_free_committed(imp); @@ -1373,9 +1491,17 @@ static int after_reply(struct ptlrpc_request *req) static int ptlrpc_send_new_req(struct ptlrpc_request *req) { struct obd_import *imp = req->rq_import; + u64 min_xid = 0; int rc; LASSERT(req->rq_phase == RQ_PHASE_NEW); + + /* do not try to go further if there is not enough memory in enc_pool */ + if (req->rq_sent && req->rq_bulk) + if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() && + pool_is_at_full_capacity()) + return -ENOMEM; + if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) && (!req->rq_generation_set || req->rq_import_generation == imp->imp_generation)) @@ -1385,6 +1511,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) spin_lock(&imp->imp_lock); + LASSERT(req->rq_xid); + LASSERT(!list_empty(&req->rq_unreplied_list)); + if (!req->rq_generation_set) req->rq_import_generation = imp->imp_generation; @@ -1414,8 +1543,25 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) LASSERT(list_empty(&req->rq_list)); list_add_tail(&req->rq_list, &imp->imp_sending_list); atomic_inc(&req->rq_import->imp_inflight); + + /* find the known replied XID from the unreplied list, CONNECT + * and DISCONNECT requests are skipped to make the sanity check + * on server side happy. see process_req_last_xid(). + * + * For CONNECT: Because replay requests have lower XID, it'll + * break the sanity check if CONNECT bumps the exp_last_xid on + * server. + * + * For DISCONNECT: Since client will abort inflight RPC before + * sending DISCONNECT, DISCONNECT may carry an XID which is higher + * than the inflight RPC. 
+ */ + if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req)) + min_xid = ptlrpc_known_replied_xid(imp); spin_unlock(&imp->imp_lock); + lustre_msg_set_last_xid(req->rq_reqmsg, min_xid); + lustre_msg_set_status(req->rq_reqmsg, current_pid()); rc = sptlrpc_req_refresh_ctx(req, -1); @@ -1438,6 +1584,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) lustre_msg_get_opc(req->rq_reqmsg)); rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) { + list_del_init(&req->rq_list); + atomic_dec(&req->rq_import->imp_inflight); + } + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + return rc; + } if (rc) { DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc); spin_lock(&req->rq_lock); @@ -1688,18 +1844,9 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) spin_lock(&req->rq_lock); req->rq_resend = 1; spin_unlock(&req->rq_lock); - if (req->rq_bulk) { - __u64 old_xid; - - if (!ptlrpc_unregister_bulk(req, 1)) - continue; - - /* ensure previous bulk fails */ - old_xid = req->rq_xid; - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n", - old_xid, req->rq_xid); - } + if (req->rq_bulk && + !ptlrpc_unregister_bulk(req, 1)) + continue; } /* * rq_wait_ctx is only touched by ptlrpcd, @@ -1727,6 +1874,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) } rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) + list_del_init(&req->rq_list); + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + continue; + } if (rc) { DEBUG_REQ(D_HA, req, "send failed: rc = %d", rc); @@ -1850,6 +2005,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) list_del_init(&req->rq_list); atomic_dec(&imp->imp_inflight); } + list_del_init(&req->rq_unreplied_list); spin_unlock(&imp->imp_lock); atomic_dec(&set->set_remaining); @@ -2247,6 +2403,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) if (!locked) spin_lock(&request->rq_import->imp_lock); list_del_init(&request->rq_replay_list); + list_del_init(&request->rq_unreplied_list); if (!locked) spin_unlock(&request->rq_import->imp_lock); } @@ -2266,7 +2423,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) request->rq_import = NULL; } if (request->rq_bulk) - ptlrpc_free_bulk_pin(request->rq_bulk); + ptlrpc_free_bulk(request->rq_bulk); if (request->rq_reqbuf || request->rq_clrbuf) sptlrpc_cli_free_reqbuf(request); @@ -2542,14 +2699,6 @@ void ptlrpc_resend_req(struct ptlrpc_request *req) req->rq_resend = 1; req->rq_net_err = 0; req->rq_timedout = 0; - if (req->rq_bulk) { - __u64 old_xid = req->rq_xid; - - /* ensure previous bulk fails */ - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n", - old_xid, req->rq_xid); - } ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } @@ -2592,6 +2741,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); + spin_lock(&req->rq_lock); + req->rq_resend = 0; + spin_unlock(&req->rq_lock); + LASSERT(imp->imp_replayable); /* Balanced in ptlrpc_free_committed, usually. 
*/ ptlrpc_request_addref(req); @@ -2667,8 +2820,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, atomic_dec(&imp->imp_replay_inflight); - if (!ptlrpc_client_replied(req)) { - CERROR("request replay timed out, restarting recovery\n"); + /* + * Note: if it is bulk replay (MDS-MDS replay), then even if + * server got the request, but bulk transfer timeout, let's + * replay the bulk req again + */ + if (!ptlrpc_client_replied(req) || + (req->rq_bulk && + lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) { + DEBUG_REQ(D_ERROR, req, "request replay timed out.\n"); rc = -ETIMEDOUT; goto out; } @@ -2938,6 +3098,48 @@ __u64 ptlrpc_next_xid(void) return next; } +/** + * If request has a new allocated XID (new request or EINPROGRESS resend), + * use this XID as matchbits of bulk, otherwise allocate a new matchbits for + * request to ensure previous bulk fails and avoid problems with lost replies + * and therefore several transfers landing into the same buffer from different + * sending attempts. + */ +void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req) +{ + struct ptlrpc_bulk_desc *bd = req->rq_bulk; + + LASSERT(bd); + + if (!req->rq_resend) { + /* this request has a new xid, just use it as bulk matchbits */ + req->rq_mbits = req->rq_xid; + + } else { /* needs to generate a new matchbits for resend */ + u64 old_mbits = req->rq_mbits; + + if ((bd->bd_import->imp_connect_data.ocd_connect_flags & + OBD_CONNECT_BULK_MBITS)) { + req->rq_mbits = ptlrpc_next_xid(); + } else { + /* old version transfers rq_xid to peer as matchbits */ + req->rq_mbits = ptlrpc_next_xid(); + req->rq_xid = req->rq_mbits; + } + + CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n", + old_mbits, req->rq_mbits); + } + + /* + * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so + * that server can infer the number of bulks that were prepared, + * see LU-1431 + */ + req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) / + LNET_MAX_IOV) - 1; +} + /** * Get a glimpse at what next xid value might have been. * Returns possible next xid. 
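The last client.c hunk above introduces ptlrpc_set_bulk_mbits(), which decouples bulk matchbits from the request XID: a fresh request simply reuses rq_xid as rq_mbits, a resend allocates new matchbits (and, for servers without OBD_CONNECT_BULK_MBITS, moves rq_xid along with them), and the result is then advanced so that the final matchbits of a multi-bulk RPC lets the server infer how many bulk MDs were prepared (LU-1431). Below is a minimal userspace sketch of that arithmetic only; it is not part of the patch, the sketch_* names are invented for illustration, the connect flag is reduced to a plain bool, and LNET_MAX_IOV is assumed to be 256.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LNET_MAX_IOV 256			/* assumed fragment limit per bulk MD */

static uint64_t xid_counter = 0x100;		/* stand-in for the global xid allocator */

static uint64_t sketch_next_xid(void)
{
	return ++xid_counter;
}

struct sketch_req {
	uint64_t rq_xid;			/* request transfer id */
	uint64_t rq_mbits;			/* bulk matchbits sent to the peer */
	bool rq_resend;				/* resend of an earlier attempt? */
	int bd_iov_count;			/* number of bulk fragments (pages) */
};

/*
 * Mirrors the matchbits logic: fresh requests reuse rq_xid as matchbits,
 * resends get new matchbits (old peers also get a new xid, since they match
 * bulks against rq_xid), then the value is advanced so the last matchbits
 * encodes how many bulk MDs the RPC carries.
 */
static void sketch_set_bulk_mbits(struct sketch_req *req, bool peer_has_bulk_mbits)
{
	if (!req->rq_resend) {
		req->rq_mbits = req->rq_xid;
	} else if (peer_has_bulk_mbits) {
		req->rq_mbits = sketch_next_xid();
	} else {
		req->rq_mbits = sketch_next_xid();
		req->rq_xid = req->rq_mbits;
	}

	req->rq_mbits += (req->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV - 1;
}

int main(void)
{
	struct sketch_req req = {
		.rq_xid = sketch_next_xid(),
		.bd_iov_count = 600,		/* 600 pages -> 3 bulk MDs of 256 */
	};

	sketch_set_bulk_mbits(&req, true);
	printf("first send: xid=%#llx mbits=%#llx\n",
	       (unsigned long long)req.rq_xid,
	       (unsigned long long)req.rq_mbits);

	req.rq_resend = true;
	sketch_set_bulk_mbits(&req, true);
	printf("resend    : xid=%#llx mbits=%#llx\n",
	       (unsigned long long)req.rq_xid,
	       (unsigned long long)req.rq_mbits);

	return 0;
}

This is also why the old workaround of bumping rq_xid on every bulk resend ("ensure previous bulk fails") can be dropped from ptlrpc_resend_req() and ptlrpc_check_set() earlier in this patch: the matchbits can now change independently of the XID tracked on the unreplied list.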
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c index 7b020d60c9e51f5d79f72f00679f2f193d3244a6..6c7c8b68a90908981bcfb64aaf496f67e6555daf 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/connection.c +++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c @@ -152,8 +152,8 @@ void ptlrpc_connection_fini(void) /* * Hash operations for net_peer<->connection */ -static unsigned -conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask) +static unsigned int +conn_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask) { return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c index 283dfb296d3530335baed2ba59434fb597a8e4f9..49f3e63684156679e31455e6b432fc3a53cf698c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/events.c +++ b/drivers/staging/lustre/lustre/ptlrpc/events.c @@ -182,9 +182,9 @@ void client_bulk_callback(lnet_event_t *ev) struct ptlrpc_bulk_desc *desc = cbid->cbid_arg; struct ptlrpc_request *req; - LASSERT((desc->bd_type == BULK_PUT_SINK && + LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) && ev->type == LNET_EVENT_PUT) || - (desc->bd_type == BULK_GET_SOURCE && + (ptlrpc_is_bulk_get_source(desc->bd_type) && ev->type == LNET_EVENT_GET) || ev->type == LNET_EVENT_UNLINK); LASSERT(ev->unlinked); diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c index a23d0a05b5743d4210b3f9761609f4168657efd9..e8280194001cbae45d95763905fdf07eaa00c5c4 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/import.c +++ b/drivers/staging/lustre/lustre/ptlrpc/import.c @@ -396,7 +396,7 @@ void ptlrpc_activate_import(struct obd_import *imp) } EXPORT_SYMBOL(ptlrpc_activate_import); -static void ptlrpc_pinger_force(struct obd_import *imp) +void ptlrpc_pinger_force(struct obd_import *imp) { CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd), ptlrpc_import_state_name(imp->imp_state)); @@ -408,6 +408,7 @@ static void ptlrpc_pinger_force(struct obd_import *imp) if (imp->imp_state != LUSTRE_IMP_CONNECTING) ptlrpc_pinger_wake_up(); } +EXPORT_SYMBOL(ptlrpc_pinger_force); void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt) { @@ -621,7 +622,8 @@ int ptlrpc_connect_import(struct obd_import *imp) spin_unlock(&imp->imp_lock); CERROR("already connected\n"); return 0; - } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) { + } else if (imp->imp_state == LUSTRE_IMP_CONNECTING || + imp->imp_connected) { spin_unlock(&imp->imp_lock); CERROR("already connecting\n"); return -EALREADY; @@ -691,8 +693,6 @@ int ptlrpc_connect_import(struct obd_import *imp) request->rq_timeout = INITIAL_CONNECT_TIMEOUT; lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout); - lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER); - request->rq_no_resend = 1; request->rq_no_delay = 1; request->rq_send_state = LUSTRE_IMP_CONNECTING; @@ -858,6 +858,17 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp, client_adjust_max_dirty(cli); + /* + * Update client max modify RPCs in flight with value returned + * by the server + */ + if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) + cli->cl_max_mod_rpcs_in_flight = min( + cli->cl_max_mod_rpcs_in_flight, + ocd->ocd_maxmodrpcs); + else + cli->cl_max_mod_rpcs_in_flight = 1; + /* * Reset ns_connect_flags only for initial connect. 
It might be * changed in while using FS and if we reset it in reconnect @@ -873,8 +884,7 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp, ocd->ocd_connect_flags; } - if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) && - (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2)) + if (ocd->ocd_connect_flags & OBD_CONNECT_AT) /* * We need a per-message support flag, because * a. we don't know if the incoming connect reply @@ -889,15 +899,44 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp, else imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; - if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) && - (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2)) - imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18; - else - imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18; + imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18; return 0; } +/** + * Add all replay requests back to the unreplied list before starting replay, + * so that we can make sure the known replied XID is always increasing, + * even when replaying requests. + */ +static void ptlrpc_prepare_replay(struct obd_import *imp) +{ + struct ptlrpc_request *req; + + if (imp->imp_state != LUSTRE_IMP_REPLAY || + imp->imp_resend_replay) + return; + + /* + * If the server was restarted during replay, the requests may + * have been added to the unreplied list in a former replay. + */ + spin_lock(&imp->imp_lock); + + list_for_each_entry(req, &imp->imp_committed_list, rq_replay_list) { + if (list_empty(&req->rq_unreplied_list)) + ptlrpc_add_unreplied(req); + } + + list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list) { + if (list_empty(&req->rq_unreplied_list)) + ptlrpc_add_unreplied(req); + } + + imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp); + spin_unlock(&imp->imp_lock); +} + /** * interpret_reply callback for connect RPCs. * Looks into returned status of connect operation and decides @@ -933,6 +972,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, ptlrpc_maybe_ping_import_soon(imp); goto out; } + + /* + * LU-7558: indicate that we are interpreting the connect reply; + * ptlrpc_connect_import() will not try to reconnect until + * interpretation finishes. + */ + imp->imp_connected = 1; spin_unlock(&imp->imp_lock); LASSERT(imp->imp_conn_current); @@ -967,6 +1013,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, spin_unlock(&imp->imp_lock); + if (!exp) { + /* This could happen if the export is cleaned up during the + * connect attempt + */ + CERROR("%s: missing export after connect\n", + imp->imp_obd->obd_name); + rc = -ENODEV; + goto out; + } + /* check that server granted subset of flags we asked for. 
*/ if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) != ocd->ocd_connect_flags) { @@ -977,15 +1033,6 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, goto out; } - if (!exp) { - /* This could happen if export is cleaned during the - * connect attempt - */ - CERROR("%s: missing export after connect\n", - imp->imp_obd->obd_name); - rc = -ENODEV; - goto out; - } old_connect_flags = exp_connect_flags(exp); exp->exp_connect_data = *ocd; imp->imp_obd->obd_self_export->exp_connect_data = *ocd; @@ -1124,6 +1171,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, imp->imp_remote_handle = *lustre_msg_get_handle(request->rq_repmsg); imp->imp_last_replay_transno = 0; + imp->imp_replay_cursor = &imp->imp_committed_list; IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY); } else { DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)", @@ -1147,18 +1195,25 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, } finish: + ptlrpc_prepare_replay(imp); rc = ptlrpc_import_recovery_state_machine(imp); if (rc == -ENOTCONN) { CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n", obd2cli_tgt(imp->imp_obd), imp->imp_connection->c_remote_uuid.uuid); ptlrpc_connect_import(imp); + spin_lock(&imp->imp_lock); + imp->imp_connected = 0; imp->imp_connect_tried = 1; + spin_unlock(&imp->imp_lock); return 0; } out: + spin_lock(&imp->imp_lock); + imp->imp_connected = 0; imp->imp_connect_tried = 1; + spin_unlock(&imp->imp_lock); if (rc != 0) { IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON); diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c index 839ef3e80c1aa2ff46fd66c4d8e5366dec5fa1b4..99d7c667df284de7e244faacd76d26c34ea278f3 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/layout.c +++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c @@ -48,14 +48,14 @@ #include -/* LUSTRE_VERSION_CODE */ -#include "../include/lustre_ver.h" - -#include "../include/obd_support.h" -/* lustre_swab_mdt_body */ #include "../include/lustre/lustre_idl.h" -/* obd2cli_tgt() (required by DEBUG_REQ()) */ + +#include "../include/llog_swab.h" +#include "../include/lustre_debug.h" +#include "../include/lustre_swab.h" +#include "../include/lustre_ver.h" #include "../include/obd.h" +#include "../include/obd_support.h" /* __REQ_LAYOUT_USER__ */ #endif @@ -121,7 +121,7 @@ static const struct req_msg_field *mdt_close_client[] = { &RMF_CAPA1 }; -static const struct req_msg_field *mdt_release_close_client[] = { +static const struct req_msg_field *mdt_intent_close_client[] = { &RMF_PTLRPC_BODY, &RMF_MDT_EPOCH, &RMF_REC_REINT, @@ -257,6 +257,18 @@ static const struct req_msg_field *mds_reint_rename_client[] = { &RMF_DLM_REQ }; +static const struct req_msg_field *mds_reint_migrate_client[] = { + &RMF_PTLRPC_BODY, + &RMF_REC_REINT, + &RMF_CAPA1, + &RMF_CAPA2, + &RMF_NAME, + &RMF_SYMTGT, + &RMF_DLM_REQ, + &RMF_MDT_EPOCH, + &RMF_CLOSE_DATA +}; + static const struct req_msg_field *mds_last_unlink_server[] = { &RMF_PTLRPC_BODY, &RMF_MDT_BODY, @@ -666,10 +678,9 @@ static struct req_format *req_formats[] = { &RQF_MDS_GETXATTR, &RQF_MDS_SYNC, &RQF_MDS_CLOSE, - &RQF_MDS_RELEASE_CLOSE, + &RQF_MDS_INTENT_CLOSE, &RQF_MDS_READPAGE, &RQF_MDS_WRITEPAGE, - &RQF_MDS_DONE_WRITING, &RQF_MDS_REINT, &RQF_MDS_REINT_CREATE, &RQF_MDS_REINT_CREATE_ACL, @@ -679,9 +690,9 @@ static struct req_format *req_formats[] = { &RQF_MDS_REINT_UNLINK, &RQF_MDS_REINT_LINK, &RQF_MDS_REINT_RENAME, + &RQF_MDS_REINT_MIGRATE, &RQF_MDS_REINT_SETATTR, 
&RQF_MDS_REINT_SETXATTR, - &RQF_MDS_QUOTACHECK, &RQF_MDS_QUOTACTL, &RQF_MDS_HSM_PROGRESS, &RQF_MDS_HSM_CT_REGISTER, @@ -691,10 +702,8 @@ static struct req_format *req_formats[] = { &RQF_MDS_HSM_ACTION, &RQF_MDS_HSM_REQUEST, &RQF_MDS_SWAP_LAYOUTS, - &RQF_QC_CALLBACK, &RQF_OST_CONNECT, &RQF_OST_DISCONNECT, - &RQF_OST_QUOTACHECK, &RQF_OST_QUOTACTL, &RQF_OST_GETATTR, &RQF_OST_SETATTR, @@ -1180,14 +1189,6 @@ struct req_format RQF_LOG_CANCEL = DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty); EXPORT_SYMBOL(RQF_LOG_CANCEL); -struct req_format RQF_MDS_QUOTACHECK = - DEFINE_REQ_FMT0("MDS_QUOTACHECK", quotactl_only, empty); -EXPORT_SYMBOL(RQF_MDS_QUOTACHECK); - -struct req_format RQF_OST_QUOTACHECK = - DEFINE_REQ_FMT0("OST_QUOTACHECK", quotactl_only, empty); -EXPORT_SYMBOL(RQF_OST_QUOTACHECK); - struct req_format RQF_MDS_QUOTACTL = DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only); EXPORT_SYMBOL(RQF_MDS_QUOTACTL); @@ -1196,10 +1197,6 @@ struct req_format RQF_OST_QUOTACTL = DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only); EXPORT_SYMBOL(RQF_OST_QUOTACTL); -struct req_format RQF_QC_CALLBACK = - DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty); -EXPORT_SYMBOL(RQF_QC_CALLBACK); - struct req_format RQF_MDS_GETSTATUS = DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa); EXPORT_SYMBOL(RQF_MDS_GETSTATUS); @@ -1270,6 +1267,11 @@ struct req_format RQF_MDS_REINT_RENAME = mds_last_unlink_server); EXPORT_SYMBOL(RQF_MDS_REINT_RENAME); +struct req_format RQF_MDS_REINT_MIGRATE = + DEFINE_REQ_FMT0("MDS_REINT_MIGRATE", mds_reint_migrate_client, + mds_last_unlink_server); +EXPORT_SYMBOL(RQF_MDS_REINT_MIGRATE); + struct req_format RQF_MDS_REINT_SETATTR = DEFINE_REQ_FMT0("MDS_REINT_SETATTR", mds_reint_setattr_client, mds_setattr_server); @@ -1381,15 +1383,10 @@ struct req_format RQF_MDS_CLOSE = mdt_close_client, mds_last_unlink_server); EXPORT_SYMBOL(RQF_MDS_CLOSE); -struct req_format RQF_MDS_RELEASE_CLOSE = +struct req_format RQF_MDS_INTENT_CLOSE = DEFINE_REQ_FMT0("MDS_CLOSE", - mdt_release_close_client, mds_last_unlink_server); -EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE); - -struct req_format RQF_MDS_DONE_WRITING = - DEFINE_REQ_FMT0("MDS_DONE_WRITING", - mdt_close_client, mdt_body_only); -EXPORT_SYMBOL(RQF_MDS_DONE_WRITING); + mdt_intent_close_client, mds_last_unlink_server); +EXPORT_SYMBOL(RQF_MDS_INTENT_CLOSE); struct req_format RQF_MDS_READPAGE = DEFINE_REQ_FMT0("MDS_READPAGE", @@ -1874,13 +1871,14 @@ static void *__req_capsule_get(struct req_capsule *pill, getter = (field->rmf_flags & RMF_F_STRING) ? (typeof(getter))lustre_msg_string : lustre_msg_buf; - if (field->rmf_flags & RMF_F_STRUCT_ARRAY) { + if (field->rmf_flags & (RMF_F_STRUCT_ARRAY | RMF_F_NO_SIZE_CHECK)) { /* * We've already asserted that field->rmf_size > 0 in * req_layout_init(). 
*/ len = lustre_msg_buflen(msg, offset); - if ((len % field->rmf_size) != 0) { + if (!(field->rmf_flags & RMF_F_NO_SIZE_CHECK) && + (len % field->rmf_size)) { CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n", field->rmf_name, len, field->rmf_size, loc); return NULL; diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c index 0f55c01feba877da2615957555883a8e08ccbe03..110d9f5057879d476fc83cc3d9756d2160306651 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c @@ -287,8 +287,13 @@ static int llog_client_read_header(const struct lu_env *env, goto out; } - memcpy(handle->lgh_hdr, hdr, sizeof(*hdr)); - handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index; + if (handle->lgh_hdr_size < hdr->llh_hdr.lrh_len) { + rc = -EFAULT; + goto out; + } + + memcpy(handle->lgh_hdr, hdr, hdr->llh_hdr.lrh_len); + handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index; /* sanity checks */ llh_hdr = &handle->lgh_hdr->llh_hdr; @@ -296,9 +301,14 @@ static int llog_client_read_header(const struct lu_env *env, CERROR("bad log header magic: %#x (expecting %#x)\n", llh_hdr->lrh_type, LLOG_HDR_MAGIC); rc = -EIO; - } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) { - CERROR("incorrectly sized log header: %#x (expecting %#x)\n", - llh_hdr->lrh_len, LLOG_CHUNK_SIZE); + } else if (llh_hdr->lrh_len != + LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len || + (llh_hdr->lrh_len & (llh_hdr->lrh_len - 1)) || + llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE || + llh_hdr->lrh_len > handle->lgh_hdr_size) { + CERROR("incorrectly sized log header: %#x (expecting %#x) (power of two > 8192)\n", + llh_hdr->lrh_len, + LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len); CERROR("you may need to re-run lconf --write_conf.\n"); rc = -EIO; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c index 9bad57d65db45ae85be21b671d21820c2b63b488..f8747818001381d84eb3d7330e9629e4ce805a65 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c @@ -479,8 +479,8 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n) struct ptlrpc_nrs_policy *policy; struct ptlrpc_nrs_pol_info *infos; struct ptlrpc_nrs_pol_info tmp; - unsigned num_pols; - unsigned pol_idx = 0; + unsigned int num_pols; + unsigned int pol_idx = 0; bool hp = false; int i; int rc = 0; diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c index 9c937398a0856acbad13b4813e7cc05423aeaa3f..da1209e40f03636763589d76c57baade1dd43cb8 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c +++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c @@ -114,7 +114,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) int rc2; int posted_md; int total_md; - __u64 xid; + u64 mbits; lnet_handle_me_t me_h; lnet_md_t md; @@ -127,8 +127,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT); LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); LASSERT(desc->bd_req); - LASSERT(desc->bd_type == BULK_PUT_SINK || - desc->bd_type == BULK_GET_SOURCE); + LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type)); /* cleanup the state of the bulk for it will be reused */ if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY) @@ -143,40 +142,37 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) 
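The llog_client_read_header() change earlier in this hunk series replaces the fixed LLOG_CHUNK_SIZE comparison with a set of sanity checks on the header record length: it must match the tail record, be a power of two, and fall between a minimum chunk size and the allocated header buffer. A minimal standalone sketch of that predicate (the 8192 floor is inferred from the error message and is an assumption, as are the names):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN_CHUNK_SIZE 8192u    /* assumed stand-in for LLOG_MIN_CHUNK_SIZE */

    /* Header length must agree with the tail and be a power of two in range. */
    static bool llog_hdr_len_valid(uint32_t len, uint32_t tail_len, uint32_t buf_size)
    {
            if (len != tail_len)            /* header and tail records must agree */
                    return false;
            if (len & (len - 1))            /* power-of-two check */
                    return false;
            return len >= MIN_CHUNK_SIZE && len <= buf_size;
    }

    int main(void)
    {
            printf("%d\n", llog_hdr_len_valid(8192, 8192, 32768));   /* 1 */
            printf("%d\n", llog_hdr_len_valid(12288, 12288, 32768)); /* 0: not a power of two */
            return 0;
    }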
LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback); LASSERT(desc->bd_cbid.cbid_arg == desc); - /* An XID is only used for a single request from the client. - * For retried bulk transfers, a new XID will be allocated in - * in ptlrpc_check_set() if it needs to be resent, so it is not - * using the same RDMA match bits after an error. - * - * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The - * first bulk XID is power-of-two aligned before rq_xid. LU-1431 - */ - xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1); + total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV; + /* rq_mbits is matchbits of the final bulk */ + mbits = req->rq_mbits - total_md + 1; + + LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK), + "first mbits = x%llu, last mbits = x%llu\n", + mbits, req->rq_mbits); LASSERTF(!(desc->bd_registered && req->rq_send_state != LUSTRE_IMP_REPLAY) || - xid != desc->bd_last_xid, - "registered: %d rq_xid: %llu bd_last_xid: %llu\n", - desc->bd_registered, xid, desc->bd_last_xid); + mbits != desc->bd_last_mbits, + "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n", + desc->bd_registered, mbits, desc->bd_last_mbits); - total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV; desc->bd_registered = 1; - desc->bd_last_xid = xid; + desc->bd_last_mbits = mbits; desc->bd_md_count = total_md; md.user_ptr = &desc->bd_cbid; md.eq_handle = ptlrpc_eq_h; md.threshold = 1; /* PUT or GET */ - for (posted_md = 0; posted_md < total_md; posted_md++, xid++) { + for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) { md.options = PTLRPC_MD_OPTIONS | - ((desc->bd_type == BULK_GET_SOURCE) ? + (ptlrpc_is_bulk_op_get(desc->bd_type) ? LNET_MD_OP_GET : LNET_MD_OP_PUT); ptlrpc_fill_bulk_md(&md, desc, posted_md); - rc = LNetMEAttach(desc->bd_portal, peer, xid, 0, + rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0, LNET_UNLINK, LNET_INS_AFTER, &me_h); if (rc != 0) { CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n", - desc->bd_import->imp_obd->obd_name, xid, + desc->bd_import->imp_obd->obd_name, mbits, posted_md, rc); break; } @@ -186,7 +182,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) &desc->bd_mds[posted_md]); if (rc != 0) { CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n", - desc->bd_import->imp_obd->obd_name, xid, + desc->bd_import->imp_obd->obd_name, mbits, posted_md, rc); rc2 = LNetMEUnlink(me_h); LASSERT(rc2 == 0); @@ -205,27 +201,19 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) return -ENOMEM; } - /* Set rq_xid to matchbits of the final bulk so that server can - * infer the number of bulks that were prepared - */ - req->rq_xid = --xid; - LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK), - "bd_last_xid = x%llu, rq_xid = x%llu\n", - desc->bd_last_xid, req->rq_xid); - spin_lock(&desc->bd_lock); - /* Holler if peer manages to touch buffers before he knows the xid */ + /* Holler if peer manages to touch buffers before he knows the mbits */ if (desc->bd_md_count != total_md) CWARN("%s: Peer %s touched %d buffers while I registered\n", desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer), total_md - desc->bd_md_count); spin_unlock(&desc->bd_lock); - CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n", + CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n", desc->bd_md_count, - desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink", + ptlrpc_is_bulk_op_get(desc->bd_type) ? 
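In the ptlrpc_register_bulk() hunk above, rq_mbits carries the match bits of the final bulk MD and the first MD's match bits are recovered by subtracting the MD count; the code also asserts that this first value is the PTLRPC_BULK_OPS_MASK-aligned base of rq_mbits. A small userspace sketch of that arithmetic (constants and names here are stand-ins, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_IOV_PER_MD 256      /* stand-in for LNET_MAX_IOV */

    int main(void)
    {
            uint64_t last_mbits = 0x1040;   /* rq_mbits: match bits of the final bulk MD */
            int iov_count = 700;            /* fragments in the descriptor */

            /* One MD per MAX_IOV_PER_MD fragments, rounded up. */
            int total_md = (iov_count + MAX_IOV_PER_MD - 1) / MAX_IOV_PER_MD;
            uint64_t first_mbits = last_mbits - total_md + 1;

            /* MDs are posted with consecutive match bits from first to last. */
            printf("total_md=%d first=0x%llx last=0x%llx\n", total_md,
                   (unsigned long long)first_mbits, (unsigned long long)last_mbits);
            return 0;
    }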
"get-source" : "put-sink", desc->bd_iov_count, desc->bd_nob, - desc->bd_last_xid, req->rq_xid, desc->bd_portal); + desc->bd_last_mbits, req->rq_mbits, desc->bd_portal); return 0; } @@ -521,6 +509,39 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt); lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags); + /* + * If it's the first time to resend the request for EINPROGRESS, + * we need to allocate a new XID (see after_reply()), it's different + * from the resend for reply timeout. + */ + if (request->rq_nr_resend && list_empty(&request->rq_unreplied_list)) { + __u64 min_xid = 0; + /* + * resend for EINPROGRESS, allocate new xid to avoid reply + * reconstruction + */ + spin_lock(&imp->imp_lock); + ptlrpc_assign_next_xid_nolock(request); + request->rq_mbits = request->rq_xid; + min_xid = ptlrpc_known_replied_xid(imp); + spin_unlock(&imp->imp_lock); + + lustre_msg_set_last_xid(request->rq_reqmsg, min_xid); + DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for resend on EINPROGRESS"); + } else if (request->rq_bulk) { + ptlrpc_set_bulk_mbits(request); + lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits); + } + + if (list_empty(&request->rq_unreplied_list) || + request->rq_xid <= imp->imp_known_replied_xid) { + DEBUG_REQ(D_ERROR, request, + "xid: %llu, replied: %llu, list_empty:%d\n", + request->rq_xid, imp->imp_known_replied_xid, + list_empty(&request->rq_unreplied_list)); + LBUG(); + } + /** * For enabled AT all request should have AT_SUPPORT in the * FULL import state when OBD_CONNECT_AT is set @@ -537,8 +558,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) mpflag = cfs_memory_pressure_get_and_set(); rc = sptlrpc_cli_wrap_request(request); - if (rc) + if (rc) { + /* + * set rq_sent so that this request is treated + * as a delayed send in the upper layers + */ + if (rc == -ENOMEM) + request->rq_sent = ktime_get_seconds(); goto out; + } /* bulk register should be done after wrap_request() */ if (request->rq_bulk) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c index d88faf61e740d560e00d02aa8c334bc98b653ad5..7b6ffb1958341c95db7b4b25a4d437dbbd8374f8 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c +++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c @@ -82,16 +82,9 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy, static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy) { - struct ptlrpc_nrs *nrs = policy->pol_nrs; - - if (policy->pol_desc->pd_ops->op_policy_stop) { - spin_unlock(&nrs->nrs_lock); - + if (policy->pol_desc->pd_ops->op_policy_stop) policy->pol_desc->pd_ops->op_policy_stop(policy); - spin_lock(&nrs->nrs_lock); - } - LASSERT(list_empty(&policy->pol_list_queued)); LASSERT(policy->pol_req_queued == 0 && policy->pol_req_started == 0); @@ -619,6 +612,12 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name, goto out; } + if (policy->pol_state != NRS_POL_STATE_STARTED && + policy->pol_state != NRS_POL_STATE_STOPPED) { + rc = -EAGAIN; + goto out; + } + switch (opc) { /** * Unknown opcode, pass it down to the policy-specific control diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c index 871768511e8cc8ebe4797435d262e00a6058010a..13f00b7cbbe59ec94449bf14679d6ce28ddef848 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c @@ -42,11 +42,14 @@ 
#include "../../include/linux/libcfs/libcfs.h" -#include "../include/obd_support.h" -#include "../include/obd_class.h" +#include "../include/lustre/ll_fiemap.h" + +#include "../include/llog_swab.h" #include "../include/lustre_net.h" +#include "../include/lustre_swab.h" #include "../include/obd_cksum.h" -#include "../include/lustre/ll_fiemap.h" +#include "../include/obd_support.h" +#include "../include/obd_class.h" #include "ptlrpc_internal.h" @@ -942,6 +945,25 @@ __u32 lustre_msg_get_opc(struct lustre_msg *msg) } EXPORT_SYMBOL(lustre_msg_get_opc); +__u16 lustre_msg_get_tag(struct lustre_msg *msg) +{ + switch (msg->lm_magic) { + case LUSTRE_MSG_MAGIC_V2: { + struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); + + if (!pb) { + CERROR("invalid msg %p: no ptlrpc body!\n", msg); + return 0; + } + return pb->pb_tag; + } + default: + CERROR("incorrect message magic: %08x\n", msg->lm_magic); + return 0; + } +} +EXPORT_SYMBOL(lustre_msg_get_tag); + __u64 lustre_msg_get_last_committed(struct lustre_msg *msg) { switch (msg->lm_magic) { @@ -1236,6 +1258,37 @@ void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc) } } +void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid) +{ + switch (msg->lm_magic) { + case LUSTRE_MSG_MAGIC_V2: { + struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); + + LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); + pb->pb_last_xid = last_xid; + return; + } + default: + LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); + } +} + +void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag) +{ + switch (msg->lm_magic) { + case LUSTRE_MSG_MAGIC_V2: { + struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); + + LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); + pb->pb_tag = tag; + return; + } + default: + LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); + } +} +EXPORT_SYMBOL(lustre_msg_set_tag); + void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions) { switch (msg->lm_magic) { @@ -1373,6 +1426,21 @@ void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum) } } +void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits) +{ + switch (msg->lm_magic) { + case LUSTRE_MSG_MAGIC_V2: { + struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); + + LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); + pb->pb_mbits = mbits; + return; + } + default: + LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); + } +} + void ptlrpc_request_set_replen(struct ptlrpc_request *req) { int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER); @@ -1442,7 +1510,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) __swab32s(&b->pb_opc); __swab32s(&b->pb_status); __swab64s(&b->pb_last_xid); - __swab64s(&b->pb_last_seen); + __swab16s(&b->pb_tag); __swab64s(&b->pb_last_committed); __swab64s(&b->pb_transno); __swab32s(&b->pb_flags); @@ -1456,7 +1524,12 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) __swab64s(&b->pb_pre_versions[1]); __swab64s(&b->pb_pre_versions[2]); __swab64s(&b->pb_pre_versions[3]); - CLASSERT(offsetof(typeof(*b), pb_padding) != 0); + __swab64s(&b->pb_mbits); + CLASSERT(offsetof(typeof(*b), pb_padding0) != 0); + CLASSERT(offsetof(typeof(*b), pb_padding1) != 0); + CLASSERT(offsetof(typeof(*b), pb_padding64_0) != 0); + CLASSERT(offsetof(typeof(*b), pb_padding64_1) != 0); + CLASSERT(offsetof(typeof(*b), pb_padding64_2) != 0); /* While we need to maintain compatibility between * clients and servers without ptlrpc_body_v2 (< 2.3) * do not swab any fields beyond pb_jobid, as we are @@ 
-1492,8 +1565,12 @@ void lustre_swab_connect(struct obd_connect_data *ocd) __swab32s(&ocd->ocd_max_easize); if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES) __swab64s(&ocd->ocd_maxbytes); + if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) + __swab16s(&ocd->ocd_maxmodrpcs); + CLASSERT(offsetof(typeof(*ocd), padding0)); CLASSERT(offsetof(typeof(*ocd), padding1) != 0); - CLASSERT(offsetof(typeof(*ocd), padding2) != 0); + if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2) + __swab64s(&ocd->ocd_connect_flags2); CLASSERT(offsetof(typeof(*ocd), padding3) != 0); CLASSERT(offsetof(typeof(*ocd), padding4) != 0); CLASSERT(offsetof(typeof(*ocd), padding5) != 0); @@ -1666,7 +1743,7 @@ void lustre_swab_mdt_body(struct mdt_body *b) __swab32s(&b->mbo_eadatasize); __swab32s(&b->mbo_aclsize); __swab32s(&b->mbo_max_mdsize); - __swab32s(&b->mbo_max_cookiesize); + CLASSERT(offsetof(typeof(*b), mbo_unused3)); __swab32s(&b->mbo_uid_h); __swab32s(&b->mbo_gid_h); CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0); @@ -1675,9 +1752,10 @@ void lustre_swab_mdt_body(struct mdt_body *b) void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b) { /* handle is opaque */ - __swab64s(&b->ioepoch); - __swab32s(&b->flags); - CLASSERT(offsetof(typeof(*b), padding) != 0); + /* mio_handle is opaque */ + CLASSERT(offsetof(typeof(*b), mio_unused1)); + CLASSERT(offsetof(typeof(*b), mio_unused2)); + CLASSERT(offsetof(typeof(*b), mio_padding)); } void lustre_swab_mgs_target_info(struct mgs_target_info *mti) @@ -1772,7 +1850,7 @@ void lustre_swab_fid2path(struct getinfo_fid2path *gf) } EXPORT_SYMBOL(lustre_swab_fid2path); -static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent) +static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent) { __swab64s(&fm_extent->fe_logical); __swab64s(&fm_extent->fe_physical); @@ -1781,7 +1859,7 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent) __swab32s(&fm_extent->fe_device); } -void lustre_swab_fiemap(struct ll_user_fiemap *fiemap) +void lustre_swab_fiemap(struct fiemap *fiemap) { __u32 i; @@ -1938,7 +2016,7 @@ static void lustre_swab_ldlm_res_id(struct ldlm_res_id *id) __swab64s(&id->name[i]); } -static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d) +static void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d) { /* the lock data is a union and the first two fields are always an * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock @@ -2062,8 +2140,6 @@ static void dump_obdo(struct obdo *oa) if (valid & OBD_MD_FLHANDLE) CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n", oa->o_handle.cookie); - if (valid & OBD_MD_FLCOOKIE) - CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n"); } void dump_ost_body(struct ost_body *ob) diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c index 5b9fb11c0b6bfe3593eca796db0b9c8204396295..94e9fa85d774005893533dcdb45521e91a273ede 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pers.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c @@ -43,6 +43,8 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, int mdidx) { + int offset = mdidx * LNET_MAX_IOV; + CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON); LASSERT(mdidx < desc->bd_md_max_brw); @@ -50,23 +52,20 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS))); - md->options |= LNET_MD_KIOV; md->length = max(0, desc->bd_iov_count - mdidx * 
LNET_MAX_IOV); md->length = min_t(unsigned int, LNET_MAX_IOV, md->length); - if (desc->bd_enc_iov) - md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV]; - else - md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV]; -} - -void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, - int pageoffset, int len) -{ - lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count]; - - kiov->bv_page = page; - kiov->bv_offset = pageoffset; - kiov->bv_len = len; - desc->bd_iov_count++; + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) { + md->options |= LNET_MD_KIOV; + if (GET_ENC_KIOV(desc)) + md->start = &BD_GET_ENC_KIOV(desc, offset); + else + md->start = &BD_GET_KIOV(desc, offset); + } else { + md->options |= LNET_MD_IOVEC; + if (GET_ENC_KVEC(desc)) + md->start = &BD_GET_ENC_KVEC(desc, offset); + else + md->start = &BD_GET_KVEC(desc, offset); + } } diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h index f14d193287da4e2f6fe0e462dbc6c402ae2abee2..e0f859ca62230969b1862845e52410e15b332862 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h @@ -55,8 +55,11 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc); /* client.c */ void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, unsigned int service_time); -struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, - unsigned type, unsigned portal); +struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags, + unsigned int max_brw, + enum ptlrpc_bulk_op_type type, + unsigned int portal, + const struct ptlrpc_bulk_frag_ops *ops); int ptlrpc_request_cache_init(void); void ptlrpc_request_cache_fini(void); struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags); @@ -67,6 +70,10 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, int ptlrpc_expired_set(void *data); int ptlrpc_set_next_timeout(struct ptlrpc_request_set *); void ptlrpc_resend_req(struct ptlrpc_request *request); +void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req); +void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req); +__u64 ptlrpc_known_replied_xid(struct obd_import *imp); +void ptlrpc_add_unreplied(struct ptlrpc_request *req); /* events.c */ int ptlrpc_init_portals(void); @@ -226,8 +233,6 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink); /* pers.c */ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, int mdcnt); -void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, - int pageoffset, int len); /* pack_generic.c */ struct ptlrpc_reply_state * @@ -322,6 +327,7 @@ static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req) INIT_LIST_HEAD(&cr->cr_set_chain); INIT_LIST_HEAD(&cr->cr_ctx_chain); + INIT_LIST_HEAD(&cr->cr_unreplied_list); init_waitqueue_head(&cr->cr_reply_waitq); init_waitqueue_head(&cr->cr_set_waitq); } @@ -338,4 +344,24 @@ static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req) INIT_LIST_HEAD(&sr->sr_hist_list); } +static inline bool ptlrpc_req_is_connect(struct ptlrpc_request *req) +{ + if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CONNECT || + lustre_msg_get_opc(req->rq_reqmsg) == OST_CONNECT || + lustre_msg_get_opc(req->rq_reqmsg) == MGS_CONNECT) + return true; + else + return false; +} + +static inline bool ptlrpc_req_is_disconnect(struct ptlrpc_request *req) +{ + if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_DISCONNECT || + lustre_msg_get_opc(req->rq_reqmsg) == OST_DISCONNECT || + 
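The ptlrpc_fill_bulk_md() rework above selects, for MD number mdidx, a window of at most LNET_MAX_IOV fragments starting at mdidx * LNET_MAX_IOV, taking either the kiov or kvec array depending on the descriptor type. The windowing itself reduces to the following standalone sketch (LNET_MAX_IOV shrunk to 4 to keep the example short; names are illustrative):

    #include <stdio.h>

    #define MAX_IOV_PER_MD 4        /* stand-in for LNET_MAX_IOV, shrunk for the example */

    /* Return how many fragments MD number mdidx covers out of total. */
    static int md_frag_count(int total, int mdidx)
    {
            int remaining = total - mdidx * MAX_IOV_PER_MD;

            if (remaining < 0)
                    remaining = 0;
            return remaining < MAX_IOV_PER_MD ? remaining : MAX_IOV_PER_MD;
    }

    int main(void)
    {
            int total = 10, mdidx;

            /* Prints 4 4 2: three MDs cover ten fragments. */
            for (mdidx = 0; mdidx < 3; mdidx++)
                    printf("%d ", md_frag_count(total, mdidx));
            printf("\n");
            return 0;
    }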
lustre_msg_get_opc(req->rq_reqmsg) == MGS_DISCONNECT) + return true; + else + return false; +} + #endif /* PTLRPC_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c index 405faf0dc9fcbea1c2072df1e02a13ccaf582f0a..c00449036884591f7b5910242ee39a646237eb43 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/recover.c +++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c @@ -111,7 +111,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) * all of it's requests being replayed, it's safe to * use a cursor to accelerate the search */ - imp->imp_replay_cursor = imp->imp_replay_cursor->next; + if (!imp->imp_resend_replay || + imp->imp_replay_cursor == &imp->imp_committed_list) + imp->imp_replay_cursor = imp->imp_replay_cursor->next; while (imp->imp_replay_cursor != &imp->imp_committed_list) { @@ -155,10 +157,24 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT); spin_lock(&imp->imp_lock); + /* The resend replay request may have been removed from the + * unreplied list. + */ + if (req && imp->imp_resend_replay && + list_empty(&req->rq_unreplied_list)) { + ptlrpc_add_unreplied(req); + imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp); + } + imp->imp_resend_replay = 0; spin_unlock(&imp->imp_lock); if (req) { + /* The request should have been added back to the unreplied list + * by ptlrpc_prepare_replay(). + */ + LASSERT(!list_empty(&req->rq_unreplied_list)); + rc = ptlrpc_replay_req(req); if (rc) { CERROR("recovery replay error %d for req %llu\n", @@ -194,7 +210,13 @@ int ptlrpc_resend(struct obd_import *imp) LASSERTF((long)req > PAGE_SIZE && req != LP_POISON, "req %p bad\n", req); LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); - if (!ptlrpc_no_resend(req)) + + /* + * If the request is allowed to be sent during replay and it + * has not timed out yet, then it does not need to be resent.
+ */ + if (!ptlrpc_no_resend(req) && + (req->rq_timedout || !req->rq_allow_replay)) ptlrpc_resend_req(req); } spin_unlock(&imp->imp_lock); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index a7416cd9ac71777424aee2101c5993b2447435d2..e860df7c45a2bde330be035b15b2cc1133df2f28 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -379,7 +379,7 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req) if (!req->rq_cli_ctx) { CERROR("req %p: fail to get context\n", req); - return -ENOMEM; + return -ECONNREFUSED; } return 0; @@ -515,6 +515,13 @@ static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req) set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC)); + } else if (unlikely(!test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags))) { + /* + * new ctx not up to date yet + */ + CDEBUG(D_SEC, + "ctx (%p, fl %lx) doesn't switch, not up to date yet\n", + newctx, newctx->cc_flags); } else { /* * it's possible newctx == oldctx if we're switching diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c index b2cc5ea6cb938545083412cb6b20070e6f3fb3c2..2fe9085e20340523420f44433aef8e1d3971e2bd 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c @@ -108,6 +108,7 @@ static struct ptlrpc_enc_page_pool { unsigned long epp_st_lowfree; /* lowest free pages reached */ unsigned int epp_st_max_wqlen; /* highest waitqueue length */ unsigned long epp_st_max_wait; /* in jiffies */ + unsigned long epp_st_outofmem; /* # of out of mem requests */ /* * pointers to pools */ @@ -139,7 +140,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v) "cache missing: %lu\n" "low free mark: %lu\n" "max waitqueue depth: %u\n" - "max wait time: %ld/%lu\n", + "max wait time: %ld/%lu\n" + "out of mem: %lu\n", totalram_pages, PAGES_PER_POOL, page_pools.epp_max_pages, @@ -158,7 +160,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v) page_pools.epp_st_lowfree, page_pools.epp_st_max_wqlen, page_pools.epp_st_max_wait, - msecs_to_jiffies(MSEC_PER_SEC)); + msecs_to_jiffies(MSEC_PER_SEC), + page_pools.epp_st_outofmem); spin_unlock(&page_pools.epp_lock); @@ -306,12 +309,30 @@ static inline void enc_pools_wakeup(void) } } +/* + * Export the number of free pages in the pool + */ +int get_free_pages_in_pool(void) +{ + return page_pools.epp_free_pages; +} + +/* + * Let outside world know if enc_pool full capacity is reached + */ +int pool_is_at_full_capacity(void) +{ + return (page_pools.epp_total_pages == page_pools.epp_max_pages); +} + void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) { int p_idx, g_idx; int i; - if (!desc->bd_enc_iov) + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); + + if (!GET_ENC_KIOV(desc)) return; LASSERT(desc->bd_iov_count > 0); @@ -326,12 +347,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) LASSERT(page_pools.epp_pools[p_idx]); for (i = 0; i < desc->bd_iov_count; i++) { - LASSERT(desc->bd_enc_iov[i].bv_page); + LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page); LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]); LASSERT(!page_pools.epp_pools[p_idx][g_idx]); page_pools.epp_pools[p_idx][g_idx] = - desc->bd_enc_iov[i].bv_page; + BD_GET_ENC_KIOV(desc, i).bv_page; if (++g_idx == PAGES_PER_POOL) { p_idx++; @@ -345,8 +366,8 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) 
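sptlrpc_enc_pool_put_pages() above returns pages by walking a two-level pool array with a (pool, slot) pair, rolling over to the next pool every PAGES_PER_POOL entries rather than dividing on each iteration. A compact model of that indexing (the PAGES_PER_POOL value is illustrative, not the real constant):

    #include <stdio.h>

    #define PAGES_PER_POOL 512      /* illustrative; the real value depends on PAGE_SIZE */

    int main(void)
    {
            int start = 1000;       /* first slot to fill, counted across all pools */
            int count = 30;         /* pages being returned */
            int p_idx = start / PAGES_PER_POOL;
            int g_idx = start % PAGES_PER_POOL;
            int i;

            for (i = 0; i < count; i++) {
                    /* page i would be stored at pools[p_idx][g_idx] here */
                    if (++g_idx == PAGES_PER_POOL) {
                            p_idx++;
                            g_idx = 0;
                    }
            }
            printf("ends at pool %d, slot %d\n", p_idx, g_idx);     /* pool 2, slot 6 */
            return 0;
    }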
spin_unlock(&page_pools.epp_lock); - kfree(desc->bd_enc_iov); - desc->bd_enc_iov = NULL; + kfree(GET_ENC_KIOV(desc)); + GET_ENC_KIOV(desc) = NULL; } static inline void enc_pools_alloc(void) @@ -404,6 +425,7 @@ int sptlrpc_enc_pool_init(void) page_pools.epp_st_lowfree = 0; page_pools.epp_st_max_wqlen = 0; page_pools.epp_st_max_wait = 0; + page_pools.epp_st_outofmem = 0; enc_pools_alloc(); if (!page_pools.epp_pools) @@ -431,13 +453,14 @@ void sptlrpc_enc_pool_fini(void) if (page_pools.epp_st_access > 0) { CDEBUG(D_SEC, - "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld\n", + "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld, out of mem %lu\n", page_pools.epp_st_max_pages, page_pools.epp_st_grows, page_pools.epp_st_grow_fails, page_pools.epp_st_shrinks, page_pools.epp_st_access, page_pools.epp_st_missings, page_pools.epp_st_max_wqlen, page_pools.epp_st_max_wait, - msecs_to_jiffies(MSEC_PER_SEC)); + msecs_to_jiffies(MSEC_PER_SEC), + page_pools.epp_st_outofmem); } } @@ -520,10 +543,11 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg, hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]); for (i = 0; i < desc->bd_iov_count; i++) { - cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].bv_page, - desc->bd_iov[i].bv_offset & + cfs_crypto_hash_update_page(hdesc, + BD_GET_KIOV(desc, i).bv_page, + BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK, - desc->bd_iov[i].bv_len); + BD_GET_KIOV(desc, i).bv_len); } if (hashsize > buflen) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c index cd305bcb334a039484fe5579fc54789fe4f4f8ed..c5e7a2309fce4c77ecc1315d4aee537da8b522ba 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c @@ -153,14 +153,16 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc) char *ptr; unsigned int off, i; + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); + for (i = 0; i < desc->bd_iov_count; i++) { - if (desc->bd_iov[i].bv_len == 0) + if (!BD_GET_KIOV(desc, i).bv_len) continue; - ptr = kmap(desc->bd_iov[i].bv_page); - off = desc->bd_iov[i].bv_offset & ~PAGE_MASK; + ptr = kmap(BD_GET_KIOV(desc, i).bv_page); + off = BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK; ptr[off] ^= 0x1; - kunmap(desc->bd_iov[i].bv_page); + kunmap(BD_GET_KIOV(desc, i).bv_page); return; } } @@ -352,11 +354,11 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx, /* fix the actual data size */ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) { - if (desc->bd_iov[i].bv_len + nob > desc->bd_nob_transferred) { - desc->bd_iov[i].bv_len = - desc->bd_nob_transferred - nob; - } - nob += desc->bd_iov[i].bv_len; + struct bio_vec bv_desc = BD_GET_KIOV(desc, i); + + if (bv_desc.bv_len + nob > desc->bd_nob_transferred) + bv_desc.bv_len = desc->bd_nob_transferred - nob; + nob += bv_desc.bv_len; } rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index 72f39308eebb7354e9a29168f6584aa59c672e94..70c70558e1779ffe08422af0a7b4bd0637ad91a2 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -343,9 +343,9 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, struct ptlrpc_service_conf *conf) { struct ptlrpc_service_thr_conf *tc = &conf->psc_thr; - unsigned init; - 
unsigned total; - unsigned nthrs; + unsigned int init; + unsigned int total; + unsigned int nthrs; int weight; /* @@ -2541,8 +2541,9 @@ int ptlrpc_hr_init(void) hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i); hrp->hrp_nthrs /= weight; + if (hrp->hrp_nthrs == 0) + hrp->hrp_nthrs = 1; - LASSERT(hrp->hrp_nthrs > 0); hrp->hrp_thrs = kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS, cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table, diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c index b05b1f935e4c04e5713b8bbe14ec1fd50dfcabec..a04e36cf6dd4393c80714d9644f49953e236e471 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c +++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c @@ -195,49 +195,29 @@ void lustre_assert_wire_constants(void) LASSERTF(REINT_MAX == 10, "found %lld\n", (long long)REINT_MAX); LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)DISP_IT_EXECD); + (unsigned int)DISP_IT_EXECD); LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)DISP_LOOKUP_EXECD); + (unsigned int)DISP_LOOKUP_EXECD); LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned)DISP_LOOKUP_NEG); + (unsigned int)DISP_LOOKUP_NEG); LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n", - (unsigned)DISP_LOOKUP_POS); + (unsigned int)DISP_LOOKUP_POS); LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n", - (unsigned)DISP_OPEN_CREATE); + (unsigned int)DISP_OPEN_CREATE); LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n", - (unsigned)DISP_OPEN_OPEN); + (unsigned int)DISP_OPEN_OPEN); LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n", - (unsigned)DISP_ENQ_COMPLETE); + (unsigned int)DISP_ENQ_COMPLETE); LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n", - (unsigned)DISP_ENQ_OPEN_REF); + (unsigned int)DISP_ENQ_OPEN_REF); LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n", - (unsigned)DISP_ENQ_CREATE_REF); + (unsigned int)DISP_ENQ_CREATE_REF); LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n", - (unsigned)DISP_OPEN_LOCK); + (unsigned int)DISP_OPEN_LOCK); LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n", (long long)MDS_STATUS_CONN); LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n", (long long)MDS_STATUS_LOV); - LASSERTF(LUSTRE_BFLAG_UNCOMMITTED_WRITES == 1, "found %lld\n", - (long long)LUSTRE_BFLAG_UNCOMMITTED_WRITES); - LASSERTF(MF_SOM_CHANGE == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)MF_SOM_CHANGE); - LASSERTF(MF_EPOCH_OPEN == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)MF_EPOCH_OPEN); - LASSERTF(MF_EPOCH_CLOSE == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned)MF_EPOCH_CLOSE); - LASSERTF(MF_MDC_CANCEL_FID1 == 0x00000008UL, "found 0x%.8xUL\n", - (unsigned)MF_MDC_CANCEL_FID1); - LASSERTF(MF_MDC_CANCEL_FID2 == 0x00000010UL, "found 0x%.8xUL\n", - (unsigned)MF_MDC_CANCEL_FID2); - LASSERTF(MF_MDC_CANCEL_FID3 == 0x00000020UL, "found 0x%.8xUL\n", - (unsigned)MF_MDC_CANCEL_FID3); - LASSERTF(MF_MDC_CANCEL_FID4 == 0x00000040UL, "found 0x%.8xUL\n", - (unsigned)MF_MDC_CANCEL_FID4); - LASSERTF(MF_SOM_AU == 0x00000080UL, "found 0x%.8xUL\n", - (unsigned)MF_SOM_AU); - LASSERTF(MF_GETATTR_LOCK == 0x00000100UL, "found 0x%.8xUL\n", - (unsigned)MF_GETATTR_LOCK); LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n", (long long)MDS_ATTR_MODE); LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n", @@ -420,15 +400,13 @@ void lustre_assert_wire_constants(void) LASSERTF((int)sizeof(((struct lustre_mdt_attrs 
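The ptlrpc_hr_init() change above replaces the LASSERT on hrp_nthrs with a clamp, so a CPU partition whose weight-divided share rounds down to zero still gets one handler thread. A one-line illustration of that guard (hypothetical helper, not the kernel function):

    #include <stdio.h>

    /* Per-partition thread count: divide by weight but never drop to zero. */
    static int hr_threads_for_partition(int cpus_in_partition, int weight)
    {
            int n = cpus_in_partition / weight;

            return n > 0 ? n : 1;
    }

    int main(void)
    {
            printf("%d\n", hr_threads_for_partition(1, 4));         /* 1, not 0 */
            printf("%d\n", hr_threads_for_partition(16, 4));        /* 4 */
            return 0;
    }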
*)0)->lma_self_fid) == 16, "found %lld\n", (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid)); LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)LMAI_RELEASED); + (unsigned int)LMAI_RELEASED); LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)LMAC_HSM); - LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)LMAC_SOM); + (unsigned int)LMAC_HSM); LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned)LMAC_NOT_IN_OI); + (unsigned int)LMAC_NOT_IN_OI); LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n", - (unsigned)LMAC_FID_ON_OST); + (unsigned int)LMAC_FID_ON_OST); /* Checks for struct ost_id */ LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n", @@ -478,11 +456,11 @@ void lustre_assert_wire_constants(void) LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n", (long long)FID_SEQ_LOV_DEFAULT); LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)FID_OID_SPECIAL_BFL); + (unsigned int)FID_OID_SPECIAL_BFL); LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)FID_OID_DOT_LUSTRE); + (unsigned int)FID_OID_DOT_LUSTRE); LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)FID_OID_DOT_LUSTRE_OBF); + (unsigned int)FID_OID_DOT_LUSTRE_OBF); /* Checks for struct lu_dirent */ LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n", @@ -512,11 +490,11 @@ void lustre_assert_wire_constants(void) LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n", (long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0])); LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)LUDA_FID); + (unsigned int)LUDA_FID); LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)LUDA_TYPE); + (unsigned int)LUDA_TYPE); LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned)LUDA_64BITHASH); + (unsigned int)LUDA_64BITHASH); /* Checks for struct luda_type */ LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n", @@ -635,10 +613,18 @@ void lustre_assert_wire_constants(void) (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid)); LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n", (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == 32, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_seen)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == 32, "found %lld\n", + (long long)(int)offsetof(struct ptlrpc_body_v3, pb_tag)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == 2, "found %lld\n", + (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == 34, "found %lld\n", + (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding0)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == 2, "found %lld\n", + (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == 36, "found %lld\n", + (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding1)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == 4, "found %lld\n", + (long long)(int)sizeof(((struct ptlrpc_body_v3 
*)0)->pb_padding1)); LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n", (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed)); LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n", @@ -680,10 +666,22 @@ void lustre_assert_wire_constants(void) (long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions)); LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n", (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == 120, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == 32, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == 120, "found %lld\n", + (long long)(int)offsetof(struct ptlrpc_body_v3, pb_mbits)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == 8, "found %lld\n", + (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == 128, "found %lld\n", + (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_0)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == 8, "found %lld\n", + (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == 136, "found %lld\n", + (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_1)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == 8, "found %lld\n", + (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == 144, "found %lld\n", + (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_2)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == 8, "found %lld\n", + (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2)); CLASSERT(LUSTRE_JOBID_SIZE == 32); LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n", (long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid)); @@ -713,10 +711,18 @@ void lustre_assert_wire_constants(void) (int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid)); LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n", (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == (int)offsetof(struct ptlrpc_body_v2, pb_last_seen), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_last_seen), (int)offsetof(struct ptlrpc_body_v2, pb_last_seen)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == (int)offsetof(struct ptlrpc_body_v2, pb_tag), "%d != %d\n", + (int)offsetof(struct ptlrpc_body_v3, pb_tag), (int)offsetof(struct ptlrpc_body_v2, pb_tag)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag), "%d != %d\n", + 
(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding0), "%d != %d\n", + (int)offsetof(struct ptlrpc_body_v3, pb_padding0), (int)offsetof(struct ptlrpc_body_v2, pb_padding0)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0), "%d != %d\n", + (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding1), "%d != %d\n", + (int)offsetof(struct ptlrpc_body_v3, pb_padding1), (int)offsetof(struct ptlrpc_body_v2, pb_padding1)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1), "%d != %d\n", + (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1)); LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n", (int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed)); LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n", @@ -757,10 +763,22 @@ void lustre_assert_wire_constants(void) (int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions)); LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n", (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == (int)offsetof(struct ptlrpc_body_v2, pb_padding), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_padding), (int)offsetof(struct ptlrpc_body_v2, pb_padding)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == (int)offsetof(struct ptlrpc_body_v2, pb_mbits), "%d != %d\n", + (int)offsetof(struct ptlrpc_body_v3, pb_mbits), (int)offsetof(struct ptlrpc_body_v2, pb_mbits)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits), "%d != %d\n", + (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0), "%d != %d\n", + (int)offsetof(struct ptlrpc_body_v3, pb_padding64_0), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0), "%d != %d\n", + (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1), "%d != %d\n", + (int)offsetof(struct ptlrpc_body_v3, 
pb_padding64_1), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1), "%d != %d\n", + (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1)); + LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2), "%d != %d\n", + (int)offsetof(struct ptlrpc_body_v3, pb_padding64_2), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2)); + LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2), "%d != %d\n", + (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2)); LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n", (long long)MSG_PTLRPC_BODY_OFF); LASSERTF(REQ_REC_OFF == 1, "found %lld\n", @@ -802,41 +820,41 @@ void lustre_assert_wire_constants(void) LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n", (long long)MSGHDR_CKSUM_INCOMPAT18); LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n", - (unsigned)MSG_OP_FLAG_MASK); + (unsigned int)MSG_OP_FLAG_MASK); LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n", (long long)MSG_OP_FLAG_SHIFT); LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n", - (unsigned)MSG_GEN_FLAG_MASK); + (unsigned int)MSG_GEN_FLAG_MASK); LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)MSG_LAST_REPLAY); + (unsigned int)MSG_LAST_REPLAY); LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)MSG_RESENT); + (unsigned int)MSG_RESENT); LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned)MSG_REPLAY); + (unsigned int)MSG_REPLAY); LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n", - (unsigned)MSG_DELAY_REPLAY); + (unsigned int)MSG_DELAY_REPLAY); LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n", - (unsigned)MSG_VERSION_REPLAY); + (unsigned int)MSG_VERSION_REPLAY); LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n", - (unsigned)MSG_REQ_REPLAY_DONE); + (unsigned int)MSG_REQ_REPLAY_DONE); LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n", - (unsigned)MSG_LOCK_REPLAY_DONE); + (unsigned int)MSG_LOCK_REPLAY_DONE); LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)MSG_CONNECT_RECOVERING); + (unsigned int)MSG_CONNECT_RECOVERING); LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)MSG_CONNECT_RECONNECT); + (unsigned int)MSG_CONNECT_RECONNECT); LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned)MSG_CONNECT_REPLAYABLE); + (unsigned int)MSG_CONNECT_REPLAYABLE); LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n", - (unsigned)MSG_CONNECT_LIBCLIENT); + (unsigned int)MSG_CONNECT_LIBCLIENT); LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n", - (unsigned)MSG_CONNECT_INITIAL); + (unsigned int)MSG_CONNECT_INITIAL); LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n", - (unsigned)MSG_CONNECT_ASYNC); + (unsigned int)MSG_CONNECT_ASYNC); LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n", - (unsigned)MSG_CONNECT_NEXT_VER); + (unsigned int)MSG_CONNECT_NEXT_VER); LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n", - (unsigned)MSG_CONNECT_TRANSNO); + (unsigned int)MSG_CONNECT_TRANSNO); /* Checks for struct obd_connect_data */ 
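The wiretest assertions in this file pin every renamed or added field (pb_tag, the new padding slots, pb_mbits, ocd_maxmodrpcs, ocd_connect_flags2) to a fixed offset and size so the on-wire layout cannot drift between builds. Outside the kernel the same style of guard can be written with C11 _Static_assert; the struct below is a made-up example of the technique, not the real ptlrpc_body:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical wire structure used only to illustrate the checking style. */
    struct demo_wire_body {
            uint64_t db_handle;
            uint32_t db_opc;
            uint16_t db_tag;
            uint16_t db_padding0;
    };

    /* Compile-time layout guards, analogous to the LASSERTF checks in wiretest.c. */
    static_assert(offsetof(struct demo_wire_body, db_opc) == 8, "db_opc moved");
    static_assert(offsetof(struct demo_wire_body, db_tag) == 12, "db_tag moved");
    static_assert(sizeof(struct demo_wire_body) == 16, "size changed");

    int main(void)
    {
            return 0;
    }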
LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n", @@ -905,14 +923,22 @@ void lustre_assert_wire_constants(void) (long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes)); LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n", (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes)); - LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 72, "found %lld\n", + LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxmodrpcs) == 72, "found %lld\n", + (long long)(int)offsetof(struct obd_connect_data, ocd_maxmodrpcs)); + LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs) == 2, "found %lld\n", + (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs)); + LASSERTF((int)offsetof(struct obd_connect_data, padding0) == 74, "found %lld\n", + (long long)(int)offsetof(struct obd_connect_data, padding0)); + LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding0) == 2, "found %lld\n", + (long long)(int)sizeof(((struct obd_connect_data *)0)->padding0)); + LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 76, "found %lld\n", (long long)(int)offsetof(struct obd_connect_data, padding1)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 8, "found %lld\n", + LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 4, "found %lld\n", (long long)(int)sizeof(((struct obd_connect_data *)0)->padding1)); - LASSERTF((int)offsetof(struct obd_connect_data, padding2) == 80, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding2)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding2)); + LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags2) == 80, "found %lld\n", + (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags2)); + LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2) == 8, "found %lld\n", + (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2)); LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n", (long long)(int)offsetof(struct obd_connect_data, padding3)); LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n", @@ -1075,14 +1101,24 @@ void lustre_assert_wire_constants(void) OBD_CONNECT_LFSCK); LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n", OBD_CONNECT_UNLINK_CLOSE); + LASSERTF(OBD_CONNECT_MULTIMODRPCS == 0x200000000000000ULL, "found 0x%.16llxULL\n", + OBD_CONNECT_MULTIMODRPCS); LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n", OBD_CONNECT_DIR_STRIPE); + LASSERTF(OBD_CONNECT_SUBTREE == 0x800000000000000ULL, "found 0x%.16llxULL\n", + OBD_CONNECT_SUBTREE); + LASSERTF(OBD_CONNECT_LOCK_AHEAD == 0x1000000000000000ULL, "found 0x%.16llxULL\n", + OBD_CONNECT_LOCK_AHEAD); + LASSERTF(OBD_CONNECT_OBDOPACK == 0x4000000000000000ULL, "found 0x%.16llxULL\n", + OBD_CONNECT_OBDOPACK); + LASSERTF(OBD_CONNECT_FLAGS2 == 0x8000000000000000ULL, "found 0x%.16llxULL\n", + OBD_CONNECT_FLAGS2); LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)OBD_CKSUM_CRC32); + (unsigned int)OBD_CKSUM_CRC32); LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)OBD_CKSUM_ADLER); + (unsigned int)OBD_CKSUM_ADLER); LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned)OBD_CKSUM_CRC32C); + (unsigned int)OBD_CKSUM_CRC32C); /* 
Checks for struct obdo */ LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n", @@ -1239,8 +1275,6 @@ void lustre_assert_wire_constants(void) OBD_MD_FLCKSUM); LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n", OBD_MD_FLQOS); - LASSERTF(OBD_MD_FLCOOKIE == (0x00800000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLCOOKIE); LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n", OBD_MD_FLGROUP); LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n", @@ -1394,13 +1428,13 @@ void lustre_assert_wire_constants(void) (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0])); CLASSERT(LOV_MAGIC_V3 == (0x0BD30000 | 0x0BD0)); LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)LOV_PATTERN_RAID0); + (unsigned int)LOV_PATTERN_RAID0); LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)LOV_PATTERN_RAID1); + (unsigned int)LOV_PATTERN_RAID1); LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n", - (unsigned)LOV_PATTERN_FIRST); + (unsigned int)LOV_PATTERN_FIRST); LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n", - (unsigned)LOV_PATTERN_CMOBD); + (unsigned int)LOV_PATTERN_CMOBD); /* Checks for struct lmv_mds_md_v1 */ LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n", @@ -1542,6 +1576,8 @@ void lustre_assert_wire_constants(void) (long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt)); LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n", (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt)); + LASSERTF(IOOBJ_MAX_BRW_BITS == 16, "found %lld\n", + (long long)IOOBJ_MAX_BRW_BITS); /* Checks for union lquota_id */ LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n", @@ -1817,10 +1853,10 @@ void lustre_assert_wire_constants(void) (long long)(int)offsetof(struct mdt_body, mbo_max_mdsize)); LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n", (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize)); - LASSERTF((int)offsetof(struct mdt_body, mbo_max_cookiesize) == 160, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_max_cookiesize)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize)); + LASSERTF((int)offsetof(struct mdt_body, mbo_unused3) == 160, "found %lld\n", + (long long)(int)offsetof(struct mdt_body, mbo_unused3)); + LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused3) == 4, "found %lld\n", + (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused3)); LASSERTF((int)offsetof(struct mdt_body, mbo_uid_h) == 164, "found %lld\n", (long long)(int)offsetof(struct mdt_body, mbo_uid_h)); LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n", @@ -1857,12 +1893,6 @@ void lustre_assert_wire_constants(void) MDS_FMODE_CLOSED); LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n", MDS_FMODE_EXEC); - LASSERTF(MDS_FMODE_EPOCH == 000001000000UL, "found 0%.11oUL\n", - MDS_FMODE_EPOCH); - LASSERTF(MDS_FMODE_TRUNC == 000002000000UL, "found 0%.11oUL\n", - MDS_FMODE_TRUNC); - LASSERTF(MDS_FMODE_SOM == 000004000000UL, "found 0%.11oUL\n", - MDS_FMODE_SOM); LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n", MDS_OPEN_CREATED); LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n", @@ -1905,10 +1935,20 @@ void lustre_assert_wire_constants(void) LUSTRE_IMMUTABLE_FL); LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n", LUSTRE_APPEND_FL); + 
LASSERTF(LUSTRE_NODUMP_FL == 0x00000040, "found 0x%.8x\n", + LUSTRE_NODUMP_FL); LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n", LUSTRE_NOATIME_FL); + LASSERTF(LUSTRE_INDEX_FL == 0x00001000, "found 0x%.8x\n", + LUSTRE_INDEX_FL); LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n", LUSTRE_DIRSYNC_FL); + LASSERTF(LUSTRE_TOPDIR_FL == 0x00020000, "found 0x%.8x\n", + LUSTRE_TOPDIR_FL); + LASSERTF(LUSTRE_DIRECTIO_FL == 0x00100000, "found 0x%.8x\n", + LUSTRE_DIRECTIO_FL); + LASSERTF(LUSTRE_INLINE_DATA_FL == 0x10000000, "found 0x%.8x\n", + LUSTRE_INLINE_DATA_FL); LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n", MDS_INODELOCK_LOOKUP); LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n", @@ -1921,22 +1961,22 @@ void lustre_assert_wire_constants(void) /* Checks for struct mdt_ioepoch */ LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n", (long long)(int)sizeof(struct mdt_ioepoch)); - LASSERTF((int)offsetof(struct mdt_ioepoch, handle) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_ioepoch, handle)); - LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->handle) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_ioepoch *)0)->handle)); - LASSERTF((int)offsetof(struct mdt_ioepoch, ioepoch) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_ioepoch, ioepoch)); - LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->ioepoch) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_ioepoch *)0)->ioepoch)); - LASSERTF((int)offsetof(struct mdt_ioepoch, flags) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_ioepoch, flags)); - LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_ioepoch *)0)->flags)); - LASSERTF((int)offsetof(struct mdt_ioepoch, padding) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_ioepoch, padding)); - LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding)); + LASSERTF((int)offsetof(struct mdt_ioepoch, mio_handle) == 0, "found %lld\n", + (long long)(int)offsetof(struct mdt_ioepoch, mio_handle)); + LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_handle) == 8, "found %lld\n", + (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_handle)); + LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused1) == 8, "found %lld\n", + (long long)(int)offsetof(struct mdt_ioepoch, mio_unused1)); + LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1) == 8, "found %lld\n", + (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1)); + LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused2) == 16, "found %lld\n", + (long long)(int)offsetof(struct mdt_ioepoch, mio_unused2)); + LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2) == 4, "found %lld\n", + (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2)); + LASSERTF((int)offsetof(struct mdt_ioepoch, mio_padding) == 20, "found %lld\n", + (long long)(int)offsetof(struct mdt_ioepoch, mio_padding)); + LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_padding) == 4, "found %lld\n", + (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_padding)); /* Checks for struct mdt_rec_setattr */ LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n", @@ -3520,21 +3560,21 @@ void lustre_assert_wire_constants(void) LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n", (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx)); - /* Checks 
for struct ll_fiemap_info_key */ + /* Checks for struct fiemap_info_key */ LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n", (long long)(int)sizeof(struct ll_fiemap_info_key)); - LASSERTF((int)offsetof(struct ll_fiemap_info_key, name[8]) == 8, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_info_key, name[8])); - LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]) == 1, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->name[8])); - LASSERTF((int)offsetof(struct ll_fiemap_info_key, oa) == 8, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_info_key, oa)); - LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->oa) == 208, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->oa)); - LASSERTF((int)offsetof(struct ll_fiemap_info_key, fiemap) == 216, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_info_key, fiemap)); - LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap)); + LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_name[8]) == 8, "found %lld\n", + (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_name[8])); + LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]) == 1, "found %lld\n", + (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8])); + LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_oa) == 8, "found %lld\n", + (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_oa)); + LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa) == 208, "found %lld\n", + (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa)); + LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_fiemap) == 216, "found %lld\n", + (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_fiemap)); + LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap) == 32, "found %lld\n", + (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap)); /* Checks for struct mgs_target_info */ LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n", @@ -3670,64 +3710,64 @@ void lustre_assert_wire_constants(void) LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n", (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0])); - /* Checks for struct ll_user_fiemap */ - LASSERTF((int)sizeof(struct ll_user_fiemap) == 32, "found %lld\n", - (long long)(int)sizeof(struct ll_user_fiemap)); - LASSERTF((int)offsetof(struct ll_user_fiemap, fm_start) == 0, "found %lld\n", - (long long)(int)offsetof(struct ll_user_fiemap, fm_start)); - LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_start)); - LASSERTF((int)offsetof(struct ll_user_fiemap, fm_length) == 8, "found %lld\n", - (long long)(int)offsetof(struct ll_user_fiemap, fm_length)); - LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_length) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_length)); - LASSERTF((int)offsetof(struct ll_user_fiemap, fm_flags) == 16, "found %lld\n", - (long long)(int)offsetof(struct ll_user_fiemap, fm_flags)); - LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_flags)); - LASSERTF((int)offsetof(struct ll_user_fiemap, fm_mapped_extents) == 20, "found %lld\n", - (long 
long)(int)offsetof(struct ll_user_fiemap, fm_mapped_extents)); - LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents)); - LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extent_count) == 24, "found %lld\n", - (long long)(int)offsetof(struct ll_user_fiemap, fm_extent_count)); - LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count)); - LASSERTF((int)offsetof(struct ll_user_fiemap, fm_reserved) == 28, "found %lld\n", - (long long)(int)offsetof(struct ll_user_fiemap, fm_reserved)); - LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved)); - LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extents) == 32, "found %lld\n", - (long long)(int)offsetof(struct ll_user_fiemap, fm_extents)); - LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extents) == 0, "found %lld\n", - (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extents)); + /* Checks for struct fiemap */ + LASSERTF((int)sizeof(struct fiemap) == 32, "found %lld\n", + (long long)(int)sizeof(struct fiemap)); + LASSERTF((int)offsetof(struct fiemap, fm_start) == 0, "found %lld\n", + (long long)(int)offsetof(struct fiemap, fm_start)); + LASSERTF((int)sizeof(((struct fiemap *)0)->fm_start) == 8, "found %lld\n", + (long long)(int)sizeof(((struct fiemap *)0)->fm_start)); + LASSERTF((int)offsetof(struct fiemap, fm_length) == 8, "found %lld\n", + (long long)(int)offsetof(struct fiemap, fm_length)); + LASSERTF((int)sizeof(((struct fiemap *)0)->fm_length) == 8, "found %lld\n", + (long long)(int)sizeof(((struct fiemap *)0)->fm_length)); + LASSERTF((int)offsetof(struct fiemap, fm_flags) == 16, "found %lld\n", + (long long)(int)offsetof(struct fiemap, fm_flags)); + LASSERTF((int)sizeof(((struct fiemap *)0)->fm_flags) == 4, "found %lld\n", + (long long)(int)sizeof(((struct fiemap *)0)->fm_flags)); + LASSERTF((int)offsetof(struct fiemap, fm_mapped_extents) == 20, "found %lld\n", + (long long)(int)offsetof(struct fiemap, fm_mapped_extents)); + LASSERTF((int)sizeof(((struct fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n", + (long long)(int)sizeof(((struct fiemap *)0)->fm_mapped_extents)); + LASSERTF((int)offsetof(struct fiemap, fm_extent_count) == 24, "found %lld\n", + (long long)(int)offsetof(struct fiemap, fm_extent_count)); + LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extent_count) == 4, "found %lld\n", + (long long)(int)sizeof(((struct fiemap *)0)->fm_extent_count)); + LASSERTF((int)offsetof(struct fiemap, fm_reserved) == 28, "found %lld\n", + (long long)(int)offsetof(struct fiemap, fm_reserved)); + LASSERTF((int)sizeof(((struct fiemap *)0)->fm_reserved) == 4, "found %lld\n", + (long long)(int)sizeof(((struct fiemap *)0)->fm_reserved)); + LASSERTF((int)offsetof(struct fiemap, fm_extents) == 32, "found %lld\n", + (long long)(int)offsetof(struct fiemap, fm_extents)); + LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extents) == 0, "found %lld\n", + (long long)(int)sizeof(((struct fiemap *)0)->fm_extents)); CLASSERT(FIEMAP_FLAG_SYNC == 0x00000001); CLASSERT(FIEMAP_FLAG_XATTR == 0x00000002); CLASSERT(FIEMAP_FLAG_DEVICE_ORDER == 0x40000000); - /* Checks for struct ll_fiemap_extent */ - LASSERTF((int)sizeof(struct ll_fiemap_extent) == 56, "found %lld\n", - (long long)(int)sizeof(struct ll_fiemap_extent)); - 
LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_logical) == 0, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_extent, fe_logical)); - LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical)); - LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_physical) == 8, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_extent, fe_physical)); - LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical)); - LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_length) == 16, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_extent, fe_length)); - LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_length) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_length)); - LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_flags) == 40, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_extent, fe_flags)); - LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags)); - LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_device) == 44, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_extent, fe_device)); - LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_device) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_device)); + /* Checks for struct fiemap_extent */ + LASSERTF((int)sizeof(struct fiemap_extent) == 56, "found %lld\n", + (long long)(int)sizeof(struct fiemap_extent)); + LASSERTF((int)offsetof(struct fiemap_extent, fe_logical) == 0, "found %lld\n", + (long long)(int)offsetof(struct fiemap_extent, fe_logical)); + LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_logical) == 8, "found %lld\n", + (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_logical)); + LASSERTF((int)offsetof(struct fiemap_extent, fe_physical) == 8, "found %lld\n", + (long long)(int)offsetof(struct fiemap_extent, fe_physical)); + LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_physical) == 8, "found %lld\n", + (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_physical)); + LASSERTF((int)offsetof(struct fiemap_extent, fe_length) == 16, "found %lld\n", + (long long)(int)offsetof(struct fiemap_extent, fe_length)); + LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_length) == 8, "found %lld\n", + (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_length)); + LASSERTF((int)offsetof(struct fiemap_extent, fe_flags) == 40, "found %lld\n", + (long long)(int)offsetof(struct fiemap_extent, fe_flags)); + LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_flags) == 4, "found %lld\n", + (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_flags)); + LASSERTF((int)offsetof(struct fiemap_extent, fe_reserved[0]) == 44, "found %lld\n", + (long long)(int)offsetof(struct fiemap_extent, fe_reserved[0])); + LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]) == 4, "found %lld\n", + (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0])); CLASSERT(FIEMAP_EXTENT_LAST == 0x00000001); CLASSERT(FIEMAP_EXTENT_UNKNOWN == 0x00000002); CLASSERT(FIEMAP_EXTENT_DELALLOC == 0x00000004); @@ -4093,9 +4133,9 @@ void lustre_assert_wire_constants(void) LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n", (long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len)); 
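Aside: the LASSERTF() lines above, and lustre_assert_wire_constants() as a whole, all follow one pattern: hard-coded offsetof()/sizeof() values pin down the on-wire layout of structs shared between client and server, so any header or compiler change that moves or resizes a field fails loudly instead of silently corrupting the protocol. A minimal, self-contained sketch of the same idea (the struct and field names below are invented for illustration, not taken from Lustre):

	/* wirecheck-style layout assertions, hypothetical example */
	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct wire_hdr {             /* made-up on-wire header */
		uint64_t w_magic;     /* bytes 0..7   */
		uint32_t w_version;   /* bytes 8..11  */
		uint32_t w_padding;   /* bytes 12..15 */
	};

	/* Compile-time form of the checks. */
	static_assert(sizeof(struct wire_hdr) == 16, "wire_hdr size changed");
	static_assert(offsetof(struct wire_hdr, w_version) == 8,
		      "w_version moved on the wire");

	int main(void)
	{
		/* Runtime form, closer to what the LASSERTF() calls do. */
		assert((int)offsetof(struct wire_hdr, w_padding) == 12);
		assert((int)sizeof(((struct wire_hdr *)0)->w_padding) == 4);
		return 0;
	}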
LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned)HSM_FORCE_ACTION); + (unsigned int)HSM_FORCE_ACTION); LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned)HSM_GHOST_COPY); + (unsigned int)HSM_GHOST_COPY); /* Checks for struct hsm_user_request */ LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n", diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre index 20206ba965affe333423de87fe6b470a026953ba..8691c6543a9c0f76d9926793e8af726fa957cede 100644 --- a/drivers/staging/lustre/sysfs-fs-lustre +++ b/drivers/staging/lustre/sysfs-fs-lustre @@ -11,7 +11,7 @@ Description: Shows if the lustre module has pinger support. "on" means yes and "off" means no. -What: /sys/fs/lustre/health +What: /sys/fs/lustre/health_check Date: May 2015 Contact: "Oleg Drokin" Description: diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig index 6620d96ee44d2f851587dfa0a8db2f4b15bec881..ffb8fa72c3dabdb5abf045d3bc1dcb584e3ede71 100644 --- a/drivers/staging/media/Kconfig +++ b/drivers/staging/media/Kconfig @@ -21,16 +21,12 @@ if STAGING_MEDIA && MEDIA_SUPPORT # Please keep them in alphabetic order source "drivers/staging/media/bcm2048/Kconfig" -source "drivers/staging/media/cec/Kconfig" - source "drivers/staging/media/cxd2099/Kconfig" source "drivers/staging/media/davinci_vpfe/Kconfig" source "drivers/staging/media/omap4iss/Kconfig" -source "drivers/staging/media/pulse8-cec/Kconfig" - source "drivers/staging/media/s5p-cec/Kconfig" # Keep LIRC at the end, as it has sub-menus diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile index 906257e94dda894f4f6c9b208ac07548b8f2c04b..a28e82cf6447a932c92a807b0aa290dfa56aae04 100644 --- a/drivers/staging/media/Makefile +++ b/drivers/staging/media/Makefile @@ -1,9 +1,7 @@ obj-$(CONFIG_I2C_BCM2048) += bcm2048/ -obj-$(CONFIG_MEDIA_CEC) += cec/ obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/ obj-$(CONFIG_DVB_CXD2099) += cxd2099/ obj-$(CONFIG_LIRC_STAGING) += lirc/ obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/ obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/ -obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/ obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += st-cec/ diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c index 4d9bd02ede4700c8a89460ed68b0ce66898d3c26..37bd439ee08bba4ad8e24c6c3bf7cff66434be63 100644 --- a/drivers/staging/media/bcm2048/radio-bcm2048.c +++ b/drivers/staging/media/bcm2048/radio-bcm2048.c @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA */ /* @@ -999,7 +995,7 @@ static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev, timeout = BCM2048_AUTO_SEARCH_TIMEOUT; if (!wait_for_completion_timeout(&bdev->compl, - msecs_to_jiffies(timeout))) + msecs_to_jiffies(timeout))) dev_err(&bdev->client->dev, "IRQ timeout.\n"); if (value) @@ -2059,67 +2055,67 @@ property_signed_read(fm_rssi, int, "%d") DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0) static struct device_attribute attrs[] = { - __ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read, + __ATTR(power_state, 0644, bcm2048_power_state_read, bcm2048_power_state_write), - __ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read, + __ATTR(mute, 0644, bcm2048_mute_read, bcm2048_mute_write), - __ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read, + __ATTR(audio_route, 0644, bcm2048_audio_route_read, bcm2048_audio_route_write), - __ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read, + __ATTR(dac_output, 0644, bcm2048_dac_output_read, bcm2048_dac_output_write), - __ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR, + __ATTR(fm_hi_lo_injection, 0644, bcm2048_fm_hi_lo_injection_read, bcm2048_fm_hi_lo_injection_write), - __ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read, + __ATTR(fm_frequency, 0644, bcm2048_fm_frequency_read, bcm2048_fm_frequency_write), - __ATTR(fm_af_frequency, S_IRUGO | S_IWUSR, + __ATTR(fm_af_frequency, 0644, bcm2048_fm_af_frequency_read, bcm2048_fm_af_frequency_write), - __ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read, + __ATTR(fm_deemphasis, 0644, bcm2048_fm_deemphasis_read, bcm2048_fm_deemphasis_write), - __ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read, + __ATTR(fm_rds_mask, 0644, bcm2048_fm_rds_mask_read, bcm2048_fm_rds_mask_write), - __ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR, + __ATTR(fm_best_tune_mode, 0644, bcm2048_fm_best_tune_mode_read, bcm2048_fm_best_tune_mode_write), - __ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR, + __ATTR(fm_search_rssi_threshold, 0644, bcm2048_fm_search_rssi_threshold_read, bcm2048_fm_search_rssi_threshold_write), - __ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR, + __ATTR(fm_search_mode_direction, 0644, bcm2048_fm_search_mode_direction_read, bcm2048_fm_search_mode_direction_write), - __ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR, + __ATTR(fm_search_tune_mode, 0644, bcm2048_fm_search_tune_mode_read, bcm2048_fm_search_tune_mode_write), - __ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read, + __ATTR(rds, 0644, bcm2048_rds_read, bcm2048_rds_write), - __ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR, + __ATTR(rds_b_block_mask, 0644, bcm2048_rds_b_block_mask_read, bcm2048_rds_b_block_mask_write), - __ATTR(rds_b_block_match, S_IRUGO | S_IWUSR, + __ATTR(rds_b_block_match, 0644, bcm2048_rds_b_block_match_read, bcm2048_rds_b_block_match_write), - __ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read, + __ATTR(rds_pi_mask, 0644, bcm2048_rds_pi_mask_read, bcm2048_rds_pi_mask_write), - __ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read, + __ATTR(rds_pi_match, 0644, bcm2048_rds_pi_match_read, bcm2048_rds_pi_match_write), - __ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read, + __ATTR(rds_wline, 0644, bcm2048_rds_wline_read, bcm2048_rds_wline_write), - __ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL), - __ATTR(rds_rt, 
S_IRUGO, bcm2048_rds_rt_read, NULL), - __ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL), - __ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL), - __ATTR(region_bottom_frequency, S_IRUGO, + __ATTR(rds_pi, 0444, bcm2048_rds_pi_read, NULL), + __ATTR(rds_rt, 0444, bcm2048_rds_rt_read, NULL), + __ATTR(rds_ps, 0444, bcm2048_rds_ps_read, NULL), + __ATTR(fm_rds_flags, 0444, bcm2048_fm_rds_flags_read, NULL), + __ATTR(region_bottom_frequency, 0444, bcm2048_region_bottom_frequency_read, NULL), - __ATTR(region_top_frequency, S_IRUGO, + __ATTR(region_top_frequency, 0444, bcm2048_region_top_frequency_read, NULL), - __ATTR(fm_carrier_error, S_IRUGO, + __ATTR(fm_carrier_error, 0444, bcm2048_fm_carrier_error_read, NULL), - __ATTR(fm_rssi, S_IRUGO, + __ATTR(fm_rssi, 0444, bcm2048_fm_rssi_read, NULL), - __ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read, + __ATTR(region, 0644, bcm2048_region_read, bcm2048_region_write), - __ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL), + __ATTR(rds_data, 0444, bcm2048_rds_data_read, NULL), }; static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev, @@ -2204,7 +2200,7 @@ static ssize_t bcm2048_fops_read(struct file *file, char __user *buf, } /* interruptible_sleep_on(&bdev->read_queue); */ if (wait_event_interruptible(bdev->read_queue, - bdev->rds_data_available) < 0) { + bdev->rds_data_available) < 0) { retval = -EINTR; goto done; } @@ -2542,7 +2538,7 @@ static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv, return err; } -static struct v4l2_ioctl_ops bcm2048_ioctl_ops = { +static const struct v4l2_ioctl_ops bcm2048_ioctl_ops = { .vidioc_querycap = bcm2048_vidioc_querycap, .vidioc_g_input = bcm2048_vidioc_g_input, .vidioc_s_input = bcm2048_vidioc_s_input, diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h index 4c90a32db79521347cd479d54823aefe4f36454b..4d950c1e2e8b6a6c7517c5aff5cfcfc03f79bbab 100644 --- a/drivers/staging/media/bcm2048/radio-bcm2048.h +++ b/drivers/staging/media/bcm2048/radio-bcm2048.h @@ -14,11 +14,6 @@ * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA */ #ifndef BCM2048_H diff --git a/drivers/staging/media/cec/Kconfig b/drivers/staging/media/cec/Kconfig deleted file mode 100644 index 6e12d41b1f86a782321fdf19b161317fed8f98d5..0000000000000000000000000000000000000000 --- a/drivers/staging/media/cec/Kconfig +++ /dev/null @@ -1,12 +0,0 @@ -config MEDIA_CEC - bool "CEC API (EXPERIMENTAL)" - depends on MEDIA_SUPPORT - select MEDIA_CEC_EDID - ---help--- - Enable the CEC API. - -config MEDIA_CEC_DEBUG - bool "CEC debugfs interface (EXPERIMENTAL)" - depends on MEDIA_CEC && DEBUG_FS - ---help--- - Turns on the DebugFS interface for CEC devices. diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO deleted file mode 100644 index 13224694a8aef5e8bf5f48dc57cb40e9b70ae6b0..0000000000000000000000000000000000000000 --- a/drivers/staging/media/cec/TODO +++ /dev/null @@ -1,32 +0,0 @@ -The reason why cec.c is still in staging is that I would like -to have a bit more confidence in the uABI. The kABI is fine, -no problem there, but I would like to let the public API mature -a bit. 
- -Once I'm confident that I didn't miss anything then the cec.c source -can move to drivers/media and the linux/cec.h and linux/cec-funcs.h -headers can move to uapi/linux and added to uapi/linux/Kbuild to make -them public. - -Hopefully this will happen later in 2016. - -Other TODOs: - -- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that? -- Add a flag to inhibit passing CEC RC messages to the rc subsystem. - Applications should be able to choose this when calling S_LOG_ADDRS. -- If the reply field of cec_msg is set then when the reply arrives it - is only sent to the filehandle that transmitted the original message - and not to any followers. Should this behavior change or perhaps - controlled through a cec_msg flag? -- Should CEC_LOG_ADDR_TYPE_SPECIFIC be replaced by TYPE_2ND_TV and TYPE_PROCESSOR? - And also TYPE_SWITCH and TYPE_CDC_ONLY in addition to the TYPE_UNREGISTERED? - This should give the framework more information about the device type - since SPECIFIC and UNREGISTERED give no useful information. -- Once this is out of staging this should no longer be a separate - config option, instead it should be selected by drivers that want it. -- Revisit the IS_REACHABLE(RC_CORE): perhaps the RC_CORE support should - be enabled through a separate config option in drivers/media/Kconfig - or rc/Kconfig? - -Hans Verkuil diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c index fedeb3c3549e3e812d3328e2e83b5522ee1fad2d..c72c3f09f17528bea94b2d4b8f2db88a94795670 100644 --- a/drivers/staging/media/cxd2099/cxd2099.c +++ b/drivers/staging/media/cxd2099/cxd2099.c @@ -336,7 +336,8 @@ static int init(struct cxd *ci) break; #endif /* TOSTRT = 8, Mode B (gated clock), falling Edge, - * Serial, POL=HIGH, MSB */ + * Serial, POL=HIGH, MSB + */ status = write_reg(ci, 0x0A, 0xA7); if (status < 0) break; diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile index c64515c644cdde945182836a6b2b5e421d69a5c7..3019c9ecd54868bc37ca9e77f61e46c92ea39937 100644 --- a/drivers/staging/media/davinci_vpfe/Makefile +++ b/drivers/staging/media/davinci_vpfe/Makefile @@ -1,3 +1,5 @@ -obj-$(CONFIG_VIDEO_DM365_VPFE) += \ +obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci-vfpe.o + +davinci-vfpe-objs := \ dm365_isif.o dm365_ipipe_hw.o dm365_ipipe.o \ dm365_resizer.o dm365_ipipeif.o vpfe_mc_capture.o vpfe_video.o diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c index 128662623ea8fa92fbbcbf64f277dd524dbbf5ac..5fbc2d447ff2f702c969ae52b904d7880a529d19 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c +++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c @@ -237,9 +237,8 @@ resizer_calculate_resize_ratios(struct vpfe_resizer_device *resizer, int index) ((informat->width) * 256) / (outformat->width); } -void -static resizer_enable_422_420_conversion(struct resizer_params *param, - int index, bool en) +static void resizer_enable_422_420_conversion(struct resizer_params *param, + int index, bool en) { param->rsz_rsc_param[index].cen = en; param->rsz_rsc_param[index].yen = en; @@ -490,7 +489,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer) int line_len; int ret; - if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY) { + if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY) { dev_err(dev, "enable resizer - Resizer-A\n"); return -EINVAL; } @@ -502,7 +501,7 @@ resizer_configure_in_continious_mode(struct 
vpfe_resizer_device *resizer) param->rsz_en[RSZ_B] = DISABLE; param->oper_mode = RESIZER_MODE_CONTINIOUS; - if (resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) { + if (resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) { struct v4l2_mbus_framefmt *outformat2; param->rsz_en[RSZ_B] = ENABLE; @@ -825,7 +824,7 @@ resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer) .o_hsz = WIDTH_O - 1, .v_dif = 256, .v_typ_y = VPFE_RSZ_INTP_CUBIC, - .h_typ_c = VPFE_RSZ_INTP_CUBIC, + .v_typ_c = VPFE_RSZ_INTP_CUBIC, .h_dif = 256, .h_typ_y = VPFE_RSZ_INTP_CUBIC, .h_typ_c = VPFE_RSZ_INTP_CUBIC, @@ -843,7 +842,7 @@ resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer) .o_hsz = WIDTH_O - 1, .v_dif = 256, .v_typ_y = VPFE_RSZ_INTP_CUBIC, - .h_typ_c = VPFE_RSZ_INTP_CUBIC, + .v_typ_c = VPFE_RSZ_INTP_CUBIC, .h_dif = 256, .h_typ_y = VPFE_RSZ_INTP_CUBIC, .h_typ_c = VPFE_RSZ_INTP_CUBIC, @@ -1043,13 +1042,13 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer) if (ipipeif_sink != IPIPEIF_INPUT_MEMORY) return; - if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) { + if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) { val = vpss_dma_complete_interrupt(); if (val != 0 && val != 2) return; } - if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) { + if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) { spin_lock(&video_out->dma_queue_lock); vpfe_video_process_buffer_complete(video_out); video_out->state = VPFE_VIDEO_BUFFER_NOT_QUEUED; @@ -1059,7 +1058,7 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer) /* If resizer B is enabled */ if (pipe->output_num > 1 && resizer->resizer_b.output == - RESIZER_OUPUT_MEMORY) { + RESIZER_OUTPUT_MEMORY) { spin_lock(&video_out->dma_queue_lock); vpfe_video_process_buffer_complete(video_out2); video_out2->state = VPFE_VIDEO_BUFFER_NOT_QUEUED; @@ -1069,7 +1068,7 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer) /* start HW if buffers are queued */ if (vpfe_video_is_pipe_ready(pipe) && - resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) { + resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) { resizer_enable(resizer, 1); vpfe_ipipe_enable(vpfe_dev, 1); vpfe_ipipeif_enable(vpfe_dev); @@ -1237,8 +1236,8 @@ static int resizer_do_hw_setup(struct vpfe_resizer_device *resizer) struct resizer_params *param = &resizer->config; int ret = 0; - if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY || - resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) { + if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY || + resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) { if (ipipeif_sink == IPIPEIF_INPUT_MEMORY && ipipeif_source == IPIPEIF_OUTPUT_RESIZER) ret = resizer_configure_in_single_shot_mode(resizer); @@ -1263,7 +1262,7 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable) if (&resizer->crop_resizer.subdev != sd) return 0; - if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY) + if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY) return 0; switch (enable) { @@ -1724,7 +1723,7 @@ static int resizer_link_setup(struct media_entity *entity, } if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) return -EBUSY; - resizer->resizer_a.output = RESIZER_OUPUT_MEMORY; + resizer->resizer_a.output = RESIZER_OUTPUT_MEMORY; break; default: @@ -1749,7 +1748,7 @@ static int resizer_link_setup(struct media_entity *entity, } if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) return -EBUSY; - resizer->resizer_b.output = RESIZER_OUPUT_MEMORY; + resizer->resizer_b.output = RESIZER_OUTPUT_MEMORY; 
break; default: diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h index 93b0f44030aa68288723100268f169e95dfcca96..00e64b0d0295eda8b0c3214da9deea364110f2dc 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_resizer.h +++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.h @@ -210,7 +210,7 @@ enum resizer_input_entity { enum resizer_output_entity { RESIZER_OUTPUT_NONE = 0, - RESIZER_OUPUT_MEMORY = 1, + RESIZER_OUTPUT_MEMORY = 1, }; struct dm365_resizer_device { diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index 8be9f854510f4df1d287ce7a49521f2b48e32db0..c27d7e9a1bdbe84059b484c7971f0f6ff9a16c33 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c @@ -198,7 +198,7 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video) return 0; } -/* checks wether pipeline is ready for enabling */ +/* checks whether pipeline is ready for enabling */ int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe) { int i; @@ -1143,8 +1143,8 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb) /* Initialize buffer */ vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage); if (vb2_plane_vaddr(vb, 0) && - vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) - return -EINVAL; + vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) + return -EINVAL; addr = vb2_dma_contig_plane_dma_addr(vb, 0); /* Make sure user addresses are aligned to 32 bytes */ @@ -1362,7 +1362,7 @@ static int vpfe_reqbufs(struct file *file, void *priv, ret = vb2_queue_init(q); if (ret) { v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); - return ret; + goto unlock_out; } fh->io_allowed = 1; diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig index 6879c4651b46c8b48460626cb346b8ee23fa6587..25b7e7ccf55479ec7a3d83a6e4b8b6aaba207a26 100644 --- a/drivers/staging/media/lirc/Kconfig +++ b/drivers/staging/media/lirc/Kconfig @@ -38,19 +38,6 @@ config LIRC_SASEM help Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module -config LIRC_SERIAL - tristate "Homebrew Serial Port Receiver" - depends on LIRC - help - Driver for Homebrew Serial Port Receivers - -config LIRC_SERIAL_TRANSMITTER - bool "Serial Port Transmitter" - default y - depends on LIRC_SERIAL - help - Serial Port Transmitter support - config LIRC_SIR tristate "Built-in SIR IrDA port" depends on LIRC diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile index 5430adf0475ded5abe2ef4b4cf4e7f2c08297894..7f919eab1989e33f49917f51e9857bc439f51d1f 100644 --- a/drivers/staging/media/lirc/Makefile +++ b/drivers/staging/media/lirc/Makefile @@ -7,6 +7,5 @@ obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o obj-$(CONFIG_LIRC_IMON) += lirc_imon.o obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o -obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o obj-$(CONFIG_LIRC_SIR) += lirc_sir.o obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c index 198a8057f2f17ca96e37e4472b4c1eb51e2d5c43..1e650fba4a92640ed5c00dd74b0daa8f9625a549 100644 --- a/drivers/staging/media/lirc/lirc_imon.c +++ b/drivers/staging/media/lirc/lirc_imon.c @@ -334,7 +334,7 @@ static int send_packet(struct imon_context *context) context->tx_urb->actual_length = 0; - init_completion(&context->tx.finished); + reinit_completion(&context->tx.finished); 
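Aside: the lirc_imon hunk just above switches send_packet() from init_completion() to reinit_completion() and moves the one-time init_completion() to open time; reinit_completion() only resets the done counter, whereas re-running init_completion() on a completion that may already have a waiter re-initialises its wait queue and can race. A short sketch of that reuse pattern, with an invented context struct and function names (not the driver's real API):

	/* completion reuse pattern, hypothetical sketch */
	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct tx_ctx {                      /* invented device context */
		struct completion finished;
	};

	static void tx_ctx_setup(struct tx_ctx *ctx)
	{
		init_completion(&ctx->finished);   /* one-time initialisation */
	}

	static int tx_one_packet(struct tx_ctx *ctx)
	{
		reinit_completion(&ctx->finished); /* reset only the done count */
		/* ... submit the URB / start the transfer here ... */
		if (!wait_for_completion_timeout(&ctx->finished, HZ))
			return -ETIMEDOUT;
		return 0;
	}

	/* The transfer callback (or disconnect path) calls
	 * complete(&ctx->finished) for the single waiter. */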
atomic_set(&context->tx.busy, 1); retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); @@ -408,9 +408,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf, data_buf = memdup_user(buf, n_bytes); if (IS_ERR(data_buf)) { - retval = PTR_ERR(data_buf); - data_buf = NULL; - goto exit; + mutex_unlock(&context->ctx_lock); + return PTR_ERR(data_buf); } memcpy(context->tx.data_buf, data_buf, n_bytes); @@ -497,6 +496,8 @@ static int ir_open(void *data) context->rx.initial_space = 1; context->rx.prev_bit = 0; + init_completion(&context->tx.finished); + context->ir_isopen = 1; dev_info(context->driver->dev, "IR port opened\n"); @@ -930,7 +931,7 @@ static void imon_disconnect(struct usb_interface *interface) /* Abort ongoing write */ if (atomic_read(&context->tx.busy)) { usb_kill_urb(context->tx_urb); - complete_all(&context->tx.finished); + complete(&context->tx.finished); } context->dev_present = 0; diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c index 4678ae10b0300fb4ac27e90ebb6e7bc24ab3826c..b0c176e14b6b006d41f3fcbcd42de4dcd731c61b 100644 --- a/drivers/staging/media/lirc/lirc_sasem.c +++ b/drivers/staging/media/lirc/lirc_sasem.c @@ -103,7 +103,8 @@ struct sasem_context { struct tx_t { unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data - * buffer */ + * buffer + */ struct completion finished; /* wait for write to finish */ atomic_t busy; /* write in progress */ int status; /* status of tx completion */ @@ -295,7 +296,8 @@ static int vfd_close(struct inode *inode, struct file *file) if (!context->dev_present && !context->ir_isopen) { /* Device disconnected before close and IR port is * not open. If IR port is open, context will be - * deleted by ir_close. */ + * deleted by ir_close. + */ mutex_unlock(&context->ctx_lock); delete_context(context); return retval; @@ -384,9 +386,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf, data_buf = memdup_user(buf, n_bytes); if (IS_ERR(data_buf)) { - retval = PTR_ERR(data_buf); - data_buf = NULL; - goto exit; + mutex_unlock(&context->ctx_lock); + return PTR_ERR(data_buf); } memcpy(context->tx.data_buf, data_buf, n_bytes); @@ -397,7 +398,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf, /* Nine 8 byte packets to be sent */ /* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0" - * will clear the VFD */ + * will clear the VFD + */ for (i = 0; i < 9; i++) { switch (i) { case 0: diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c deleted file mode 100644 index b798b311d32ccbe47129e0b6626aa23ae73dade4..0000000000000000000000000000000000000000 --- a/drivers/staging/media/lirc/lirc_serial.c +++ /dev/null @@ -1,1130 +0,0 @@ -/* - * lirc_serial.c - * - * lirc_serial - Device driver that records pulse- and pause-lengths - * (space-lengths) between DDCD event on a serial port. - * - * Copyright (C) 1996,97 Ralph Metzler - * Copyright (C) 1998 Trent Piepho - * Copyright (C) 1998 Ben Pfaff - * Copyright (C) 1999 Christoph Bartelmus - * Copyright (C) 2007 Andrei Tanas (suspend/resume support) - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
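Aside: the vfd_write() hunks above (lirc_imon and lirc_sasem) replace the "set retval, NULL the buffer, goto exit" sequence after a failed memdup_user() with an immediate unlock-and-return of the error value. A hedged sketch of that error path, using invented names rather than the drivers' real context:

	/* memdup_user() error path under a mutex, hypothetical sketch */
	#include <linux/err.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct dev_ctx {                     /* invented driver context */
		struct mutex lock;
	};

	static ssize_t copy_and_process(struct dev_ctx *ctx,
					const char __user *buf, size_t n)
	{
		char *data;

		mutex_lock(&ctx->lock);

		data = memdup_user(buf, n);  /* kmalloc() + copy_from_user() */
		if (IS_ERR(data)) {
			mutex_unlock(&ctx->lock);   /* drop the lock first */
			return PTR_ERR(data);       /* then return the error */
		}

		/* ... consume data ... */

		kfree(data);
		mutex_unlock(&ctx->lock);
		return n;
	}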
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -/* - * Steve's changes to improve transmission fidelity: - * - for systems with the rdtsc instruction and the clock counter, a - * send_pule that times the pulses directly using the counter. - * This means that the LIRC_SERIAL_TRANSMITTER_LATENCY fudge is - * not needed. Measurement shows very stable waveform, even where - * PCI activity slows the access to the UART, which trips up other - * versions. - * - For other system, non-integer-microsecond pulse/space lengths, - * done using fixed point binary. So, much more accurate carrier - * frequency. - * - fine tuned transmitter latency, taking advantage of fractional - * microseconds in previous change - * - Fixed bug in the way transmitter latency was accounted for by - * tuning the pulse lengths down - the send_pulse routine ignored - * this overhead as it timed the overall pulse length - so the - * pulse frequency was right but overall pulse length was too - * long. Fixed by accounting for latency on each pulse/space - * iteration. - * - * Steve Davies July 2001 - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* From Intel IXP42X Developer's Manual (#252480-005): */ -/* ftp://download.intel.com/design/network/manuals/25248005.pdf */ -#define UART_IE_IXP42X_UUE 0x40 /* IXP42X UART Unit enable */ -#define UART_IE_IXP42X_RTOIE 0x10 /* IXP42X Receiver Data Timeout int.enable */ - -#include -#include - -#define LIRC_DRIVER_NAME "lirc_serial" - -struct lirc_serial { - int signal_pin; - int signal_pin_change; - u8 on; - u8 off; - long (*send_pulse)(unsigned long length); - void (*send_space)(long length); - int features; - spinlock_t lock; -}; - -#define LIRC_HOMEBREW 0 -#define LIRC_IRDEO 1 -#define LIRC_IRDEO_REMOTE 2 -#define LIRC_ANIMAX 3 -#define LIRC_IGOR 4 -#define LIRC_NSLU2 5 - -/*** module parameters ***/ -static int type; -static int io; -static int irq; -static bool iommap; -static int ioshift; -static bool softcarrier = true; -static bool share_irq; -static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */ -static bool txsense; /* 0 = active high, 1 = active low */ - -/* forward declarations */ -static long send_pulse_irdeo(unsigned long length); -static long send_pulse_homebrew(unsigned long length); -static void send_space_irdeo(long length); -static void send_space_homebrew(long length); - -static struct lirc_serial hardware[] = { - [LIRC_HOMEBREW] = { - .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock), - .signal_pin = UART_MSR_DCD, - .signal_pin_change = UART_MSR_DDCD, - .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR), - .off = (UART_MCR_RTS | UART_MCR_OUT2), - .send_pulse = send_pulse_homebrew, - .send_space = send_space_homebrew, -#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER - .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | - LIRC_CAN_SET_SEND_CARRIER | - LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) -#else - 
.features = LIRC_CAN_REC_MODE2 -#endif - }, - - [LIRC_IRDEO] = { - .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock), - .signal_pin = UART_MSR_DSR, - .signal_pin_change = UART_MSR_DDSR, - .on = UART_MCR_OUT2, - .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2), - .send_pulse = send_pulse_irdeo, - .send_space = send_space_irdeo, - .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | - LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) - }, - - [LIRC_IRDEO_REMOTE] = { - .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock), - .signal_pin = UART_MSR_DSR, - .signal_pin_change = UART_MSR_DDSR, - .on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2), - .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2), - .send_pulse = send_pulse_irdeo, - .send_space = send_space_irdeo, - .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | - LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) - }, - - [LIRC_ANIMAX] = { - .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock), - .signal_pin = UART_MSR_DCD, - .signal_pin_change = UART_MSR_DDCD, - .on = 0, - .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2), - .send_pulse = NULL, - .send_space = NULL, - .features = LIRC_CAN_REC_MODE2 - }, - - [LIRC_IGOR] = { - .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock), - .signal_pin = UART_MSR_DSR, - .signal_pin_change = UART_MSR_DDSR, - .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR), - .off = (UART_MCR_RTS | UART_MCR_OUT2), - .send_pulse = send_pulse_homebrew, - .send_space = send_space_homebrew, -#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER - .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | - LIRC_CAN_SET_SEND_CARRIER | - LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) -#else - .features = LIRC_CAN_REC_MODE2 -#endif - }, -}; - -#define RS_ISR_PASS_LIMIT 256 - -/* - * A long pulse code from a remote might take up to 300 bytes. The - * daemon should read the bytes as soon as they are generated, so take - * the number of keys you think you can push before the daemon runs - * and multiply by 300. The driver will warn you if you overrun this - * buffer. If you have a slow computer or non-busmastering IDE disks, - * maybe you will need to increase this. - */ - -/* This MUST be a power of two! It has to be larger than 1 as well. */ - -#define RBUF_LEN 256 - -static ktime_t lastkt; - -static struct lirc_buffer rbuf; - -static unsigned int freq = 38000; -static unsigned int duty_cycle = 50; - -/* Initialized in init_timing_params() */ -static unsigned long period; -static unsigned long pulse_width; -static unsigned long space_width; - -#if defined(__i386__) -/* - * From: - * Linux I/O port programming mini-HOWTO - * Author: Riku Saikkonen - * v, 28 December 1997 - * - * [...] - * Actually, a port I/O instruction on most ports in the 0-0x3ff range - * takes almost exactly 1 microsecond, so if you're, for example, using - * the parallel port directly, just do additional inb()s from that port - * to delay. - * [...] - */ -/* transmitter latency 1.5625us 0x1.90 - this figure arrived at from - * comment above plus trimming to match actual measured frequency. - * This will be sensitive to cpu speed, though hopefully most of the 1.5us - * is spent in the uart access. Still - for reference test machine was a - * 1.13GHz Athlon system - Steve - */ - -/* - * changed from 400 to 450 as this works better on slower machines; - * faster machines will use the rdtsc code anyway - */ -#define LIRC_SERIAL_TRANSMITTER_LATENCY 450 - -#else - -/* does anybody have information on other platforms ? 
*/ -/* 256 = 1<<8 */ -#define LIRC_SERIAL_TRANSMITTER_LATENCY 256 - -#endif /* __i386__ */ -/* - * FIXME: should we be using hrtimers instead of this - * LIRC_SERIAL_TRANSMITTER_LATENCY nonsense? - */ - -/* fetch serial input packet (1 byte) from register offset */ -static u8 sinp(int offset) -{ - if (iommap) - /* the register is memory-mapped */ - offset <<= ioshift; - - return inb(io + offset); -} - -/* write serial output packet (1 byte) of value to register offset */ -static void soutp(int offset, u8 value) -{ - if (iommap) - /* the register is memory-mapped */ - offset <<= ioshift; - - outb(value, io + offset); -} - -static void on(void) -{ - if (txsense) - soutp(UART_MCR, hardware[type].off); - else - soutp(UART_MCR, hardware[type].on); -} - -static void off(void) -{ - if (txsense) - soutp(UART_MCR, hardware[type].on); - else - soutp(UART_MCR, hardware[type].off); -} - -#ifndef MAX_UDELAY_MS -#define MAX_UDELAY_US 5000 -#else -#define MAX_UDELAY_US (MAX_UDELAY_MS*1000) -#endif - -static void safe_udelay(unsigned long usecs) -{ - while (usecs > MAX_UDELAY_US) { - udelay(MAX_UDELAY_US); - usecs -= MAX_UDELAY_US; - } - udelay(usecs); -} - -#ifdef USE_RDTSC -/* - * This is an overflow/precision juggle, complicated in that we can't - * do long long divide in the kernel - */ - -/* - * When we use the rdtsc instruction to measure clocks, we keep the - * pulse and space widths as clock cycles. As this is CPU speed - * dependent, the widths must be calculated in init_port and ioctl - * time - */ - -static int init_timing_params(unsigned int new_duty_cycle, - unsigned int new_freq) -{ - __u64 loops_per_sec, work; - - duty_cycle = new_duty_cycle; - freq = new_freq; - - loops_per_sec = __this_cpu_read(cpu.info.loops_per_jiffy); - loops_per_sec *= HZ; - - /* How many clocks in a microsecond?, avoiding long long divide */ - work = loops_per_sec; - work *= 4295; /* 4295 = 2^32 / 1e6 */ - - /* - * Carrier period in clocks, approach good up to 32GHz clock, - * gets carrier frequency within 8Hz - */ - period = loops_per_sec >> 3; - period /= (freq >> 3); - - /* Derive pulse and space from the period */ - pulse_width = period * duty_cycle / 100; - space_width = period - pulse_width; - pr_debug("in init_timing_params, freq=%d, duty_cycle=%d, clk/jiffy=%ld, pulse=%ld, space=%ld, conv_us_to_clocks=%ld\n", - freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy), - pulse_width, space_width, conv_us_to_clocks); - return 0; -} -#else /* ! USE_RDTSC */ -static int init_timing_params(unsigned int new_duty_cycle, - unsigned int new_freq) -{ -/* - * period, pulse/space width are kept with 8 binary places - - * IE multiplied by 256. - */ - if (256 * 1000000L / new_freq * new_duty_cycle / 100 <= - LIRC_SERIAL_TRANSMITTER_LATENCY) - return -EINVAL; - if (256 * 1000000L / new_freq * (100 - new_duty_cycle) / 100 <= - LIRC_SERIAL_TRANSMITTER_LATENCY) - return -EINVAL; - duty_cycle = new_duty_cycle; - freq = new_freq; - period = 256 * 1000000L / freq; - pulse_width = period * duty_cycle / 100; - space_width = period - pulse_width; - pr_debug("in init_timing_params, freq=%d pulse=%ld, space=%ld\n", - freq, pulse_width, space_width); - return 0; -} -#endif /* USE_RDTSC */ - - -/* return value: space length delta */ - -static long send_pulse_irdeo(unsigned long length) -{ - long rawbits, ret; - int i; - unsigned char output; - unsigned char chunk, shifted; - - /* how many bits have to be sent ? 
*/ - rawbits = length * 1152 / 10000; - if (duty_cycle > 50) - chunk = 3; - else - chunk = 1; - for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) { - shifted = chunk << (i * 3); - shifted >>= 1; - output &= (~shifted); - i++; - if (i == 3) { - soutp(UART_TX, output); - while (!(sinp(UART_LSR) & UART_LSR_THRE)) - ; - output = 0x7f; - i = 0; - } - } - if (i != 0) { - soutp(UART_TX, output); - while (!(sinp(UART_LSR) & UART_LSR_TEMT)) - ; - } - - if (i == 0) - ret = (-rawbits) * 10000 / 1152; - else - ret = (3 - i) * 3 * 10000 / 1152 + (-rawbits) * 10000 / 1152; - - return ret; -} - -/* Version using udelay() */ - -/* - * here we use fixed point arithmetic, with 8 - * fractional bits. that gets us within 0.1% or so of the right average - * frequency, albeit with some jitter in pulse length - Steve - * - * This should use ndelay instead. - */ - -/* To match 8 fractional bits used for pulse/space length */ - -static long send_pulse_homebrew_softcarrier(unsigned long length) -{ - int flag; - unsigned long actual, target, d; - - length <<= 8; - - actual = 0; target = 0; flag = 0; - while (actual < length) { - if (flag) { - off(); - target += space_width; - } else { - on(); - target += pulse_width; - } - d = (target - actual - - LIRC_SERIAL_TRANSMITTER_LATENCY + 128) >> 8; - /* - * Note - we've checked in ioctl that the pulse/space - * widths are big enough so that d is > 0 - */ - udelay(d); - actual += (d << 8) + LIRC_SERIAL_TRANSMITTER_LATENCY; - flag = !flag; - } - return (actual-length) >> 8; -} - -static long send_pulse_homebrew(unsigned long length) -{ - if (length <= 0) - return 0; - - if (softcarrier) - return send_pulse_homebrew_softcarrier(length); - - on(); - safe_udelay(length); - return 0; -} - -static void send_space_irdeo(long length) -{ - if (length <= 0) - return; - - safe_udelay(length); -} - -static void send_space_homebrew(long length) -{ - off(); - if (length <= 0) - return; - safe_udelay(length); -} - -static void rbwrite(int l) -{ - if (lirc_buffer_full(&rbuf)) { - /* no new signals will be accepted */ - pr_debug("Buffer overrun\n"); - return; - } - lirc_buffer_write(&rbuf, (void *)&l); -} - -static void frbwrite(int l) -{ - /* simple noise filter */ - static int pulse, space; - static unsigned int ptr; - - if (ptr > 0 && (l & PULSE_BIT)) { - pulse += l & PULSE_MASK; - if (pulse > 250) { - rbwrite(space); - rbwrite(pulse | PULSE_BIT); - ptr = 0; - pulse = 0; - } - return; - } - if (!(l & PULSE_BIT)) { - if (ptr == 0) { - if (l > 20000) { - space = l; - ptr++; - return; - } - } else { - if (l > 20000) { - space += pulse; - if (space > PULSE_MASK) - space = PULSE_MASK; - space += l; - if (space > PULSE_MASK) - space = PULSE_MASK; - pulse = 0; - return; - } - rbwrite(space); - rbwrite(pulse | PULSE_BIT); - ptr = 0; - pulse = 0; - } - } - rbwrite(l); -} - -static irqreturn_t lirc_irq_handler(int i, void *blah) -{ - ktime_t kt; - int counter, dcd; - u8 status; - ktime_t delkt; - int data; - static int last_dcd = -1; - - if ((sinp(UART_IIR) & UART_IIR_NO_INT)) { - /* not our interrupt */ - return IRQ_NONE; - } - - counter = 0; - do { - counter++; - status = sinp(UART_MSR); - if (counter > RS_ISR_PASS_LIMIT) { - pr_warn("AIEEEE: We're caught!\n"); - break; - } - if ((status & hardware[type].signal_pin_change) - && sense != -1) { - /* get current time */ - kt = ktime_get(); - - /* New mode, written by Trent Piepho - . */ - - /* - * The old format was not very portable. - * We now use an int to pass pulses - * and spaces to user space. 
- * - * If PULSE_BIT is set a pulse has been - * received, otherwise a space has been - * received. The driver needs to know if your - * receiver is active high or active low, or - * the space/pulse sense could be - * inverted. The bits denoted by PULSE_MASK are - * the length in microseconds. Lengths greater - * than or equal to 16 seconds are clamped to - * PULSE_MASK. All other bits are unused. - * This is a much simpler interface for user - * programs, as well as eliminating "out of - * phase" errors with space/pulse - * autodetection. - */ - - /* calc time since last interrupt in microseconds */ - dcd = (status & hardware[type].signal_pin) ? 1 : 0; - - if (dcd == last_dcd) { - pr_warn("ignoring spike: %d %d %llx %llx\n", - dcd, sense, ktime_to_us(kt), - ktime_to_us(lastkt)); - continue; - } - - delkt = ktime_sub(kt, lastkt); - if (ktime_compare(delkt, ktime_set(15, 0)) > 0) { - data = PULSE_MASK; /* really long time */ - if (!(dcd^sense)) { - /* sanity check */ - pr_warn("AIEEEE: %d %d %llx %llx\n", - dcd, sense, ktime_to_us(kt), - ktime_to_us(lastkt)); - /* - * detecting pulse while this - * MUST be a space! - */ - sense = sense ? 0 : 1; - } - } else - data = (int) ktime_to_us(delkt); - frbwrite(dcd^sense ? data : (data|PULSE_BIT)); - lastkt = kt; - last_dcd = dcd; - wake_up_interruptible(&rbuf.wait_poll); - } - } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */ - return IRQ_HANDLED; -} - - -static int hardware_init_port(void) -{ - u8 scratch, scratch2, scratch3; - - /* - * This is a simple port existence test, borrowed from the autoconfig - * function in drivers/serial/8250.c - */ - scratch = sinp(UART_IER); - soutp(UART_IER, 0); -#ifdef __i386__ - outb(0xff, 0x080); -#endif - scratch2 = sinp(UART_IER) & 0x0f; - soutp(UART_IER, 0x0f); -#ifdef __i386__ - outb(0x00, 0x080); -#endif - scratch3 = sinp(UART_IER) & 0x0f; - soutp(UART_IER, scratch); - if (scratch2 != 0 || scratch3 != 0x0f) { - /* we fail, there's nothing here */ - pr_err("port existence test failed, cannot continue\n"); - return -ENODEV; - } - - - - /* Set DLAB 0. */ - soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); - - /* First of all, disable all interrupts */ - soutp(UART_IER, sinp(UART_IER) & - (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); - - /* Clear registers. */ - sinp(UART_LSR); - sinp(UART_RX); - sinp(UART_IIR); - sinp(UART_MSR); - - /* Set line for power source */ - off(); - - /* Clear registers again to be sure. */ - sinp(UART_LSR); - sinp(UART_RX); - sinp(UART_IIR); - sinp(UART_MSR); - - switch (type) { - case LIRC_IRDEO: - case LIRC_IRDEO_REMOTE: - /* setup port to 7N1 @ 115200 Baud */ - /* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */ - - /* Set DLAB 1. */ - soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); - /* Set divisor to 1 => 115200 Baud */ - soutp(UART_DLM, 0); - soutp(UART_DLL, 1); - /* Set DLAB 0 + 7N1 */ - soutp(UART_LCR, UART_LCR_WLEN7); - /* THR interrupt already disabled at this point */ - break; - default: - break; - } - - return 0; -} - -static int lirc_serial_probe(struct platform_device *dev) -{ - int i, nlow, nhigh, result; - - result = devm_request_irq(&dev->dev, irq, lirc_irq_handler, - (share_irq ? IRQF_SHARED : 0), - LIRC_DRIVER_NAME, &hardware); - if (result < 0) { - if (result == -EBUSY) - dev_err(&dev->dev, "IRQ %d busy\n", irq); - else if (result == -EINVAL) - dev_err(&dev->dev, "Bad irq number or handler\n"); - return result; - } - - /* Reserve io region. */ - /* - * Future MMAP-Developers: Attention! 
- * For memory mapped I/O you *might* need to use ioremap() first, - * for the NSLU2 it's done in boot code. - */ - if (((iommap) - && (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift, - LIRC_DRIVER_NAME) == NULL)) - || ((!iommap) - && (devm_request_region(&dev->dev, io, 8, - LIRC_DRIVER_NAME) == NULL))) { - dev_err(&dev->dev, "port %04x already in use\n", io); - dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n"); - dev_warn(&dev->dev, - "or compile the serial port driver as module and\n"); - dev_warn(&dev->dev, "make sure this module is loaded first\n"); - return -EBUSY; - } - - result = hardware_init_port(); - if (result < 0) - return result; - - /* Initialize pulse/space widths */ - init_timing_params(duty_cycle, freq); - - /* If pin is high, then this must be an active low receiver. */ - if (sense == -1) { - /* wait 1/2 sec for the power supply */ - msleep(500); - - /* - * probe 9 times every 0.04s, collect "votes" for - * active high/low - */ - nlow = 0; - nhigh = 0; - for (i = 0; i < 9; i++) { - if (sinp(UART_MSR) & hardware[type].signal_pin) - nlow++; - else - nhigh++; - msleep(40); - } - sense = nlow >= nhigh ? 1 : 0; - dev_info(&dev->dev, "auto-detected active %s receiver\n", - sense ? "low" : "high"); - } else - dev_info(&dev->dev, "Manually using active %s receiver\n", - sense ? "low" : "high"); - - dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io); - return 0; -} - -static int set_use_inc(void *data) -{ - unsigned long flags; - - /* initialize timestamp */ - lastkt = ktime_get(); - - spin_lock_irqsave(&hardware[type].lock, flags); - - /* Set DLAB 0. */ - soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); - - soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI); - - spin_unlock_irqrestore(&hardware[type].lock, flags); - - return 0; -} - -static void set_use_dec(void *data) -{ unsigned long flags; - - spin_lock_irqsave(&hardware[type].lock, flags); - - /* Set DLAB 0. 
*/ - soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); - - /* First of all, disable all interrupts */ - soutp(UART_IER, sinp(UART_IER) & - (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); - spin_unlock_irqrestore(&hardware[type].lock, flags); -} - -static ssize_t lirc_write(struct file *file, const char __user *buf, - size_t n, loff_t *ppos) -{ - int i, count; - unsigned long flags; - long delta = 0; - int *wbuf; - - if (!(hardware[type].features & LIRC_CAN_SEND_PULSE)) - return -EPERM; - - count = n / sizeof(int); - if (n % sizeof(int) || count % 2 == 0) - return -EINVAL; - wbuf = memdup_user(buf, n); - if (IS_ERR(wbuf)) - return PTR_ERR(wbuf); - spin_lock_irqsave(&hardware[type].lock, flags); - if (type == LIRC_IRDEO) { - /* DTR, RTS down */ - on(); - } - for (i = 0; i < count; i++) { - if (i%2) - hardware[type].send_space(wbuf[i] - delta); - else - delta = hardware[type].send_pulse(wbuf[i]); - } - off(); - spin_unlock_irqrestore(&hardware[type].lock, flags); - kfree(wbuf); - return n; -} - -static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) -{ - int result; - u32 __user *uptr = (u32 __user *)arg; - u32 value; - - switch (cmd) { - case LIRC_GET_SEND_MODE: - if (!(hardware[type].features&LIRC_CAN_SEND_MASK)) - return -ENOIOCTLCMD; - - result = put_user(LIRC_SEND2MODE - (hardware[type].features&LIRC_CAN_SEND_MASK), - uptr); - if (result) - return result; - break; - - case LIRC_SET_SEND_MODE: - if (!(hardware[type].features&LIRC_CAN_SEND_MASK)) - return -ENOIOCTLCMD; - - result = get_user(value, uptr); - if (result) - return result; - /* only LIRC_MODE_PULSE supported */ - if (value != LIRC_MODE_PULSE) - return -EINVAL; - break; - - case LIRC_GET_LENGTH: - return -ENOIOCTLCMD; - - case LIRC_SET_SEND_DUTY_CYCLE: - pr_debug("SET_SEND_DUTY_CYCLE\n"); - if (!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE)) - return -ENOIOCTLCMD; - - result = get_user(value, uptr); - if (result) - return result; - if (value <= 0 || value > 100) - return -EINVAL; - return init_timing_params(value, freq); - - case LIRC_SET_SEND_CARRIER: - pr_debug("SET_SEND_CARRIER\n"); - if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER)) - return -ENOIOCTLCMD; - - result = get_user(value, uptr); - if (result) - return result; - if (value > 500000 || value < 20000) - return -EINVAL; - return init_timing_params(duty_cycle, value); - - default: - return lirc_dev_fop_ioctl(filep, cmd, arg); - } - return 0; -} - -static const struct file_operations lirc_fops = { - .owner = THIS_MODULE, - .write = lirc_write, - .unlocked_ioctl = lirc_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = lirc_ioctl, -#endif - .read = lirc_dev_fop_read, - .poll = lirc_dev_fop_poll, - .open = lirc_dev_fop_open, - .release = lirc_dev_fop_close, - .llseek = no_llseek, -}; - -static struct lirc_driver driver = { - .name = LIRC_DRIVER_NAME, - .minor = -1, - .code_length = 1, - .sample_rate = 0, - .data = NULL, - .add_to_buf = NULL, - .rbuf = &rbuf, - .set_use_inc = set_use_inc, - .set_use_dec = set_use_dec, - .fops = &lirc_fops, - .dev = NULL, - .owner = THIS_MODULE, -}; - -static struct platform_device *lirc_serial_dev; - -static int lirc_serial_suspend(struct platform_device *dev, - pm_message_t state) -{ - /* Set DLAB 0. */ - soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); - - /* Disable all interrupts */ - soutp(UART_IER, sinp(UART_IER) & - (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); - - /* Clear registers. 
*/ - sinp(UART_LSR); - sinp(UART_RX); - sinp(UART_IIR); - sinp(UART_MSR); - - return 0; -} - -/* twisty maze... need a forward-declaration here... */ -static void lirc_serial_exit(void); - -static int lirc_serial_resume(struct platform_device *dev) -{ - unsigned long flags; - int result; - - result = hardware_init_port(); - if (result < 0) - return result; - - spin_lock_irqsave(&hardware[type].lock, flags); - /* Enable Interrupt */ - lastkt = ktime_get(); - soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI); - off(); - - lirc_buffer_clear(&rbuf); - - spin_unlock_irqrestore(&hardware[type].lock, flags); - - return 0; -} - -static struct platform_driver lirc_serial_driver = { - .probe = lirc_serial_probe, - .suspend = lirc_serial_suspend, - .resume = lirc_serial_resume, - .driver = { - .name = "lirc_serial", - }, -}; - -static int __init lirc_serial_init(void) -{ - int result; - - /* Init read buffer. */ - result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN); - if (result < 0) - return result; - - result = platform_driver_register(&lirc_serial_driver); - if (result) { - printk("lirc register returned %d\n", result); - goto exit_buffer_free; - } - - lirc_serial_dev = platform_device_alloc("lirc_serial", 0); - if (!lirc_serial_dev) { - result = -ENOMEM; - goto exit_driver_unregister; - } - - result = platform_device_add(lirc_serial_dev); - if (result) - goto exit_device_put; - - return 0; - -exit_device_put: - platform_device_put(lirc_serial_dev); -exit_driver_unregister: - platform_driver_unregister(&lirc_serial_driver); -exit_buffer_free: - lirc_buffer_free(&rbuf); - return result; -} - -static void lirc_serial_exit(void) -{ - platform_device_unregister(lirc_serial_dev); - platform_driver_unregister(&lirc_serial_driver); - lirc_buffer_free(&rbuf); -} - -static int __init lirc_serial_init_module(void) -{ - int result; - - switch (type) { - case LIRC_HOMEBREW: - case LIRC_IRDEO: - case LIRC_IRDEO_REMOTE: - case LIRC_ANIMAX: - case LIRC_IGOR: - /* if nothing specified, use ttyS0/com1 and irq 4 */ - io = io ? io : 0x3f8; - irq = irq ? irq : 4; - break; - default: - return -EINVAL; - } - if (!softcarrier) { - switch (type) { - case LIRC_HOMEBREW: - case LIRC_IGOR: - hardware[type].features &= - ~(LIRC_CAN_SET_SEND_DUTY_CYCLE| - LIRC_CAN_SET_SEND_CARRIER); - break; - } - } - - /* make sure sense is either -1, 0, or 1 */ - if (sense != -1) - sense = !!sense; - - result = lirc_serial_init(); - if (result) - return result; - - driver.features = hardware[type].features; - driver.dev = &lirc_serial_dev->dev; - driver.minor = lirc_register_driver(&driver); - if (driver.minor < 0) { - pr_err("register_chrdev failed!\n"); - lirc_serial_exit(); - return driver.minor; - } - return 0; -} - -static void __exit lirc_serial_exit_module(void) -{ - lirc_unregister_driver(driver.minor); - lirc_serial_exit(); - pr_debug("cleaned up module\n"); -} - - -module_init(lirc_serial_init_module); -module_exit(lirc_serial_exit_module); - -MODULE_DESCRIPTION("Infra-red receiver driver for serial ports."); -MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, " - "Christoph Bartelmus, Andrei Tanas"); -MODULE_LICENSE("GPL"); - -module_param(type, int, S_IRUGO); -MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo," - " 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug," - " 5 = NSLU2 RX:CTS2/TX:GreenLED)"); - -module_param(io, int, S_IRUGO); -MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)"); - -/* some architectures (e.g. 
intel xscale) have memory mapped registers */ -module_param(iommap, bool, S_IRUGO); -MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O" - " (0 = no memory mapped io)"); - -/* - * some architectures (e.g. intel xscale) align the 8bit serial registers - * on 32bit word boundaries. - * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out() - */ -module_param(ioshift, int, S_IRUGO); -MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)"); - -module_param(irq, int, S_IRUGO); -MODULE_PARM_DESC(irq, "Interrupt (4 or 3)"); - -module_param(share_irq, bool, S_IRUGO); -MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)"); - -module_param(sense, int, S_IRUGO); -MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit" - " (0 = active high, 1 = active low )"); - -#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER -module_param(txsense, bool, S_IRUGO); -MODULE_PARM_DESC(txsense, "Sense of transmitter circuit" - " (0 = active high, 1 = active low )"); -#endif - -module_param(softcarrier, bool, S_IRUGO); -MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)"); diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c index 3551aed589c010d5961e9e2f7672d6097a758232..34aac3e2eb871faa957ebb10765547a3a4cf3c1f 100644 --- a/drivers/staging/media/lirc/lirc_zilog.c +++ b/drivers/staging/media/lirc/lirc_zilog.c @@ -1157,8 +1157,8 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n, /* Send the code */ if (ret == 0) { - ret = send_code(tx, (unsigned)command >> 16, - (unsigned)command & 0xFFFF); + ret = send_code(tx, (unsigned int)command >> 16, + (unsigned int)command & 0xFFFF); if (ret == -EPROTO) { mutex_unlock(&ir->ir_lock); mutex_unlock(&tx->client_lock); diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c index aaca39d751a5d50a39d005c439a16a420e70aadb..f71d5f2f179fb6114671ea85a81c86c6bb4a9fec 100644 --- a/drivers/staging/media/omap4iss/iss_csi2.c +++ b/drivers/staging/media/omap4iss/iss_csi2.c @@ -224,7 +224,7 @@ static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2) fmtidx = 3; break; default: - WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n", + WARN(1, "CSI2: pixel format %08x unsupported!\n", fmt->code); return 0; } diff --git a/drivers/staging/media/pulse8-cec/TODO b/drivers/staging/media/pulse8-cec/TODO deleted file mode 100644 index fa6660245e5fb45b513b6b335a68ed7d2d9ca6fa..0000000000000000000000000000000000000000 --- a/drivers/staging/media/pulse8-cec/TODO +++ /dev/null @@ -1,52 +0,0 @@ -This driver needs to mature a bit more and another round of -code cleanups. - -Otherwise it looks to be in good shape. And of course the fact -that the CEC framework is in staging at the moment also prevents -this driver from being mainlined. - -Some notes: - -1) Regarding the "autonomous" mode of the Pulse-Eight: currently this -is disabled, but the idea is that this allows basic functionality -when the PC is off, and it can wake-up the PC through USB. - -To prevent the device to go into autonomous mode the driver would -have to send MSGCODE_SET_CONTROLLED 1 and then send a ping every -30 seconds (in practice once every 15 seconds would be good). When -powering off or going to standby send MSGCODE_SET_CONTROLLED 0 to -turn the autonomous mode back on. - -This needs to be implemented in the driver. Autonomous mode was -added in firmware v2. - -2) Writing to the EEPROM can only be done once every 10 seconds. 
- -3) To use this driver you also need to patch the inputattach utility, -this patch will be submitted once this driver is moved out of staging. - -diff -urN linuxconsoletools-1.4.9/utils/inputattach.c linuxconsoletools-1.4.9.new/utils/inputattach.c ---- linuxconsoletools-1.4.9/utils/inputattach.c 2016-01-09 16:27:02.000000000 +0100 -+++ linuxconsoletools-1.4.9.new/utils/inputattach.c 2016-03-20 11:35:31.707788967 +0100 -@@ -861,6 +861,9 @@ - { "--wacom_iv", "-wacom_iv", "Wacom protocol IV tablet", - B9600, CS8 | CRTSCTS, - SERIO_WACOM_IV, 0x00, 0x00, 0, wacom_iv_init }, -+{ "--pulse8-cec", "-pulse8-cec", "Pulse Eight HDMI CEC dongle", -+ B9600, CS8, -+ SERIO_PULSE8_CEC, 0x00, 0x00, 0, NULL }, - { NULL, NULL, NULL, 0, 0, 0, 0, 0, 0, NULL } - }; - -diff -urN linuxconsoletools-1.4.9/utils/serio-ids.h linuxconsoletools-1.4.9.new/utils/serio-ids.h ---- linuxconsoletools-1.4.9/utils/serio-ids.h 2015-04-26 18:29:42.000000000 +0200 -+++ linuxconsoletools-1.4.9.new/utils/serio-ids.h 2016-03-20 11:41:00.153558539 +0100 -@@ -131,5 +131,8 @@ - #ifndef SERIO_EASYPEN - # define SERIO_EASYPEN 0x3f - #endif -+#ifndef SERIO_PULSE8_CEC -+# define SERIO_PULSE8_CEC 0x40 -+#endif - - #endif diff --git a/drivers/staging/media/s5p-cec/Kconfig b/drivers/staging/media/s5p-cec/Kconfig index 0315fd7ad0f1bd3d900902eefb948bc2b8b603dc..ddfd955da0d40d48e830fe41395f1a2f37dae2e8 100644 --- a/drivers/staging/media/s5p-cec/Kconfig +++ b/drivers/staging/media/s5p-cec/Kconfig @@ -1,6 +1,6 @@ config VIDEO_SAMSUNG_S5P_CEC tristate "Samsung S5P CEC driver" - depends on VIDEO_DEV && MEDIA_CEC && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) + depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) ---help--- This is a driver for Samsung S5P HDMI CEC interface. It uses the generic CEC framework interface. diff --git a/drivers/staging/media/s5p-cec/TODO b/drivers/staging/media/s5p-cec/TODO index f51d5268ac40e6532d4cad60141fe2c23ebeabf7..64f21bab38f59ee9b38613e6b8fcbc7c87a57637 100644 --- a/drivers/staging/media/s5p-cec/TODO +++ b/drivers/staging/media/s5p-cec/TODO @@ -1,7 +1,7 @@ -This driver depends on the CEC framework, which is currently in -staging, so therefor this driver is in staging as well. +This driver requires that userspace sets the physical address. +However, this should be passed on from the corresponding +Samsung HDMI driver. -In addition, this driver requires that userspace sets the physical -address. However, this should be passed on from the corresponding -samsung HDMI driver. It is very annoying if userspace has to do this, -and other than USB CEC adapters this must be handled automatically. +We have to wait until the HDMI notifier framework has been merged +in order to handle this gracefully, until that time this driver +has to remain in staging. 
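The requirement spelled out in the s5p-cec TODO above reduces, on the userspace side, to a single ioctl on the CEC character device: program the adapter's physical address by hand until an HDMI notifier can deliver it from the HDMI driver. The sketch below only illustrates that step; the /dev/cec0 node name and the 1.0.0.0 address are assumptions, not part of this patch — in practice the address comes from the EDID of the connected sink.

/*
 * Hypothetical userspace helper: set the CEC physical address manually,
 * as the staging TODO above says is currently required. The device node
 * and the address value are illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

int main(void)
{
	__u16 pa = 0x1000;	/* 1.0.0.0, normally parsed from the sink's EDID */
	int fd = open("/dev/cec0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/cec0");
		return 1;
	}
	if (ioctl(fd, CEC_ADAP_S_PHYS_ADDR, &pa) < 0) {
		perror("CEC_ADAP_S_PHYS_ADDR");
		return 1;
	}
	return 0;
}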
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c index 1780a08b73c96193f063f4811ef5ee2a366ec5e3..2a07968b5ac6f19fb32671b877651ec52e8dd58b 100644 --- a/drivers/staging/media/s5p-cec/s5p_cec.c +++ b/drivers/staging/media/s5p-cec/s5p_cec.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -204,12 +203,11 @@ static int s5p_cec_probe(struct platform_device *pdev) cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec, CEC_NAME, CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | - CEC_CAP_PASSTHROUGH | CEC_CAP_RC, - 1, &pdev->dev); + CEC_CAP_PASSTHROUGH | CEC_CAP_RC, 1); ret = PTR_ERR_OR_ZERO(cec->adap); if (ret) return ret; - ret = cec_register_adapter(cec->adap); + ret = cec_register_adapter(cec->adap, &pdev->dev); if (ret) { cec_delete_adapter(cec->adap); return ret; @@ -231,7 +229,7 @@ static int s5p_cec_remove(struct platform_device *pdev) return 0; } -static int s5p_cec_runtime_suspend(struct device *dev) +static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev) { struct s5p_cec_dev *cec = dev_get_drvdata(dev); @@ -239,7 +237,7 @@ static int s5p_cec_runtime_suspend(struct device *dev) return 0; } -static int s5p_cec_runtime_resume(struct device *dev) +static int __maybe_unused s5p_cec_runtime_resume(struct device *dev) { struct s5p_cec_dev *cec = dev_get_drvdata(dev); int ret; @@ -263,6 +261,7 @@ static const struct of_device_id s5p_cec_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, s5p_cec_match); static struct platform_driver s5p_cec_pdrv = { .probe = s5p_cec_probe, diff --git a/drivers/staging/media/st-cec/Kconfig b/drivers/staging/media/st-cec/Kconfig index 784d2c600aca100bd6bd17d6223db550ccb1219a..c04283db58d6d17b7917cbca1823ca627aa90868 100644 --- a/drivers/staging/media/st-cec/Kconfig +++ b/drivers/staging/media/st-cec/Kconfig @@ -1,6 +1,6 @@ config VIDEO_STI_HDMI_CEC tristate "STMicroelectronics STiH4xx HDMI CEC driver" - depends on VIDEO_DEV && MEDIA_CEC && (ARCH_STI || COMPILE_TEST) + depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (ARCH_STI || COMPILE_TEST) ---help--- This is a driver for STIH4xx HDMI CEC interface. It uses the generic CEC framework interface. diff --git a/drivers/staging/media/st-cec/TODO b/drivers/staging/media/st-cec/TODO new file mode 100644 index 0000000000000000000000000000000000000000..c61289742c5cdd0084c68c374e61e76868c3a637 --- /dev/null +++ b/drivers/staging/media/st-cec/TODO @@ -0,0 +1,7 @@ +This driver requires that userspace sets the physical address. +However, this should be passed on from the corresponding +ST HDMI driver. + +We have to wait until the HDMI notifier framework has been merged +in order to handle this gracefully, until that time this driver +has to remain in staging. 
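Both CEC drivers touched by this patch are converted to the same calling convention, visible in the s5p_cec.c hunk above and repeated in the stih-cec.c hunk below: cec_allocate_adapter() no longer takes the parent struct device, which is now passed to cec_register_adapter() instead, and a failed registration still needs an explicit cec_delete_adapter(). A minimal probe-time sketch of that pattern follows; the ops table, private pointer and capability mask are placeholders, not code from this patch.

/*
 * Sketch of the allocate-then-register pattern used by both drivers in
 * this patch. "my_cec_adap_ops", "priv" and the capability mask are
 * hypothetical placeholders.
 */
static int my_cec_probe(struct platform_device *pdev)
{
	struct cec_adapter *adap;
	int ret;

	adap = cec_allocate_adapter(&my_cec_adap_ops, priv, "my-cec",
				    CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS |
				    CEC_CAP_TRANSMIT, 1);
	ret = PTR_ERR_OR_ZERO(adap);
	if (ret)
		return ret;

	ret = cec_register_adapter(adap, &pdev->dev);	/* parent device moves here */
	if (ret)
		cec_delete_adapter(adap);	/* not freed automatically on failure */
	return ret;
}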
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c index 214344866a6b854aa04a5041bf94d0665cf5d180..3c25638a961031205823785bde5bf0d6c8c584e3 100644 --- a/drivers/staging/media/st-cec/stih-cec.c +++ b/drivers/staging/media/st-cec/stih-cec.c @@ -16,7 +16,6 @@ #include #include #include -#include #include @@ -108,11 +107,11 @@ /* Constants for CEC_BIT_TOUT_THRESH register */ #define CEC_SBIT_TOUT_47MS BIT(1) -#define CEC_SBIT_TOUT_48MS BIT(0) | BIT(1) +#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1)) #define CEC_SBIT_TOUT_50MS BIT(2) #define CEC_DBIT_TOUT_27MS BIT(0) #define CEC_DBIT_TOUT_28MS BIT(1) -#define CEC_DBIT_TOUT_29MS BIT(0) | BIT(1) +#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1)) /* Constants for CEC_BIT_PULSE_THRESH register */ #define CEC_BIT_LPULSE_03MS BIT(1) @@ -336,13 +335,12 @@ static int stih_cec_probe(struct platform_device *pdev) cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec, CEC_NAME, CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | - CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT, - 1, &pdev->dev); + CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT, 1); ret = PTR_ERR_OR_ZERO(cec->adap); if (ret) return ret; - ret = cec_register_adapter(cec->adap); + ret = cec_register_adapter(cec->adap, &pdev->dev); if (ret) { cec_delete_adapter(cec->adap); return ret; @@ -363,6 +361,7 @@ static const struct of_device_id stih_cec_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, stih_cec_match); static struct platform_driver stih_cec_pdrv = { .probe = stih_cec_probe, diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c index 4659a6450c04f1c30c0695f473ba9875c0c08bbc..ce1764cba5f0ffdb66d3e7ddbab8a1b7a6e6c1ab 100644 --- a/drivers/staging/most/aim-network/networking.c +++ b/drivers/staging/most/aim-network/networking.c @@ -67,10 +67,10 @@ struct net_dev_context { struct most_interface *iface; bool channels_opened; bool is_mamac; - unsigned char link_stat; struct net_device *dev; struct net_dev_channel rx; struct net_dev_channel tx; + struct completion mac_compl; struct list_head list; }; @@ -181,6 +181,7 @@ static int most_nd_set_mac_address(struct net_device *dev, void *p) static int most_nd_open(struct net_device *dev) { struct net_dev_context *nd = dev->ml_priv; + long ret; netdev_info(dev, "open net device\n"); @@ -202,16 +203,30 @@ static int most_nd_open(struct net_device *dev) return -EBUSY; } - nd->channels_opened = true; - - if (nd->is_mamac) { - nd->link_stat = 1; - netif_wake_queue(dev); - } else { + if (!is_valid_ether_addr(dev->dev_addr)) { nd->iface->request_netinfo(nd->iface, nd->tx.ch_id); + ret = wait_for_completion_interruptible_timeout( + &nd->mac_compl, msecs_to_jiffies(5000)); + if (!ret) { + netdev_err(dev, "mac timeout\n"); + ret = -EBUSY; + goto err; + } + + if (ret < 0) { + netdev_warn(dev, "mac waiting interrupted\n"); + goto err; + } } + nd->channels_opened = true; + netif_wake_queue(dev); return 0; + +err: + most_stop_channel(nd->iface, nd->tx.ch_id, &aim); + most_stop_channel(nd->iface, nd->rx.ch_id, &aim); + return ret; } static int most_nd_stop(struct net_device *dev) @@ -277,7 +292,6 @@ static const struct net_device_ops most_nd_ops = { static void most_nd_setup(struct net_device *dev) { - netdev_info(dev, "setup net device\n"); ether_setup(dev); dev->netdev_ops = &most_nd_ops; } @@ -332,6 +346,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx, if (!nd) return -ENOMEM; + init_completion(&nd->mac_compl); nd->iface = iface; spin_lock_irqsave(&list_lock, flags); @@ 
-548,8 +563,7 @@ void most_deliver_netinfo(struct most_interface *iface, { struct net_dev_context *nd; struct net_device *dev; - - pr_info("Received netinfo from %s\n", iface->description); + const u8 *m = mac_addr; nd = get_net_dev_context(iface); if (!nd) @@ -559,15 +573,16 @@ void most_deliver_netinfo(struct most_interface *iface, if (!dev) return; - if (mac_addr) - ether_addr_copy(dev->dev_addr, mac_addr); - - if (nd->link_stat != link_stat) { - nd->link_stat = link_stat; - if (nd->link_stat) - netif_wake_queue(dev); - else - netif_stop_queue(dev); + if (m && is_valid_ether_addr(m)) { + if (!is_valid_ether_addr(dev->dev_addr)) { + netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n", + m[0], m[1], m[2], m[3], m[4], m[5]); + ether_addr_copy(dev->dev_addr, m); + complete(&nd->mac_compl); + } else if (!ether_addr_equal(dev->dev_addr, m)) { + netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n", + m[0], m[1], m[2], m[3], m[4], m[5]); + } } } EXPORT_SYMBOL(most_deliver_netinfo); diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c index 78b2c3dd9bb3f9fb5e126416ad520eefcaf34554..35aee9fbbf02e0a1693dd7ffccdb32ff484206ce 100644 --- a/drivers/staging/most/hdm-dim2/dim2_hdm.c +++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c @@ -306,14 +306,11 @@ static int deliver_netinfo_thread(void *data) static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo) { u8 *data = mbo->virt_address; - u8 *mac = dev->mac_addrs; pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]); dev->link_state = data[18]; pr_info("NIState: %d\n", dev->link_state); - memcpy(mac, data + 19, 6); - pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + memcpy(dev->mac_addrs, data + 19, 6); dev->deliver_netinfo++; wake_up_interruptible(&dev->netinfo_waitq); } diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c index 26c9adb2930898a0a034ea562760efdb6915fa39..d6db0bd65be0df554848aca1b496e431e82d53b3 100644 --- a/drivers/staging/most/hdm-usb/hdm_usb.c +++ b/drivers/staging/most/hdm-usb/hdm_usb.c @@ -97,9 +97,7 @@ struct clear_hold_work { * @cap: channel capabilities * @conf: channel configuration * @dci: direct communication interface of hardware - * @hw_addr: MAC address of hardware * @ep_address: endpoint address table - * @link_stat: link status of hardware * @description: device description * @suffix: suffix for channel name * @channel_lock: synchronize channel access @@ -117,9 +115,7 @@ struct most_dev { struct most_channel_capability *cap; struct most_channel_config *conf; struct most_dci_obj *dci; - u8 hw_addr[6]; u8 *ep_address; - u16 link_stat; char description[MAX_STRING_LEN]; char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN]; spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */ @@ -186,28 +182,9 @@ static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data) 5 * HZ); } -/** - * free_anchored_buffers - free device's anchored items - * @mdev: the device - * @channel: channel ID - * @status: status of MBO termination - */ -static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel, - enum mbo_status_flags status) +static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep) { - struct mbo *mbo; - struct urb *urb; - - while ((urb = usb_get_from_anchor(&mdev->busy_urbs[channel]))) { - mbo = urb->context; - usb_kill_urb(urb); - if (mbo && mbo->complete) { - mbo->status = status; - mbo->processed_length = 0; - 
mbo->complete(mbo); - } - usb_free_urb(urb); - } + return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1); } /** @@ -278,7 +255,7 @@ static int hdm_poison_channel(struct most_interface *iface, int channel) cancel_work_sync(&mdev->clear_work[channel].ws); mutex_lock(&mdev->io_mutex); - free_anchored_buffers(mdev, channel, MBO_E_CLOSE); + usb_kill_anchored_urbs(&mdev->busy_urbs[channel]); if (mdev->padding_active[channel]) mdev->padding_active[channel] = false; @@ -377,33 +354,27 @@ static void hdm_write_completion(struct urb *urb) unsigned long flags; spin_lock_irqsave(lock, flags); - if (urb->status == -ENOENT || urb->status == -ECONNRESET || - !mdev->is_channel_healthy[channel]) { - spin_unlock_irqrestore(lock, flags); - return; - } - if (unlikely(urb->status && urb->status != -ESHUTDOWN)) { - mbo->processed_length = 0; + mbo->processed_length = 0; + mbo->status = MBO_E_INVAL; + if (likely(mdev->is_channel_healthy[channel])) { switch (urb->status) { + case 0: + case -ESHUTDOWN: + mbo->processed_length = urb->actual_length; + mbo->status = MBO_SUCCESS; + break; case -EPIPE: dev_warn(dev, "Broken OUT pipe detected\n"); mdev->is_channel_healthy[channel] = false; - spin_unlock_irqrestore(lock, flags); mdev->clear_work[channel].pipe = urb->pipe; schedule_work(&mdev->clear_work[channel].ws); - return; + break; case -ENODEV: case -EPROTO: mbo->status = MBO_E_CLOSE; break; - default: - mbo->status = MBO_E_INVAL; - break; } - } else { - mbo->status = MBO_SUCCESS; - mbo->processed_length = urb->actual_length; } spin_unlock_irqrestore(lock, flags); @@ -531,40 +502,35 @@ static void hdm_read_completion(struct urb *urb) unsigned long flags; spin_lock_irqsave(lock, flags); - if (urb->status == -ENOENT || urb->status == -ECONNRESET || - !mdev->is_channel_healthy[channel]) { - spin_unlock_irqrestore(lock, flags); - return; - } - if (unlikely(urb->status && urb->status != -ESHUTDOWN)) { - mbo->processed_length = 0; + mbo->processed_length = 0; + mbo->status = MBO_E_INVAL; + if (likely(mdev->is_channel_healthy[channel])) { switch (urb->status) { + case 0: + case -ESHUTDOWN: + mbo->processed_length = urb->actual_length; + mbo->status = MBO_SUCCESS; + if (mdev->padding_active[channel] && + hdm_remove_padding(mdev, channel, mbo)) { + mbo->processed_length = 0; + mbo->status = MBO_E_INVAL; + } + break; case -EPIPE: dev_warn(dev, "Broken IN pipe detected\n"); mdev->is_channel_healthy[channel] = false; - spin_unlock_irqrestore(lock, flags); mdev->clear_work[channel].pipe = urb->pipe; schedule_work(&mdev->clear_work[channel].ws); - return; + break; case -ENODEV: case -EPROTO: mbo->status = MBO_E_CLOSE; break; case -EOVERFLOW: dev_warn(dev, "Babble on IN pipe detected\n"); - default: - mbo->status = MBO_E_INVAL; break; } - } else { - mbo->processed_length = urb->actual_length; - mbo->status = MBO_SUCCESS; - if (mdev->padding_active[channel] && - hdm_remove_padding(mdev, channel, mbo)) { - mbo->processed_length = 0; - mbo->status = MBO_E_INVAL; - } } spin_unlock_irqrestore(lock, flags); @@ -668,6 +634,15 @@ static int hdm_enqueue(struct most_interface *iface, int channel, * @iface: interface * @channel: channel ID * @conf: structure that holds the configuration information + * + * The attached network interface controller (NIC) supports a padding mode + * to avoid short packets on USB, hence increasing the performance due to a + * lower interrupt load. This mode is default for synchronous data and can + * be switched on for isochronous data. 
In case padding is active the + * driver needs to know the frame size of the payload in order to calculate + * the number of bytes it needs to pad when transmitting or to cut off when + * receiving data. + * */ static int hdm_configure_channel(struct most_interface *iface, int channel, struct most_channel_config *conf) @@ -701,6 +676,11 @@ static int hdm_configure_channel(struct most_interface *iface, int channel, !(conf->data_type == MOST_CH_ISOC && conf->packets_per_xact != 0xFF)) { mdev->padding_active[channel] = false; + /* + * Since the NIC's padding mode is not going to be + * used, we can skip the frame size calculations and + * move directly on to exit. + */ goto exit; } @@ -734,56 +714,12 @@ static int hdm_configure_channel(struct most_interface *iface, int channel, - conf->buffer_size; exit: mdev->conf[channel] = *conf; - return 0; -} + if (conf->data_type == MOST_CH_ASYNC) { + u16 ep = mdev->ep_address[channel]; -/** - * hdm_update_netinfo - retrieve latest networking information - * @mdev: device interface - * - * This triggers the USB vendor requests to read the hardware address and - * the current link status of the attached device. - */ -static int hdm_update_netinfo(struct most_dev *mdev) -{ - struct usb_device *usb_device = mdev->usb_device; - struct device *dev = &usb_device->dev; - u16 hi, mi, lo, link; - - if (!is_valid_ether_addr(mdev->hw_addr)) { - if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) { - dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n"); - return -EFAULT; - } - - if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) { - dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n"); - return -EFAULT; - } - - if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) { - dev_err(dev, "Vendor request \"hw_addr_low\" failed\n"); - return -EFAULT; - } - - mutex_lock(&mdev->io_mutex); - mdev->hw_addr[0] = hi >> 8; - mdev->hw_addr[1] = hi; - mdev->hw_addr[2] = mi >> 8; - mdev->hw_addr[3] = mi; - mdev->hw_addr[4] = lo >> 8; - mdev->hw_addr[5] = lo; - mutex_unlock(&mdev->io_mutex); - } - - if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) { - dev_err(dev, "Vendor request \"link status\" failed\n"); - return -EFAULT; + if (start_sync_ep(mdev->usb_device, ep) < 0) + dev_warn(dev, "sync for ep%02x failed", ep); } - - mutex_lock(&mdev->io_mutex); - mdev->link_stat = link; - mutex_unlock(&mdev->io_mutex); return 0; } @@ -807,7 +743,7 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel) } /** - * link_stat_timer_handler - add work to link_stat work queue + * link_stat_timer_handler - schedule work obtaining mac address and link status * @data: pointer to USB device instance * * The handler runs in interrupt context. That's why we need to defer the @@ -823,33 +759,47 @@ static void link_stat_timer_handler(unsigned long data) } /** - * wq_netinfo - work queue function + * wq_netinfo - work queue function to deliver latest networking information * @wq_obj: object that holds data for our deferred work to do * * This retrieves the network interface status of the USB INIC - * and compares it with the current status. If the status has - * changed, it updates the status of the core. 
*/ static void wq_netinfo(struct work_struct *wq_obj) { struct most_dev *mdev = to_mdev_from_work(wq_obj); - int i, prev_link_stat = mdev->link_stat; - u8 prev_hw_addr[6]; + struct usb_device *usb_device = mdev->usb_device; + struct device *dev = &usb_device->dev; + u16 hi, mi, lo, link; + u8 hw_addr[6]; - for (i = 0; i < 6; i++) - prev_hw_addr[i] = mdev->hw_addr[i]; + if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) { + dev_err(dev, "Vendor request 'hw_addr_hi' failed\n"); + return; + } + + if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) { + dev_err(dev, "Vendor request 'hw_addr_mid' failed\n"); + return; + } - if (hdm_update_netinfo(mdev) < 0) + if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) { + dev_err(dev, "Vendor request 'hw_addr_low' failed\n"); + return; + } + + if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) { + dev_err(dev, "Vendor request 'link status' failed\n"); return; - if (prev_link_stat != mdev->link_stat || - prev_hw_addr[0] != mdev->hw_addr[0] || - prev_hw_addr[1] != mdev->hw_addr[1] || - prev_hw_addr[2] != mdev->hw_addr[2] || - prev_hw_addr[3] != mdev->hw_addr[3] || - prev_hw_addr[4] != mdev->hw_addr[4] || - prev_hw_addr[5] != mdev->hw_addr[5]) - most_deliver_netinfo(&mdev->iface, mdev->link_stat, - &mdev->hw_addr[0]); + } + + hw_addr[0] = hi >> 8; + hw_addr[1] = hi; + hw_addr[2] = mi >> 8; + hw_addr[3] = mi; + hw_addr[4] = lo >> 8; + hw_addr[5] = lo; + + most_deliver_netinfo(&mdev->iface, link, hw_addr); } /** @@ -867,7 +817,7 @@ static void wq_clear_halt(struct work_struct *wq_obj) mutex_lock(&mdev->io_mutex); most_stop_enqueue(&mdev->iface, channel); - free_anchored_buffers(mdev, channel, MBO_E_INVAL); + usb_kill_anchored_urbs(&mdev->busy_urbs[channel]); if (usb_clear_halt(mdev->usb_device, pipe)) dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n"); @@ -1053,6 +1003,7 @@ static ssize_t store_value(struct most_dci_obj *dci_obj, u16 val; u16 reg_addr; const char *name = attr->attr.name; + struct usb_device *usb_dev = dci_obj->usb_device; int err = kstrtou16(buf, 16, &val); if (err) @@ -1063,18 +1014,15 @@ static ssize_t store_value(struct most_dci_obj *dci_obj, return count; } - if (!strcmp(name, "arb_value")) { - reg_addr = dci_obj->reg_addr; - } else if (!strcmp(name, "sync_ep")) { - u16 ep = val; - - reg_addr = DRCI_REG_BASE + DRCI_COMMAND + ep * 16; - val = 1; - } else if (get_static_reg_addr(ro_regs, name, ®_addr)) { + if (!strcmp(name, "arb_value")) + err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val); + else if (!strcmp(name, "sync_ep")) + err = start_sync_ep(usb_dev, val); + else if (!get_static_reg_addr(ro_regs, name, ®_addr)) + err = drci_wr_reg(usb_dev, reg_addr, val); + else return -EFAULT; - } - err = drci_wr_reg(dci_obj->usb_device, reg_addr, val); if (err < 0) return err; @@ -1186,7 +1134,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id) struct most_channel_capability *tmp_cap; struct usb_endpoint_descriptor *ep_desc; int ret = 0; - int err; if (!mdev) goto exit_ENOMEM; @@ -1262,13 +1209,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id) tmp_cap++; init_usb_anchor(&mdev->busy_urbs[i]); spin_lock_init(&mdev->channel_lock[i]); - err = drci_wr_reg(usb_dev, - DRCI_REG_BASE + DRCI_COMMAND + - ep_desc->bEndpointAddress * 16, - 1); - if (err < 0) - dev_warn(dev, "DCI Sync for EP %02x failed", - ep_desc->bEndpointAddress); } dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n", le16_to_cpu(usb_dev->descriptor.idVendor), 
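The control flow behind the MOST networking changes earlier in this patch is a plain completion handshake: most_nd_open() waits on nd->mac_compl with a timeout while wq_netinfo() reads the hardware address registers and most_deliver_netinfo() calls complete() once a valid MAC has been copied in. The error handling in most_nd_open() follows the return-value convention of wait_for_completion_interruptible_timeout(): 0 means the timeout expired, a negative value means the sleep was interrupted, and a positive value is the time remaining. A generic sketch of that pattern, with hypothetical names not taken from this patch:

/*
 * Generic completion-with-timeout handshake mirroring the logic above.
 * "struct my_dev", wait_for_mac() and mac_arrived() are hypothetical;
 * init_completion() must run before the first wait, as
 * aim_probe_channel() does in this patch.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_dev {
	struct completion mac_compl;
};

static int wait_for_mac(struct my_dev *d)
{
	long ret;

	ret = wait_for_completion_interruptible_timeout(&d->mac_compl,
							msecs_to_jiffies(5000));
	if (!ret)		/* timed out: the address never showed up */
		return -EBUSY;
	if (ret < 0)		/* interrupted by a signal */
		return ret;
	return 0;		/* completed within the timeout */
}

/* Producer side, e.g. from the netinfo work item once the MAC is known: */
static void mac_arrived(struct my_dev *d)
{
	complete(&d->mac_compl);
}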
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c index 329109c0024f44bfba4a9948009cb143804cb8e9..191404bc590601303cacf4c7b47a44de37d73e1b 100644 --- a/drivers/staging/most/mostcore/core.c +++ b/drivers/staging/most/mostcore/core.c @@ -342,7 +342,7 @@ static ssize_t show_channel_starving(struct most_c_obj *c, } #define create_show_channel_attribute(val) \ - static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL) + static MOST_CHNL_ATTR(val, 0444, show_##val, NULL) create_show_channel_attribute(available_directions); create_show_channel_attribute(available_datatypes); @@ -494,9 +494,7 @@ static ssize_t store_set_packets_per_xact(struct most_c_obj *c, } #define create_channel_attribute(value) \ - static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \ - show_##value, \ - store_##value) + static MOST_CHNL_ATTR(value, 0644, show_##value, store_##value) create_channel_attribute(set_buffer_size); create_channel_attribute(set_number_of_buffers); @@ -690,7 +688,7 @@ static ssize_t show_interface(struct most_inst_obj *instance_obj, } #define create_inst_attribute(value) \ - static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL) + static MOST_INST_ATTR(value, 0444, show_##value, NULL) create_inst_attribute(description); create_inst_attribute(interface); @@ -763,8 +761,6 @@ struct most_aim_obj { struct kobject kobj; struct list_head list; struct most_aim *driver; - char add_link[STRING_SIZE]; - char remove_link[STRING_SIZE]; }; #define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj) @@ -851,7 +847,7 @@ static void most_aim_release(struct kobject *kobj) kfree(aim_obj); } -static ssize_t show_add_link(struct most_aim_obj *aim_obj, +static ssize_t add_link_show(struct most_aim_obj *aim_obj, struct most_aim_attribute *attr, char *buf) { @@ -885,16 +881,16 @@ static ssize_t show_add_link(struct most_aim_obj *aim_obj, * * Examples: * - * Input: "mdev0:ch0@ep_81:my_channel\n" or - * "mdev0:ch0@ep_81:my_channel" + * Input: "mdev0:ch6:my_channel\n" or + * "mdev0:ch6:my_channel" * - * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel" + * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel" * - * Input: "mdev0:ch0@ep_81\n" - * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "" + * Input: "mdev1:ep81\n" + * Output: *a -> "mdev1", *b -> "ep81", *c -> "" * - * Input: "mdev0:ch0@ep_81" - * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL + * Input: "mdev1:ep81" + * Output: *a -> "mdev1", *b -> "ep81", *c == NULL */ static int split_string(char *buf, char **a, char **b, char **c) { @@ -962,13 +958,13 @@ most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch) * Searches for a pair of device and channel and probes the AIM * * Example: - * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link - * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link + * (1) echo "mdev0:ch6:my_rxchannel" >add_link + * (2) echo "mdev1:ep81" >add_link * * (1) would create the device node /dev/my_rxchannel - * (2) would create the device node /dev/mdev0-ch0@ep_81 + * (2) would create the device node /dev/mdev1-ep81 */ -static ssize_t store_add_link(struct most_aim_obj *aim_obj, +static ssize_t add_link_store(struct most_aim_obj *aim_obj, struct most_aim_attribute *attr, const char *buf, size_t len) @@ -984,7 +980,6 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj, size_t max_len = min_t(size_t, len + 1, STRING_SIZE); strlcpy(buffer, buf, max_len); - strlcpy(aim_obj->add_link, buf, max_len); ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod); if (ret) @@ -1019,14 
+1014,7 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj, } static struct most_aim_attribute most_aim_attr_add_link = - __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link); - -static ssize_t show_remove_link(struct most_aim_obj *aim_obj, - struct most_aim_attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link); -} + __ATTR_RW(add_link); /** * store_remove_link - store function for remove_link attribute @@ -1036,9 +1024,9 @@ static ssize_t show_remove_link(struct most_aim_obj *aim_obj, * @len: buffer length * * Example: - * echo -n -e "mdev0:ch0@ep_81\n" >remove_link + * echo "mdev0:ep81" >remove_link */ -static ssize_t store_remove_link(struct most_aim_obj *aim_obj, +static ssize_t remove_link_store(struct most_aim_obj *aim_obj, struct most_aim_attribute *attr, const char *buf, size_t len) @@ -1051,7 +1039,6 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj, size_t max_len = min_t(size_t, len + 1, STRING_SIZE); strlcpy(buffer, buf, max_len); - strlcpy(aim_obj->remove_link, buf, max_len); ret = split_string(buffer, &mdev, &mdev_ch, NULL); if (ret) return ret; @@ -1070,8 +1057,7 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj, } static struct most_aim_attribute most_aim_attr_remove_link = - __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link, - store_remove_link); + __ATTR_WO(remove_link); static struct attribute *most_aim_def_attrs[] = { &most_aim_attr_add_link.attr, @@ -1761,9 +1747,6 @@ struct kobject *most_register_interface(struct most_interface *iface) if (!name_suffix) snprintf(channel_name, STRING_SIZE, "ch%d", i); - else if (name_suffix[0] == '@') - snprintf(channel_name, STRING_SIZE, "ch%d%s", i, - name_suffix); else snprintf(channel_name, STRING_SIZE, "%s", name_suffix); diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index 552a7dcbf50bb8497c007ead50576ff285cbf514..fb0928a4fb97ed9053114a7e90d876d4f5710dae 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -172,29 +172,31 @@ static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv) /* * Ethtool operation */ -static int xlr_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int xlr_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *ecmd) { struct xlr_net_priv *priv = netdev_priv(ndev); struct phy_device *phydev = xlr_get_phydev(priv); if (!phydev) return -ENODEV; - return phy_ethtool_gset(phydev, ecmd); + return phy_ethtool_ksettings_get(phydev, ecmd); } -static int xlr_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int xlr_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *ecmd) { struct xlr_net_priv *priv = netdev_priv(ndev); struct phy_device *phydev = xlr_get_phydev(priv); if (!phydev) return -ENODEV; - return phy_ethtool_sset(phydev, ecmd); + return phy_ethtool_ksettings_set(phydev, ecmd); } static const struct ethtool_ops xlr_ethtool_ops = { - .get_settings = xlr_get_settings, - .set_settings = xlr_set_settings, + .get_link_ksettings = xlr_get_link_ksettings, + .set_link_ksettings = xlr_set_link_ksettings, }; /* @@ -1005,10 +1007,8 @@ static int xlr_net_probe(struct platform_device *pdev) */ adapter = (struct xlr_adapter *) devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL); - if (!adapter) { - err = -ENOMEM; - return err; - } + if (!adapter) + return -ENOMEM; /* * XLR and XLS have 1 and 2 NAE controller respectively diff --git 
a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c index a324322ee0ad1ebec13e280368538056a691fb18..499952c8ef3915ba54afe52347bc220cbad1387c 100644 --- a/drivers/staging/nvec/nvec_ps2.c +++ b/drivers/staging/nvec/nvec_ps2.c @@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev) { struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); struct serio *ser_dev; - char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; - ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); + ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!ser_dev) return -ENOMEM; - ser_dev->id.type = SERIO_PS_PSTHRU; + ser_dev->id.type = SERIO_8042; ser_dev->write = ps2_sendcommand; ser_dev->start = ps2_startstreaming; ser_dev->stop = ps2_stopstreaming; @@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev) serio_register_port(ser_dev); - /* mouse reset */ - nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset)); - return 0; } diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index d02e3e31ed293dbe97fbe5327ec670d8fc91ecc9..8130dfe897451268adcea02c420d3aa61c9809aa 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -259,17 +259,6 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) #endif int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes; - /* - * Limit the MTU to make sure the ethernet packets are between - * 64 bytes and 65535 bytes. - */ - if ((new_mtu + mtu_overhead < VLAN_ETH_ZLEN) || - (new_mtu + mtu_overhead > OCTEON_MAX_MTU)) { - pr_err("MTU must be between %d and %d.\n", - VLAN_ETH_ZLEN - mtu_overhead, - OCTEON_MAX_MTU - mtu_overhead); - return -EINVAL; - } dev->mtu = new_mtu; if ((interface < 2) && @@ -457,7 +446,7 @@ int cvm_oct_common_init(struct net_device *dev) dev->ethtool_ops = &cvm_oct_ethtool_ops; cvm_oct_set_mac_filter(dev); - dev->netdev_ops->ndo_change_mtu(dev, dev->mtu); + dev_set_mtu(dev, dev->mtu); /* * Zero out stats for port so we won't mistakenly show @@ -685,6 +674,11 @@ static int cvm_oct_probe(struct platform_device *pdev) int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE; int qos; struct device_node *pip; + int mtu_overhead = ETH_HLEN + ETH_FCS_LEN; + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + mtu_overhead += VLAN_HLEN; +#endif octeon_mdiobus_force_mod_depencency(); @@ -783,6 +777,8 @@ static int cvm_oct_probe(struct platform_device *pdev) strcpy(dev->name, "pow%d"); for (qos = 0; qos < 16; qos++) skb_queue_head_init(&priv->tx_free_list[qos]); + dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead; + dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead; if (register_netdev(dev) < 0) { pr_err("Failed to register ethernet device for POW\n"); @@ -836,6 +832,8 @@ static int cvm_oct_probe(struct platform_device *pdev) for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++) cvmx_fau_atomic_write32(priv->fau + qos * 4, 0); + dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead; + dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead; switch (priv->imode) { /* These types don't support ports to IPD/PKO */ diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile index 29b9834870fda9962523c07f813db0b992d1a3e4..27af86e05098faca6cbab94d6886ac30407d9e76 100644 --- a/drivers/staging/rtl8188eu/Makefile +++ b/drivers/staging/rtl8188eu/Makefile @@ -53,4 +53,4 @@ r8188eu-y := \ obj-$(CONFIG_R8188EU) := r8188eu.o -ccflags-y += -D__CHECK_ENDIAN__ -I$(srctree)/$(src)/include +ccflags-y += -I$(srctree)/$(src)/include diff --git 
a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index f1f4788dbd86ce4a3f4ec5df9429883483183f42..36109cec87062047054f9a77910ddb40091bb86e 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -308,7 +308,7 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, mod_timer(&pmlmepriv->scan_to_timer, jiffies + msecs_to_jiffies(SCANNING_TIMEOUT)); - rtw_led_control(padapter, LED_CTL_SITE_SURVEY); + LedControl8188eu(padapter, LED_CTL_SITE_SURVEY); pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ } else { @@ -335,7 +335,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter) u8 res = _SUCCESS; - rtw_led_control(padapter, LED_CTL_START_TO_LINK); + LedControl8188eu(padapter, LED_CTL_START_TO_LINK); if (pmlmepriv->assoc_ssid.SsidLength == 0) RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid)); @@ -379,7 +379,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork) struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); - rtw_led_control(padapter, LED_CTL_START_TO_LINK); + LedControl8188eu(padapter, LED_CTL_START_TO_LINK); if (pmlmepriv->assoc_ssid.SsidLength == 0) RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n")); diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c index 14461cf34037b9d211e421efef0bedec4b1cfa51..c1478cff5854a6de98fead2325804861d9cb2614 100644 --- a/drivers/staging/rtl8188eu/core/rtw_led.c +++ b/drivers/staging/rtl8188eu/core/rtw_led.c @@ -30,7 +30,7 @@ void BlinkTimerCallback(unsigned long data) if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped)) return; - schedule_work(&(pLed->BlinkWorkItem)); + schedule_work(&pLed->BlinkWorkItem); } /* */ @@ -60,7 +60,6 @@ void ResetLedStatus(struct LED_871x *pLed) pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedLinkBlinkInProgress = false; - pLed->bLedStartToLinkBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false; } @@ -72,10 +71,10 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed) ResetLedStatus(pLed); - setup_timer(&(pLed->BlinkTimer), BlinkTimerCallback, + setup_timer(&pLed->BlinkTimer, BlinkTimerCallback, (unsigned long)pLed); - INIT_WORK(&(pLed->BlinkWorkItem), BlinkWorkItemCallback); + INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback); } @@ -85,8 +84,8 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed) /* */ void DeInitLed871x(struct LED_871x *pLed) { - cancel_work_sync(&(pLed->BlinkWorkItem)); - del_timer_sync(&(pLed->BlinkTimer)); + cancel_work_sync(&pLed->BlinkWorkItem); + del_timer_sync(&pLed->BlinkTimer); ResetLedStatus(pLed); } @@ -99,7 +98,7 @@ void DeInitLed871x(struct LED_871x *pLed) static void SwLedBlink1(struct LED_871x *pLed) { struct adapter *padapter = pLed->padapter; - struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; u8 bStopBlinking = false; /* Change LED according to BlinkingLedState specified. 
*/ @@ -247,9 +246,9 @@ static void SwLedBlink1(struct LED_871x *pLed) /* ALPHA, added by chiyoko, 20090106 */ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction) { - struct led_priv *ledpriv = &(padapter->ledpriv); - struct LED_871x *pLed = &(ledpriv->SwLed0); - struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); + struct led_priv *ledpriv = &padapter->ledpriv; + struct LED_871x *pLed = &ledpriv->SwLed0; + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; switch (LedAction) { case LED_CTL_POWER_ON: @@ -259,11 +258,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } @@ -282,11 +281,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedLinkBlinkInProgress = true; @@ -306,15 +305,15 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -326,7 +325,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct pLed->BlinkingLedState = RTW_LED_ON; mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA)); - } + } break; case LED_CTL_TX: case LED_CTL_RX: @@ -334,11 +333,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } pLed->bLedBlinkInProgress = true; @@ -354,21 +353,21 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct break; case LED_CTL_START_WPS: /* wait until xinpin finish */ case LED_CTL_START_WPS_BOTTON: - if (!pLed->bLedWPSBlinkInProgress) { + if (!pLed->bLedWPSBlinkInProgress) { if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); 
pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -379,27 +378,27 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct pLed->BlinkingLedState = RTW_LED_ON; mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA)); - } + } break; case LED_CTL_STOP_WPS: if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); else pLed->bLedWPSBlinkInProgress = true; pLed->CurrLedState = LED_BLINK_WPS_STOP; @@ -415,7 +414,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct break; case LED_CTL_STOP_WPS_FAIL: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -431,23 +430,23 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct pLed->CurrLedState = RTW_LED_OFF; pLed->BlinkingLedState = RTW_LED_OFF; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer_sync(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } SwLedOff(padapter, pLed); @@ -475,15 +474,10 @@ void BlinkHandler(struct LED_871x *pLed) void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction) { - struct led_priv *ledpriv = &(padapter->ledpriv); - if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) || (!padapter->hw_init_completed)) return; - if (!ledpriv->bRegUseLed) - return; - if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) && (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX || diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c index ee2dcd05010f6f07c9ff0e6f8648763bfd09dc00..032f783b0d83a8be86ab7609349da6c331158038 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c @@ -801,7 +801,7 
@@ void rtw_indicate_connect(struct adapter *padapter) if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) { set_fwstate(pmlmepriv, _FW_LINKED); - rtw_led_control(padapter, LED_CTL_LINK); + LedControl8188eu(padapter, LED_CTL_LINK); rtw_os_indicate_connect(padapter); } @@ -833,7 +833,7 @@ void rtw_indicate_disconnect(struct adapter *padapter) rtw_os_indicate_disconnect(padapter); _clr_fwstate_(pmlmepriv, _FW_LINKED); - rtw_led_control(padapter, LED_CTL_NO_LINK); + LedControl8188eu(padapter, LED_CTL_NO_LINK); rtw_clear_scan_deny(padapter); } diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c index fb13df586441b6f3cca147a5bf614a6cd314f6dd..d9c114776cab11f4adc6aa1dd7a1f2404ba3678d 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c @@ -133,7 +133,9 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = { {0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */ }; -static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */ +static const struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = { + 0x03 +}; /* use the combination for max channel numbers */ /* * Search the @param channel_num in given @param channel_set @@ -667,10 +669,10 @@ static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pss get_rate_set(padapter, bssrate, &bssrate_len); if (bssrate_len > 8) { - pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen)); - pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen)); + pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen)); + pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen)); } else { - pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen)); + pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen)); } /* add wps_ie for wps2.0 */ @@ -999,7 +1001,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status, } if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) - pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen)); + pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen)); /* add WPS IE ie for wps 2.0 */ if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) { @@ -1120,10 +1122,10 @@ static void issue_assocreq(struct adapter *padapter) if (bssrate_len > 8) { - pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen)); - pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen)); + pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen)); + pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen)); } else { - pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen)); + pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen)); } /* RSN */ @@ -1165,7 +1167,7 @@ static void issue_assocreq(struct adapter *padapter) memcpy(&pmlmeinfo->HT_caps.mcs, MCS_rate_2R, 16); break; } - pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len , (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen)); + pframe = 
rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen)); } } @@ -1194,7 +1196,7 @@ static void issue_assocreq(struct adapter *padapter) } if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) - pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen)); + pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen)); pattrib->last_txcmdsz = pattrib->pktlen; dump_mgntframe(padapter, pmgntframe); @@ -2644,7 +2646,7 @@ static unsigned int OnBeacon(struct adapter *padapter, ret = rtw_check_bcn_info(padapter, pframe, len); if (!ret) { DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n "); - receive_disconnect(padapter, pmlmeinfo->network.MacAddress , 65535); + receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535); return _SUCCESS; } /* update WMM, ERP in the beacon */ @@ -2802,7 +2804,7 @@ static unsigned int OnAuth(struct adapter *padapter, /* checking for challenging txt... */ DBG_88E("checking for challenging txt...\n"); - p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_ , _CHLGETXT_IE_, (int *)&ie_len, + p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len, len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4); if ((p == NULL) || (ie_len <= 0)) { @@ -3046,7 +3048,7 @@ static unsigned int OnAssocReq(struct adapter *padapter, memcpy(supportRate, p+2, ie_len); supportRateNum = ie_len; - p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_ , &ie_len, + p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset); if (p != NULL) { if (supportRateNum <= sizeof(supportRate)) { @@ -3146,7 +3148,7 @@ static unsigned int OnAssocReq(struct adapter *padapter, if (pmlmepriv->wps_beacon_ie) { u8 selected_registrar = 0; - rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR , &selected_registrar, NULL); + rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL); if (!selected_registrar) { DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n"); @@ -3511,7 +3513,7 @@ static unsigned int OnDeAuth(struct adapter *padapter, DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n", reason, GetAddr3Ptr(pframe)); - receive_disconnect(padapter, GetAddr3Ptr(pframe) , reason); + receive_disconnect(padapter, GetAddr3Ptr(pframe), reason); } pmlmepriv->LinkDetectInfo.bBusyTraffic = false; return _SUCCESS; diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c index 0b70fe7d3b7265395108a7a748b8381e5455a3ae..4032121a06f36c82c943b26208940af219204215 100644 --- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c +++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c @@ -56,7 +56,7 @@ static int rtw_hw_suspend(struct adapter *padapter) if (check_fwstate(pmlmepriv, _FW_LINKED)) { _clr_fwstate_(pmlmepriv, _FW_LINKED); - rtw_led_control(padapter, LED_CTL_NO_LINK); + LedControl8188eu(padapter, LED_CTL_NO_LINK); rtw_os_indicate_disconnect(padapter); @@ -94,7 +94,7 @@ static int rtw_hw_resume(struct adapter *padapter) pwrpriv->bips_processing = true; rtw_reset_drv_sw(padapter); - if (pm_netdev_open(pnetdev, false) != 0) { + if (ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev)) != _SUCCESS) { mutex_unlock(&pwrpriv->mutex_lock); goto error_exit; } diff --git 
a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c index b87cbbbee0549284ad36c120b4bbdc07ec52ab5c..3e6edb63d36b5db9d62c8d4ba58fff333403d236 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -66,16 +66,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter) precvpriv->adapter = padapter; - precvpriv->free_recvframe_cnt = NR_RECVFRAME; - precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ); if (!precvpriv->pallocated_frame_buf) return _FAIL; - precvpriv->precv_frame_buf = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ); - - precvframe = (struct recv_frame *)precvpriv->precv_frame_buf; + precvframe = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ); for (i = 0; i < NR_RECVFRAME; i++) { INIT_LIST_HEAD(&(precvframe->list)); @@ -83,15 +79,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter) list_add_tail(&(precvframe->list), &(precvpriv->free_recv_queue.queue)); - rtw_os_recv_resource_alloc(precvframe); - + precvframe->pkt = NULL; precvframe->len = 0; precvframe->adapter = padapter; precvframe++; } - precvpriv->rx_pending_cnt = 1; - res = rtw_hal_init_recv_priv(padapter); setup_timer(&precvpriv->signal_stat_timer, @@ -120,20 +113,11 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv) struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue) { struct recv_frame *hdr; - struct adapter *padapter; - struct recv_priv *precvpriv; hdr = list_first_entry_or_null(&pfree_recv_queue->queue, struct recv_frame, list); - if (hdr) { + if (hdr) list_del_init(&hdr->list); - padapter = hdr->adapter; - if (padapter) { - precvpriv = &padapter->recvpriv; - if (pfree_recv_queue == &precvpriv->free_recv_queue) - precvpriv->free_recvframe_cnt--; - } - } return hdr; } @@ -154,13 +138,8 @@ struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue) int rtw_free_recvframe(struct recv_frame *precvframe, struct __queue *pfree_recv_queue) { - struct adapter *padapter; - struct recv_priv *precvpriv; - if (!precvframe) return _FAIL; - padapter = precvframe->adapter; - precvpriv = &padapter->recvpriv; if (precvframe->pkt) { dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */ precvframe->pkt = NULL; @@ -174,29 +153,16 @@ int rtw_free_recvframe(struct recv_frame *precvframe, list_add_tail(&(precvframe->list), get_list_head(pfree_recv_queue)); - if (padapter != NULL) { - if (pfree_recv_queue == &precvpriv->free_recv_queue) - precvpriv->free_recvframe_cnt++; - } - - spin_unlock_bh(&pfree_recv_queue->lock); + spin_unlock_bh(&pfree_recv_queue->lock); return _SUCCESS; } int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue) { - struct adapter *padapter = precvframe->adapter; - struct recv_priv *precvpriv = &padapter->recvpriv; - list_del_init(&(precvframe->list)); list_add_tail(&(precvframe->list), get_list_head(queue)); - if (padapter != NULL) { - if (queue == &precvpriv->free_recv_queue) - precvpriv->free_recvframe_cnt++; - } - return _SUCCESS; } @@ -1294,7 +1260,7 @@ static int validate_recv_frame(struct adapter *adapter, retval = _FAIL; /* only data frame return _SUCCESS */ break; case WIFI_DATA_TYPE: /* data */ - rtw_led_control(adapter, LED_CTL_RX); + LedControl8188eu(adapter, LED_CTL_RX); pattrib->qos = (subtype & BIT(7)) ? 
1 : 0; retval = validate_recv_data_frame(adapter, precv_frame); if (retval == _FAIL) { @@ -1989,7 +1955,7 @@ static int recv_func_posthandle(struct adapter *padapter, struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; /* DATA FRAME */ - rtw_led_control(padapter, LED_CTL_RX); + LedControl8188eu(padapter, LED_CTL_RX); prframe = decryptor(padapter, prframe); if (prframe == NULL) { diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c index a71e25294adde891e7fef1342ef2c64853107f56..941d1a069d2057af17b2de58266432ba3eb30429 100644 --- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c @@ -310,7 +310,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */ for (i = 0; i < 16; i++) { struct list_head *phead, *plist; - struct recv_frame *prhdr; struct recv_frame *prframe; struct __queue *ppending_recvframe_queue; struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; @@ -327,8 +326,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) plist = phead->next; while (!list_empty(phead)) { - prhdr = container_of(plist, struct recv_frame, list); - prframe = (struct recv_frame *)prhdr; + prframe = container_of(plist, struct recv_frame, list); plist = plist->next; diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c index 0f8b8e0bffdf9826a6f6e8e7b226a9ac6ab53268..b60b126b860e3b95f2c283dc295561b1bdfc6470 100644 --- a/drivers/staging/rtl8188eu/core/rtw_xmit.c +++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c @@ -220,7 +220,6 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv) struct adapter *padapter = pxmitpriv->adapter; struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf; struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; - u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ; u32 num_xmit_extbuf = NR_XMIT_EXTBUFF; if (pxmitpriv->pxmit_frame_buf == NULL) @@ -233,7 +232,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv) } for (i = 0; i < NR_XMITBUFF; i++) { - rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ)); + rtw_os_xmit_resource_free(pxmitbuf); pxmitbuf++; } @@ -243,7 +242,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv) /* free xmit extension buff */ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf; for (i = 0; i < num_xmit_extbuf; i++) { - rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ)); + rtw_os_xmit_resource_free(pxmitbuf); pxmitbuf++; } @@ -1064,7 +1063,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct frg_inx++; - if (bmcst || rtw_endofpktfile(&pktfile)) { + if (bmcst || pktfile.pkt_len == 0) { pattrib->nr_frags = frg_inx; pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? 
llc_sz : 0) + @@ -1677,7 +1676,7 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt) } pxmitframe->pkt = *ppkt; - rtw_led_control(padapter, LED_CTL_TX); + LedControl8188eu(padapter, LED_CTL_TX); pxmitframe->attrib.qsel = pxmitframe->attrib.priority; diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c index d983a8029f4c24a874759e7c0b94187cbf7ecf51..16476e73501162b1b6f41b25f14d35cec7f8225a 100644 --- a/drivers/staging/rtl8188eu/hal/odm.c +++ b/drivers/staging/rtl8188eu/hal/odm.c @@ -991,7 +991,6 @@ void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm) { pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true; pDM_Odm->RFCalibrateInfo.TXPowercount = 0; - pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false; if (*(pDM_Odm->mp_mode) != 1) pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true; MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl); diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c index 5192ef70bcfcd415fc0023f39029a561f2d358b7..35c91e06cc47b53d62dbb081f4307051ab0abaa1 100644 --- a/drivers/staging/rtl8188eu/hal/phy.c +++ b/drivers/staging/rtl8188eu/hal/phy.c @@ -40,12 +40,11 @@ static u32 cal_bit_shift(u32 bitmask) u32 phy_query_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask) { - u32 return_value = 0, original_value, bit_shift; + u32 original_value, bit_shift; original_value = usb_read32(adapt, regaddr); bit_shift = cal_bit_shift(bitmask); - return_value = (original_value & bitmask) >> bit_shift; - return return_value; + return (original_value & bitmask) >> bit_shift; } void phy_set_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask, u32 data) @@ -119,12 +118,11 @@ static void rf_serial_write(struct adapter *adapt, u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path, u32 reg_addr, u32 bit_mask) { - u32 original_value, readback_value, bit_shift; + u32 original_value, bit_shift; original_value = rf_serial_read(adapt, rf_path, reg_addr); bit_shift = cal_bit_shift(bit_mask); - readback_value = (original_value & bit_mask) >> bit_shift; - return readback_value; + return (original_value & bit_mask) >> bit_shift; } void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path, @@ -210,13 +208,6 @@ static void phy_set_bw_mode_callback(struct adapter *adapt) u8 reg_bw_opmode; u8 reg_prsr_rsc; - if (hal_data->rf_chip == RF_PSEUDO_11N) - return; - - /* There is no 40MHz mode in RF_8225. 
*/ - if (hal_data->rf_chip == RF_8225) - return; - if (adapt->bDriverStopped) return; @@ -265,8 +256,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt) } /* Set RF related register */ - if (hal_data->rf_chip == RF_6052) - rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW); + rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW); } void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth, @@ -286,7 +276,6 @@ void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth, static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel) { - u8 rf_path; u32 param1, param2; struct hal_data_8188e *hal_data = adapt->HalData; @@ -294,12 +283,10 @@ static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel) param1 = RF_CHNLBW; param2 = channel; - for (rf_path = 0; rf_path < hal_data->NumTotalRFPath; rf_path++) { - hal_data->RfRegChnlVal[rf_path] = (hal_data->RfRegChnlVal[rf_path] & - 0xfffffc00) | param2; - phy_set_rf_reg(adapt, (enum rf_radio_path)rf_path, param1, - bRFRegOffsetMask, hal_data->RfRegChnlVal[rf_path]); - } + hal_data->RfRegChnlVal[0] = (hal_data->RfRegChnlVal[0] & + 0xfffffc00) | param2; + phy_set_rf_reg(adapt, 0, param1, + bRFRegOffsetMask, hal_data->RfRegChnlVal[0]); } void rtw_hal_set_chan(struct adapter *adapt, u8 channel) @@ -307,9 +294,6 @@ void rtw_hal_set_chan(struct adapter *adapt, u8 channel) struct hal_data_8188e *hal_data = adapt->HalData; u8 tmpchannel = hal_data->CurrentChannel; - if (hal_data->rf_chip == RF_PSEUDO_11N) - return; - if (channel == 0) channel = 1; @@ -407,9 +391,8 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt) s8 ofdm_index[2], cck_index = 0; s8 ofdm_index_old[2] = {0, 0}, cck_index_old = 0; u32 i = 0, j = 0; - bool is2t = false; - u8 ofdm_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB */ + u8 ofdm_min_index = 6; /* OFDM BB Swing should be less than +3.0dB */ s8 ofdm_index_mapping[2][index_mapping_NUM_88E] = { /* 2.4G, decrease power */ {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}, @@ -427,18 +410,12 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt) dm_txpwr_track_setpwr(dm_odm); dm_odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++; - dm_odm->RFCalibrateInfo.bTXPowerTrackingInit = true; dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317; thermal_val = (u8)rtw_hal_read_rfreg(adapt, RF_PATH_A, RF_T_METER_88E, 0xfc00); - if (is2t) - rf = 2; - else - rf = 1; - if (thermal_val) { /* Query OFDM path A default setting */ ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D; @@ -450,17 +427,6 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt) } } - /* Query OFDM path B default setting */ - if (is2t) { - ele_d = phy_query_bb_reg(adapt, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D; - for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { - if (ele_d == (OFDMSwingTable[i]&bMaskOFDM_D)) { - ofdm_index_old[1] = (u8)i; - break; - } - } - } - /* Query CCK default setting From 0xa24 */ temp_cck = dm_odm->RFCalibrateInfo.RegA24; @@ -479,8 +445,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt) dm_odm->RFCalibrateInfo.ThermalValue_LCK = thermal_val; dm_odm->RFCalibrateInfo.ThermalValue_IQK = thermal_val; - for (i = 0; i < rf; i++) - dm_odm->RFCalibrateInfo.OFDM_index[i] = ofdm_index_old[i]; + dm_odm->RFCalibrateInfo.OFDM_index[0] = ofdm_index_old[0]; dm_odm->RFCalibrateInfo.CCK_index = cck_index_old; } @@ -539,13 +504,11 @@ 
void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt) offset = index_mapping_NUM_88E-1; /* Updating ofdm_index values with new OFDM / CCK offset */ - for (i = 0; i < rf; i++) { - ofdm_index[i] = dm_odm->RFCalibrateInfo.OFDM_index[i] + ofdm_index_mapping[j][offset]; - if (ofdm_index[i] > OFDM_TABLE_SIZE_92D-1) - ofdm_index[i] = OFDM_TABLE_SIZE_92D-1; - else if (ofdm_index[i] < ofdm_min_index) - ofdm_index[i] = ofdm_min_index; - } + ofdm_index[0] = dm_odm->RFCalibrateInfo.OFDM_index[0] + ofdm_index_mapping[j][offset]; + if (ofdm_index[0] > OFDM_TABLE_SIZE_92D-1) + ofdm_index[0] = OFDM_TABLE_SIZE_92D-1; + else if (ofdm_index[0] < ofdm_min_index) + ofdm_index[0] = ofdm_min_index; cck_index = dm_odm->RFCalibrateInfo.CCK_index + ofdm_index_mapping[j][offset]; if (cck_index > CCK_TABLE_SIZE-1) diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c index 2f3edf0f850a6e6b4eb1089bc4a94ba9dc28b180..8f8c9de6a9bc4577fa2057a838bdf37693d8380c 100644 --- a/drivers/staging/rtl8188eu/hal/rf.c +++ b/drivers/staging/rtl8188eu/hal/rf.c @@ -61,8 +61,6 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel) (powerlevel[idx1]<<8) | (powerlevel[idx1]<<16) | (powerlevel[idx1]<<24); - if (tx_agc[idx1] > 0x20 && hal_data->ExternalPA) - tx_agc[idx1] = 0x20; } } else { if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) { @@ -139,17 +137,15 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm, (powerbase0<<8) | powerbase0; *(ofdmbase+i) = powerbase0; } - for (i = 0; i < adapt->HalData->NumTotalRFPath; i++) { - /* Check HT20 to HT40 diff */ - if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20) - powerlevel[i] = pwr_level_bw20[i]; - else - powerlevel[i] = pwr_level_bw40[i]; - powerbase1 = powerlevel[i]; - powerbase1 = (powerbase1<<24) | (powerbase1<<16) | - (powerbase1<<8) | powerbase1; - *(mcs_base+i) = powerbase1; - } + /* Check HT20 to HT40 diff */ + if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20) + powerlevel[0] = pwr_level_bw20[0]; + else + powerlevel[0] = pwr_level_bw40[0]; + powerbase1 = powerlevel[0]; + powerbase1 = (powerbase1<<24) | (powerbase1<<16) | + (powerbase1<<8) | powerbase1; + *mcs_base = powerbase1; } static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel, u8 index, u32 *powerbase0, u32 *powerbase1, diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c index dde64417e66a10d890ada7ec570918e1024993d8..9712d7b74345f1167e7c90e64bb06e5883badafe 100644 --- a/drivers/staging/rtl8188eu/hal/rf_cfg.c +++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c @@ -230,79 +230,33 @@ static bool rf6052_conf_para(struct adapter *adapt) { struct hal_data_8188e *hal_data = adapt->HalData; u32 u4val = 0; - u8 rfpath; bool rtstatus = true; struct bb_reg_def *pphyreg; - for (rfpath = 0; rfpath < hal_data->NumTotalRFPath; rfpath++) { - pphyreg = &hal_data->PHYRegDef[rfpath]; + pphyreg = &hal_data->PHYRegDef[RF90_PATH_A]; + u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV); - switch (rfpath) { - case RF90_PATH_A: - case RF90_PATH_C: - u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs, - BRFSI_RFENV); - break; - case RF90_PATH_B: - case RF90_PATH_D: - u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs, - BRFSI_RFENV << 16); - break; - } + phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); + udelay(1); - phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); - udelay(1); + phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 
0x1); + udelay(1); - phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1); - udelay(1); + phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREADDREAALENGTH, 0x0); + udelay(1); - phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, - B3WIREADDREAALENGTH, 0x0); - udelay(1); + phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREDATALENGTH, 0x0); + udelay(1); - phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, - B3WIREDATALENGTH, 0x0); - udelay(1); + rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt); - switch (rfpath) { - case RF90_PATH_A: - rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt); - break; - case RF90_PATH_B: - rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt); - break; - case RF90_PATH_C: - break; - case RF90_PATH_D: - break; - } - - switch (rfpath) { - case RF90_PATH_A: - case RF90_PATH_C: - phy_set_bb_reg(adapt, pphyreg->rfintfs, - BRFSI_RFENV, u4val); - break; - case RF90_PATH_B: - case RF90_PATH_D: - phy_set_bb_reg(adapt, pphyreg->rfintfs, - BRFSI_RFENV << 16, u4val); - break; - } - - if (!rtstatus) - return false; - } + phy_set_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV, u4val); return rtstatus; } static bool rtl88e_phy_rf6052_config(struct adapter *adapt) { - struct hal_data_8188e *hal_data = adapt->HalData; - - hal_data->NumTotalRFPath = 1; - return rf6052_conf_para(adapt); } diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c index 385bc2f56f2f07b6c146e070954f779761353c26..0ce7db723a5da292a4398b0813ff3dec321475c8 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c @@ -135,7 +135,6 @@ void rtw_hal_read_chip_version(struct adapter *padapter) dump_chip_info(ChipVersion); pHalData->VersionID = ChipVersion; - pHalData->NumTotalRFPath = 1; } void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet) @@ -470,7 +469,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto { struct hal_data_8188e *pHalData = padapter->HalData; struct txpowerinfo24g pwrInfo24G; - u8 rfPath, ch, group; + u8 ch, group; u8 bIn24G, TxCount; Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail); @@ -478,34 +477,32 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto if (!AutoLoadFail) pHalData->bTXPowerDataReadFromEEPORM = true; - for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) { - for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) { - bIn24G = Hal_GetChnlGroup88E(ch, &group); - if (bIn24G) { - pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group]; - if (ch == 14) - pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4]; - else - pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group]; - } - if (bIn24G) { - DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch); - DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_CCK_Base[rfPath][ch]); - DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_BW40_Base[rfPath][ch]); - } + for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) { + bIn24G = Hal_GetChnlGroup88E(ch, &group); + if (bIn24G) { + pHalData->Index24G_CCK_Base[0][ch] = pwrInfo24G.IndexCCK_Base[0][group]; + if (ch == 14) + pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][4]; + else + pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][group]; } - for (TxCount = 0; TxCount < 
MAX_TX_COUNT; TxCount++) { - pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount]; - pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount]; - pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount]; - pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount]; - DBG_88E("======= TxCount %d =======\n", TxCount); - DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]); - DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]); - DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]); - DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]); + if (bIn24G) { + DBG_88E("======= Path %d, Channel %d =======\n", 0, ch); + DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_CCK_Base[0][ch]); + DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_BW40_Base[0][ch]); } } + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + pHalData->CCK_24G_Diff[0][TxCount] = pwrInfo24G.CCK_Diff[0][TxCount]; + pHalData->OFDM_24G_Diff[0][TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount]; + pHalData->BW20_24G_Diff[0][TxCount] = pwrInfo24G.BW20_Diff[0][TxCount]; + pHalData->BW40_24G_Diff[0][TxCount] = pwrInfo24G.BW40_Diff[0][TxCount]; + DBG_88E("======= TxCount %d =======\n", TxCount); + DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->CCK_24G_Diff[0][TxCount]); + DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->OFDM_24G_Diff[0][TxCount]); + DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW20_24G_Diff[0][TxCount]); + DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW40_24G_Diff[0][TxCount]); + } /* 2010/10/19 MH Add Regulator recognize for CU. */ if (!AutoLoadFail) { diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c index 780666a755ee30514414d850e0fc7d2c14c5161e..12879afb992e02d347802a1aee0dab62bbdf9bce 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c @@ -46,16 +46,12 @@ void SwLedOff(struct adapter *padapter, struct LED_871x *pLed) LedCfg = usb_read8(padapter, REG_LEDCFG2);/* 0x4E */ - if (padapter->HalData->bLedOpenDrain) { - /* Open-drain arrangement for controlling the LED) */ - LedCfg &= 0x90; /* Set to software control. */ - usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3))); - LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG); - LedCfg &= 0xFE; - usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg); - } else { - usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3) | BIT(5) | BIT(6))); - } + /* Open-drain arrangement for controlling the LED) */ + LedCfg &= 0x90; /* Set to software control. 
*/ + usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3))); + LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG); + LedCfg &= 0xFE; + usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg); exit: pLed->bLedOn = false; } @@ -69,10 +65,6 @@ void rtw_hal_sw_led_init(struct adapter *padapter) { struct led_priv *pledpriv = &(padapter->ledpriv); - pledpriv->bRegUseLed = true; - pledpriv->LedControlHandler = LedControl8188eu; - padapter->HalData->bLedOpenDrain = true; - InitLed871x(padapter, &(pledpriv->SwLed0)); } diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c index d0495a16ff798eb8d225ac074c80983b35f2977b..0fc093eb7a771c7c95157196628506fe363538d8 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c @@ -37,19 +37,15 @@ int rtw_hal_init_recv_priv(struct adapter *padapter) /* init recv_buf */ _rtw_init_queue(&precvpriv->free_recv_buf_queue); - precvpriv->pallocated_recv_buf = + precvpriv->precv_buf = kcalloc(NR_RECVBUFF, sizeof(struct recv_buf), GFP_KERNEL); - if (!precvpriv->pallocated_recv_buf) { + if (!precvpriv->precv_buf) { res = _FAIL; RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("alloc recv_buf fail!\n")); goto exit; } - - precvpriv->precv_buf = precvpriv->pallocated_recv_buf; - - - precvbuf = (struct recv_buf *)precvpriv->precv_buf; + precvbuf = precvpriv->precv_buf; for (i = 0; i < NR_RECVBUFF; i++) { res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf); @@ -58,27 +54,18 @@ int rtw_hal_init_recv_priv(struct adapter *padapter) precvbuf->adapter = padapter; precvbuf++; } - precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF; skb_queue_head_init(&precvpriv->rx_skb_queue); { int i; - size_t tmpaddr = 0; - size_t alignm = 0; struct sk_buff *pskb = NULL; skb_queue_head_init(&precvpriv->free_recv_skb_queue); for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) { pskb = __netdev_alloc_skb(padapter->pnetdev, - MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, - GFP_KERNEL); + MAX_RECVBUF_SZ, GFP_KERNEL); if (pskb) { kmemleak_not_leak(pskb); - pskb->dev = padapter->pnetdev; - tmpaddr = (size_t)pskb->data; - alignm = tmpaddr & (RECVBUFF_ALIGN_SZ-1); - skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignm)); - skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb); } @@ -95,14 +82,14 @@ void rtw_hal_free_recv_priv(struct adapter *padapter) struct recv_buf *precvbuf; struct recv_priv *precvpriv = &padapter->recvpriv; - precvbuf = (struct recv_buf *)precvpriv->precv_buf; + precvbuf = precvpriv->precv_buf; for (i = 0; i < NR_RECVBUFF; i++) { usb_free_urb(precvbuf->purb); precvbuf++; } - kfree(precvpriv->pallocated_recv_buf); + kfree(precvpriv->precv_buf); if (skb_queue_len(&precvpriv->rx_skb_queue)) DBG_88E(KERN_WARNING "rx_skb_queue not empty\n"); diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c index 7692ca495ee5734f3e1c9a59ae046d304639a6bf..3675edb6194213800fc73599bf6323dd538fd5bb 100644 --- a/drivers/staging/rtl8188eu/hal/usb_halinit.c +++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c @@ -562,9 +562,6 @@ static void InitUsbAggregationSetting(struct adapter *Adapter) /* Rx aggregation setting */ usb_AggSettingRxUpdate(Adapter); - - /* 201/12/10 MH Add for USB agg mode dynamic switch. 
*/ - Adapter->HalData->UsbRxHighSpeedMode = false; } static void _InitBeaconParameters(struct adapter *Adapter) @@ -604,11 +601,6 @@ static void _BBTurnOnBlock(struct adapter *Adapter) phy_set_bb_reg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1); } -enum { - Antenna_Lfet = 1, - Antenna_Right = 2, -}; - static void _InitAntenna_Selection(struct adapter *Adapter) { struct hal_data_8188e *haldata = Adapter->HalData; @@ -994,19 +986,16 @@ u32 rtw_hal_inirp_init(struct adapter *Adapter) RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("===> usb_inirp_init\n")); - precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR; - /* issue Rx irp to receive data */ - precvbuf = (struct recv_buf *)precvpriv->precv_buf; + precvbuf = precvpriv->precv_buf; for (i = 0; i < NR_RECVBUFF; i++) { - if (usb_read_port(Adapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) { + if (usb_read_port(Adapter, RECV_BULK_IN_ADDR, precvbuf) == false) { RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n")); status = _FAIL; goto exit; } precvbuf++; - precvpriv->free_recv_buf_queue_cnt--; } exit: @@ -1107,18 +1096,12 @@ static void _ReadPROMContent( readAdapterInfo_8188EU(Adapter); } -static void _ReadRFType(struct adapter *Adapter) -{ - Adapter->HalData->rf_chip = RF_6052; -} - void rtw_hal_read_chip_info(struct adapter *Adapter) { unsigned long start = jiffies; MSG_88E("====> %s\n", __func__); - _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */ _ReadPROMContent(Adapter); MSG_88E("<==== %s in %d ms\n", __func__, diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h index 0976a761b280893bb62f586da8486e269bae5ca4..550ad62e706403541fdf4300d3f41ed0867987de 100644 --- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h +++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h @@ -99,17 +99,6 @@ enum phy_rate_tx_offset_area { RA_OFFSET_HT_CCK, }; -/* BB/RF related */ -enum RF_TYPE_8190P { - RF_TYPE_MIN, /* 0 */ - RF_8225 = 1, /* 1 11b/g RF for verification only */ - RF_8256 = 2, /* 2 11b/g/n */ - RF_8258 = 3, /* 3 11a/b/g/n RF */ - RF_6052 = 4, /* 4 11b/g/n RF */ - /* TODO: We should remove this psudo PHY RF after we get new RF. */ - RF_PSEUDO_11N = 5, /* 5, It is a temporality RF. 
*/ -}; - struct bb_reg_def { u32 rfintfs; /* set software control: */ /* 0x870~0x877[8 bytes] */ diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h index 32326fd1dd24f64800749399e7642ccab8de4634..e86419e525d869133be39cf5786984c61053b4c7 100644 --- a/drivers/staging/rtl8188eu/include/drv_types.h +++ b/drivers/staging/rtl8188eu/include/drv_types.h @@ -156,8 +156,6 @@ struct adapter { u8 hw_init_completed; void *cmdThread; - void (*intf_start)(struct adapter *adapter); - void (*intf_stop)(struct adapter *adapter); struct net_device *pnetdev; struct net_device *pmondev; diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h index fa032b0c12ffe8704d73b8ff9c5fb2a537e78e21..e1114a95d442b4bc5a8d3afe2b1be6c5da350898 100644 --- a/drivers/staging/rtl8188eu/include/hal_intf.h +++ b/drivers/staging/rtl8188eu/include/hal_intf.h @@ -190,6 +190,7 @@ void rtw_hal_set_odm_var(struct adapter *padapter, u32 rtw_hal_inirp_init(struct adapter *padapter); void rtw_hal_inirp_deinit(struct adapter *padapter); +void usb_intf_stop(struct adapter *padapter); s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe); s32 rtw_hal_mgnt_xmit(struct adapter *padapter, diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h index 805f52e108b287c7f1ce058a8dafe69b78e5f587..4fb3bb07ceaa6a46ab7f67c018f8901c85fdb6a5 100644 --- a/drivers/staging/rtl8188eu/include/odm.h +++ b/drivers/staging/rtl8188eu/include/odm.h @@ -80,11 +80,6 @@ #define DM_DIG_FA_TH2_LPS 30 /* 30 lps */ #define RSSI_OFFSET_DIG 0x05; -/* ANT Test */ -#define ANTTESTALL 0x00 /* Ant A or B will be Testing */ -#define ANTTESTA 0x01 /* Ant A will be Testing */ -#define ANTTESTB 0x02 /* Ant B will be testing */ - struct rtw_dig { u8 Dig_Enable_Flag; u8 Dig_Ext_Port_Stage; @@ -590,7 +585,6 @@ struct odm_rf_cal { s32 RegEBC; u8 TXPowercount; - bool bTXPowerTrackingInit; bool bTXPowerTracking; u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking * as default */ diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h index dbd7dc4f87dd4dbbf3cf80dbc3f7fff8a95ba544..97d3d850418405a3ab38f0cdfec3ea414dc1268d 100644 --- a/drivers/staging/rtl8188eu/include/osdep_intf.h +++ b/drivers/staging/rtl8188eu/include/osdep_intf.h @@ -35,7 +35,8 @@ int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname); struct net_device *rtw_init_netdev(struct adapter *padapter); u16 rtw_recv_select_queue(struct sk_buff *skb); -int pm_netdev_open(struct net_device *pnetdev, u8 bnormal); +int netdev_open(struct net_device *pnetdev); +int ips_netdrv_open(struct adapter *padapter); void rtw_ips_dev_unload(struct adapter *padapter); int rtw_ips_pwr_up(struct adapter *padapter); void rtw_ips_pwr_down(struct adapter *padapter); diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h index 7550d58f6b5b23cbbd51b75cc02aed2b050388a4..9b43a1314bd5e80111c8cbf1468348ec966b2c8e 100644 --- a/drivers/staging/rtl8188eu/include/recv_osdep.h +++ b/drivers/staging/rtl8188eu/include/recv_osdep.h @@ -29,8 +29,6 @@ int rtw_recv_indicatepkt(struct adapter *adapter, void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup); -void rtw_os_recv_resource_alloc(struct recv_frame *recvfr); - int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf); void rtw_init_recv_timer(struct recv_reorder_ctrl 
*preorder_ctrl); diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h index 7c81e3f3d12e843515f4061d34f336cfc3199d94..9330361da4adf9b838de27bd1c1b3e3b06d323f5 100644 --- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h +++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h @@ -200,10 +200,6 @@ struct hal_data_8188e { u16 BasicRateSet; - /* rf_ctrl */ - u8 rf_chip; - u8 NumTotalRFPath; - u8 BoardType; /* EEPROM setting. */ @@ -265,14 +261,6 @@ struct hal_data_8188e { u32 CCKTxPowerLevelOriginalOffset; u8 CrystalCap; - u32 AntennaTxPath; /* Antenna path Tx */ - u32 AntennaRxPath; /* Antenna path Rx */ - u8 BluetoothCoexist; - u8 ExternalPA; - - u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/ - - u8 b1x1RecvCombine; /* for 1T1R receive combining */ u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */ @@ -316,14 +304,6 @@ struct hal_data_8188e { u8 OutEpQueueSel; u8 OutEpNumber; - /* Add for USB aggreation mode dynamic shceme. */ - bool UsbRxHighSpeedMode; - - /* 2010/11/22 MH Add for slim combo debug mode selective. */ - /* This is used for fix the drawback of CU TSMC-A/UMC-A cut. - * HW auto suspend ability. Close BT clock. */ - bool SlimComboDbg; - u16 EfuseUsedBytes; /* Auto FSM to Turn On, include clock, isolation, power control diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h index 80832a5f07328253f5f8b3adf5dab48ec6517c66..0d8bf51c72a99c24a86cd5f066d4e953bbefac15 100644 --- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h +++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h @@ -51,9 +51,7 @@ enum rx_packet_type { }; #define INTERRUPT_MSG_FORMAT_LEN 60 -void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf); void rtl8188eu_recv_tasklet(void *priv); -void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy); void rtl8188e_process_phy_info(struct adapter *padapter, struct recv_frame *prframe); void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy); diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h index f2054ef70358ac6456371ff4290966e6934949f9..607d1ba56a468d5aab384aaf108c738d232bfd6f 100644 --- a/drivers/staging/rtl8188eu/include/rtw_led.h +++ b/drivers/staging/rtl8188eu/include/rtw_led.h @@ -70,12 +70,9 @@ struct LED_871x { struct timer_list BlinkTimer; /* Timer object for led blinking. */ - u8 bSWLedCtrl; - /* ALPHA, added by chiyoko, 20090106 */ u8 bLedNoLinkBlinkInProgress; u8 bLedLinkBlinkInProgress; - u8 bLedStartToLinkBlinkInProgress; u8 bLedScanBlinkInProgress; struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to * manipulate H/W to blink LED. 
*/ @@ -91,18 +88,9 @@ void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction); struct led_priv { /* add for led control */ struct LED_871x SwLed0; - u8 bRegUseLed; - void (*LedControlHandler)(struct adapter *padapter, - enum LED_CTL_MODE LedAction); /* add for led control */ }; -#define rtw_led_control(adapt, action) \ - do { \ - if ((adapt)->ledpriv.LedControlHandler) \ - (adapt)->ledpriv.LedControlHandler((adapt), (action)); \ - } while (0) - void BlinkTimerCallback(unsigned long data); void BlinkWorkItemCallback(struct work_struct *work); diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h index 9434b869c5e9d1737254a9e14becce3e0b9d4b42..18fb7e7b227332611bf909c1d942fddf870128ec 100644 --- a/drivers/staging/rtl8188eu/include/rtw_mlme.h +++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h @@ -504,7 +504,7 @@ void rtw_scan_abort(struct adapter *adapter); int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len); int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, - uint in_len, uint initial_out_len); + uint in_len, uint initial_out_len); void rtw_init_registrypriv_dev_network(struct adapter *adapter); void rtw_update_registrypriv_dev_network(struct adapter *adapter); diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h index 49d973881a04e41862610ebbafadc2ad9767ae10..052af7b891dad9f83a05fa5108d65191a0808ca0 100644 --- a/drivers/staging/rtl8188eu/include/rtw_recv.h +++ b/drivers/staging/rtl8188eu/include/rtw_recv.h @@ -139,8 +139,6 @@ struct rx_pkt_attrib { #define SN_EQUAL(a, b) (a == b) #define REORDER_WAIT_TIME (50) /* (ms) */ -#define RECVBUFF_ALIGN_SZ 8 - #define RXDESC_SIZE 24 #define RXDESC_OFFSET RXDESC_SIZE @@ -166,9 +164,7 @@ struct recv_priv { struct __queue free_recv_queue; struct __queue recv_pending_queue; struct __queue uc_swdec_pending_queue; - u8 *pallocated_frame_buf; - u8 *precv_frame_buf; - uint free_recvframe_cnt; + void *pallocated_frame_buf; struct adapter *adapter; u32 bIsAnyNonBEPkts; u64 rx_bytes; @@ -176,17 +172,12 @@ struct recv_priv { u64 rx_drop; u64 last_rx_bytes; - uint ff_hwaddr; - u8 rx_pending_cnt; - struct tasklet_struct irq_prepare_beacon_tasklet; struct tasklet_struct recv_tasklet; struct sk_buff_head free_recv_skb_queue; struct sk_buff_head rx_skb_queue; - u8 *pallocated_recv_buf; - u8 *precv_buf; /* 4 alignment */ + struct recv_buf *precv_buf; /* 4 alignment */ struct __queue free_recv_buf_queue; - u32 free_recv_buf_queue_cnt; /* For display the phy informatiom */ u8 is_signal_dbg; /* for debug */ u8 signal_strength_dbg; /* for debug */ diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h index 78d9b6e035bfcc40e4b147a5804a9f0c5f7ba5ce..fb586365d2e573bf2d94b444a157a6390570431a 100644 --- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h +++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h @@ -53,7 +53,7 @@ u8 usb_read8(struct adapter *adapter, u32 addr); u16 usb_read16(struct adapter *adapter, u32 addr); u32 usb_read32(struct adapter *adapter, u32 addr); -u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem); +u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf); void usb_read_port_cancel(struct adapter *adapter); int usb_write8(struct adapter *adapter, u32 addr, u8 val); diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h 
b/drivers/staging/rtl8188eu/include/xmit_osdep.h index f96ca6af934d66580d76bd3bff3888e9279e2681..959ef4b3066c1d7dc27eb0c350f60b26a9da80f0 100644 --- a/drivers/staging/rtl8188eu/include/xmit_osdep.h +++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h @@ -41,13 +41,11 @@ void rtw_os_xmit_schedule(struct adapter *padapter); int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz); -void rtw_os_xmit_resource_free(struct adapter *padapter, - struct xmit_buf *pxmitbuf, u32 free_sz); +void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf); uint rtw_remainder_len(struct pkt_file *pfile); void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile); uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen); -int rtw_endofpktfile(struct pkt_file *pfile); void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt); void rtw_os_xmit_complete(struct adapter *padapter, diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index d976e5e18d50ef7a469e437585da074d7f3b3af0..c9c9821cfc32c662f579eb77358c34ad963d0882 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c @@ -145,7 +145,6 @@ static netdev_tx_t mon_xmit(struct sk_buff *skb, struct net_device *dev) static const struct net_device_ops mon_netdev_ops = { .ndo_start_xmit = mon_xmit, - .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index 40691f1ec5070ec49854acadaa0ab5b03c9336d8..8fc3fadf065f91183ab897b501c990d5024f96f7 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c @@ -144,7 +144,6 @@ static bool rtw_monitor_enable; module_param_named(monitor_enable, rtw_monitor_enable, bool, 0444); MODULE_PARM_DESC(monitor_enable, "Enable monitor inferface (default: false)"); -static int netdev_open(struct net_device *pnetdev); static int netdev_close(struct net_device *pnetdev); static void loadparam(struct adapter *padapter, struct net_device *pnetdev) @@ -596,10 +595,9 @@ static int _netdev_open(struct net_device *pnetdev) pr_info("can't init mlme_ext_priv\n"); goto netdev_open_error; } - if (padapter->intf_start) - padapter->intf_start(padapter); + rtw_hal_inirp_init(padapter); - rtw_led_control(padapter, LED_CTL_NO_LINK); + LedControl8188eu(padapter, LED_CTL_NO_LINK); padapter->bup = true; } @@ -630,7 +628,7 @@ static int _netdev_open(struct net_device *pnetdev) return -1; } -static int netdev_open(struct net_device *pnetdev) +int netdev_open(struct net_device *pnetdev) { int ret; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev); @@ -642,7 +640,7 @@ static int netdev_open(struct net_device *pnetdev) return ret; } -static int ips_netdrv_open(struct adapter *padapter) +int ips_netdrv_open(struct adapter *padapter) { int status = _SUCCESS; @@ -658,8 +656,7 @@ static int ips_netdrv_open(struct adapter *padapter) goto netdev_open_error; } - if (padapter->intf_start) - padapter->intf_start(padapter); + rtw_hal_inirp_init(padapter); rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); mod_timer(&padapter->mlmepriv.dynamic_chk_timer, @@ -684,7 +681,7 @@ int rtw_ips_pwr_up(struct adapter *padapter) result = ips_netdrv_open(padapter); - rtw_led_control(padapter, LED_CTL_NO_LINK); + LedControl8188eu(padapter, LED_CTL_NO_LINK); DBG_88E("<=== rtw_ips_pwr_up.............. 
in %dms\n", jiffies_to_msecs(jiffies - start_time)); @@ -699,7 +696,7 @@ void rtw_ips_pwr_down(struct adapter *padapter) padapter->net_closed = true; - rtw_led_control(padapter, LED_CTL_POWER_OFF); + LedControl8188eu(padapter, LED_CTL_POWER_OFF); rtw_ips_dev_unload(padapter); DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n", @@ -712,25 +709,13 @@ void rtw_ips_dev_unload(struct adapter *padapter) rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL); - if (padapter->intf_stop) - padapter->intf_stop(padapter); + usb_intf_stop(padapter); /* s5. */ if (!padapter->bSurpriseRemoved) rtw_hal_deinit(padapter); } -int pm_netdev_open(struct net_device *pnetdev, u8 bnormal) -{ - int status; - - if (bnormal) - status = netdev_open(pnetdev); - else - status = (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1); - return status; -} - static int netdev_close(struct net_device *pnetdev) { struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev); @@ -763,7 +748,7 @@ static int netdev_close(struct net_device *pnetdev) /* s2-4. */ rtw_free_network_queue(padapter, true); /* Close LED */ - rtw_led_control(padapter, LED_CTL_POWER_OFF); + LedControl8188eu(padapter, LED_CTL_POWER_OFF); } RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n")); diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c index 7cd2655f27fe1952e28e92c28f90e0f96ecfce72..6ff836f481daca9f935bb2903bdbcff05158378f 100644 --- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c +++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c @@ -12,8 +12,6 @@ * more details. * ******************************************************************************/ - - #define _OSDEP_SERVICE_C_ #include @@ -24,9 +22,10 @@ #include /* -* Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE -* @return: one of RTW_STATUS_CODE -*/ + * Translate the OS dependent @param error_code to OS independent + * RTW_STATUS_CODE + * @return: one of RTW_STATUS_CODE + */ inline int RTW_STATUS_CODE(int error_code) { if (error_code >= 0) @@ -43,22 +42,20 @@ void *rtw_malloc2d(int h, int w, int size) { int j; - void **a = kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL); - if (!a) { - pr_info("%s: alloc memory fail!\n", __func__); - return NULL; - } + void **a = kzalloc(h * sizeof(void *) + h * w * size, GFP_KERNEL); + if (!a) + goto out; for (j = 0; j < h; j++) - a[j] = ((char *)(a+h)) + j*w*size; - + a[j] = ((char *)(a + h)) + j * w * size; +out: return a; } -void _rtw_init_queue(struct __queue *pqueue) +void _rtw_init_queue(struct __queue *pqueue) { - INIT_LIST_HEAD(&(pqueue->queue)); - spin_lock_init(&(pqueue->lock)); + INIT_LIST_HEAD(&pqueue->queue); + spin_lock_init(&pqueue->lock); } struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv) diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c index 103cdb4ed0736c83fca2965c5cbb1e3fa6ba02ef..b85824ec5354c62a6985d92100aa99512377fb17 100644 --- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c @@ -21,12 +21,6 @@ #include #include -/* alloc os related resource in struct recv_frame */ -void rtw_os_recv_resource_alloc(struct recv_frame *precvframe) -{ - precvframe->pkt = NULL; -} - /* alloc os related resource in struct recv_buf */ int rtw_os_recvbuf_resource_alloc(struct adapter *padapter, struct recv_buf *precvbuf) diff --git 
a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 68e1e6bbe87f162ee44576216aeee2787661d0b3..c6316ffa64d38398f11ee400929b4db7a711a786 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -141,16 +141,7 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf) } -static void usb_intf_start(struct adapter *padapter) -{ - RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_start\n")); - - rtw_hal_inirp_init(padapter); - - RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_start\n")); -} - -static void usb_intf_stop(struct adapter *padapter) +void usb_intf_stop(struct adapter *padapter) { RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n")); @@ -183,8 +174,7 @@ static void rtw_dev_unload(struct adapter *padapter) if (padapter->xmitpriv.ack_tx) rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP); /* s3. */ - if (padapter->intf_stop) - padapter->intf_stop(padapter); + usb_intf_stop(padapter); /* s4. */ if (!padapter->pwrctrlpriv.bInternalAutoSuspend) rtw_stop_drv_threads(padapter); @@ -294,7 +284,7 @@ static int rtw_resume_process(struct adapter *padapter) pwrpriv->bkeepfwalive = false; pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive); - if (pm_netdev_open(pnetdev, true) != 0) { + if (netdev_open(pnetdev) != 0) { mutex_unlock(&pwrpriv->mutex_lock); goto exit; } @@ -366,9 +356,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj, if (!padapter->HalData) DBG_88E("cant not alloc memory for HAL DATA\n"); - padapter->intf_start = &usb_intf_start; - padapter->intf_stop = &usb_intf_stop; - /* step read_chip_version */ rtw_hal_read_chip_version(padapter); diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c index d0d591501b739b0122d27cdc43a535e6888c5557..e2dbe1b4afd3f84621dddb4c9596ee10146585e3 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c @@ -167,27 +167,26 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb) } if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */ if (pattrib->physt) - update_recvframe_phyinfo_88e(precvframe, (struct phy_stat *)pphy_status); + update_recvframe_phyinfo_88e(precvframe, pphy_status); if (rtw_recv_entry(precvframe) != _SUCCESS) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n")); } - } else { - /* enqueue recvframe to txrtp queue */ - if (pattrib->pkt_rpt_type == TX_REPORT1) { - /* CCX-TXRPT ack for xmit mgmt frames. */ - handle_txrpt_ccx_88e(adapt, precvframe->rx_data); - } else if (pattrib->pkt_rpt_type == TX_REPORT2) { - ODM_RA_TxRPT2Handle_8188E( - &haldata->odmpriv, - precvframe->rx_data, - pattrib->pkt_len, - pattrib->MacIDValidEntry[0], - pattrib->MacIDValidEntry[1] - ); - } else if (pattrib->pkt_rpt_type == HIS_REPORT) { - interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data); - } + } else if (pattrib->pkt_rpt_type == TX_REPORT1) { + /* CCX-TXRPT ack for xmit mgmt frames. 
*/ + handle_txrpt_ccx_88e(adapt, precvframe->rx_data); + rtw_free_recvframe(precvframe, pfree_recv_queue); + } else if (pattrib->pkt_rpt_type == TX_REPORT2) { + ODM_RA_TxRPT2Handle_8188E( + &haldata->odmpriv, + precvframe->rx_data, + pattrib->pkt_len, + pattrib->MacIDValidEntry[0], + pattrib->MacIDValidEntry[1] + ); + rtw_free_recvframe(precvframe, pfree_recv_queue); + } else if (pattrib->pkt_rpt_type == HIS_REPORT) { + interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data); rtw_free_recvframe(precvframe, pfree_recv_queue); } pkt_cnt--; @@ -253,7 +252,7 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i /* Acquire IO memory for vendorreq */ pIo_buf = kmalloc(MAX_USB_IO_CTL_SIZE, GFP_ATOMIC); - if (pIo_buf == NULL) { + if (!pIo_buf) { DBG_88E("[%s] pIo_buf == NULL\n", __func__); status = -ENOMEM; goto release_mutex; @@ -384,8 +383,6 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs) RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n")); - precvpriv->rx_pending_cnt--; - if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", @@ -403,7 +400,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs) RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n")); precvbuf->reuse = true; - usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); + usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf); DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__); } else { skb_put(precvbuf->pskb, purb->actual_length); @@ -414,7 +411,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs) precvbuf->pskb = NULL; precvbuf->reuse = false; - usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); + usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf); } } else { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status)); @@ -437,7 +434,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs) case -EOVERFLOW: adapt->HalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL; precvbuf->reuse = true; - usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); + usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf); break; case -EINPROGRESS: DBG_88E("ERROR: URB IS IN PROGRESS!\n"); @@ -448,17 +445,14 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs) } } -u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem) +u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf) { struct urb *purb = NULL; - struct recv_buf *precvbuf = (struct recv_buf *)rmem; struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter); struct recv_priv *precvpriv = &adapter->recvpriv; struct usb_device *pusbd = pdvobj->pusbdev; int err; unsigned int pipe; - size_t tmpaddr = 0; - size_t alignment = 0; u32 ret = _SUCCESS; @@ -483,22 +477,16 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem) /* re-assign for linux based on skb */ if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) { - precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ); + precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ); if (precvbuf->pskb == NULL) { 
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n")); DBG_88E("#### usb_read_port() alloc_skb fail!#####\n"); return _FAIL; } - - tmpaddr = (size_t)precvbuf->pskb->data; - alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1); - skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment)); } else { /* reuse skb */ precvbuf->reuse = false; } - precvpriv->rx_pending_cnt++; - purb = precvbuf->purb; /* translate DMA FIFO addr to pipehandle */ @@ -528,7 +516,7 @@ void rtw_hal_inirp_deinit(struct adapter *padapter) int i; struct recv_buf *precvbuf; - precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf; + precvbuf = padapter->recvpriv.precv_buf; DBG_88E("%s\n", __func__); diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c index 4b1b04e007151aa260b1f3c36f11e40f0f7e4e2f..e097c619ed1bcec57e336cc1ea877fb6ec538a56 100644 --- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c @@ -59,11 +59,6 @@ uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen) return len; } -int rtw_endofpktfile(struct pkt_file *pfile) -{ - return pfile->pkt_len == 0; -} - int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz) { int i; @@ -85,8 +80,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb return _SUCCESS; } -void rtw_os_xmit_resource_free(struct adapter *padapter, - struct xmit_buf *pxmitbuf, u32 free_sz) +void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf) { int i; diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile index cb18db74d78cf82957cd9d6e1cef4bb49b7b3fb6..7101fcc8871bba87dbf84a0dfbde4e19cf63eabf 100644 --- a/drivers/staging/rtl8192e/Makefile +++ b/drivers/staging/rtl8192e/Makefile @@ -17,5 +17,3 @@ obj-$(CONFIG_RTLLIB_CRYPTO_TKIP) += rtllib_crypt_tkip.o obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o obj-$(CONFIG_RTL8192E) += rtl8192e/ - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c index 25725b158eca5c1aa0f9be956860e2f394031470..017fe04ebe2d511cb53eb8039de5cae4b935616a 100644 --- a/drivers/staging/rtl8192e/dot11d.c +++ b/drivers/staging/rtl8192e/dot11d.c @@ -11,7 +11,7 @@ * * Contact Information: * wlanfae -******************************************************************************/ + ******************************************************************************/ #include "dot11d.h" struct channel_list { diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile index a2c4fb4ba1af9e2dd5c6d030285cc95d9dcb79c2..176a4a2b8b209466383326fcf9f03c3591b9c3d2 100644 --- a/drivers/staging/rtl8192e/rtl8192e/Makefile +++ b/drivers/staging/rtl8192e/rtl8192e/Makefile @@ -16,5 +16,3 @@ r8192e_pci-objs := \ rtl_wx.o \ obj-$(CONFIG_RTL8192E) += r8192e_pci.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c index f9003a28cae25de9aa9c7325ecc97ee6b3d84821..757ffd4f2f89c2f7d676043f1a4fb443bb647867 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c @@ -49,7 +49,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data, else skb = dev_alloc_skb(frag_length + 4); - if (skb == NULL) { + if (!skb) { rt_status = false; goto Failed; } diff --git 
a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c index 9aaa85526eb8d3ae1086f4fadd6ff2e3d018e2f6..bbe399010be15a873fac744fefd683ddde1af542 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c @@ -202,7 +202,5 @@ bool rtl92e_init_fw(struct net_device *dev) download_firmware_fail: netdev_err(dev, "%s: Failed to initialize firmware.\n", __func__); - rt_status = false; - return rt_status; - + return false; } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 4c30eea45f89df5792217aa4594b17bd54773e13..8a9172aa817848801fb0c8324e1021c8f5de32fa 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -367,7 +367,7 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap) } } -static struct rtllib_qos_parameters def_qos_parameters = { +static const struct rtllib_qos_parameters def_qos_parameters = { {cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)}, {cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)}, {2, 2, 2, 2}, @@ -2545,7 +2545,6 @@ static const struct net_device_ops rtl8192_netdev_ops = { .ndo_set_rx_mode = _rtl92e_set_multicast, .ndo_set_mac_address = _rtl92e_set_mac_adr, .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, .ndo_start_xmit = rtllib_xmit, }; diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c index c7fd1b1653d639898979251151ddaa9393255d33..20260af49ee7eda5b50df7229a02b142d0c9fe21 100644 --- a/drivers/staging/rtl8192e/rtl819x_BAProc.c +++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c @@ -11,7 +11,7 @@ * * Contact Information: * wlanfae -******************************************************************************/ + ******************************************************************************/ #include #include #include diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c index dd9c0c8683614cccd20d90dc2915523908be41ca..cded0f43cd330b395687be2433a5c26ea566f570 100644 --- a/drivers/staging/rtl8192e/rtl819x_HTProc.c +++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c @@ -11,7 +11,7 @@ * * Contact Information: * wlanfae -******************************************************************************/ + ******************************************************************************/ #include "rtllib.h" #include "rtl819x_HT.h" u8 MCS_FILTER_ALL[16] = { diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c index a966a8e490abfe3ba9aca46107b30bf765b62560..48bbd9e8a52f3444ff345a934a0721faf556ccc7 100644 --- a/drivers/staging/rtl8192e/rtl819x_TSProc.c +++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c @@ -11,7 +11,7 @@ * * Contact Information: * wlanfae -******************************************************************************/ + ******************************************************************************/ #include "rtllib.h" #include #include "rtl819x_TS.h" diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index c743182b933e65d79d938fcca0d136507b18c014..e5ba7d1a809fdb30922bb1649e58eb4f426bf45f 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -130,7 +130,7 @@ rtllib_frag_cache_get(struct rtllib_device *ieee, ETH_ALEN /* WDS */ + /* QOS Control */ (RTLLIB_QOS_HAS_SEQ(fc) ? 
2 : 0)); - if (skb == NULL) + if (!skb) return NULL; entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; @@ -986,7 +986,7 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee, ether_addr_copy(src, hdr->addr4); ether_addr_copy(bssid, ieee->current_network.bssid); break; - case 0: + default: ether_addr_copy(dst, hdr->addr1); ether_addr_copy(src, hdr->addr2); ether_addr_copy(bssid, hdr->addr3); @@ -1201,6 +1201,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb, if (crypt && !(fc & RTLLIB_FCTL_WEP) && rtllib_is_eapol_frame(ieee, skb, hdrlen)) { struct eapol *eap = (struct eapol *)(skb->data + 24); + netdev_dbg(ieee->dev, "RX: IEEE 802.1X EAPOL frame: %s\n", eap_get_type(eap->type)); } @@ -1430,7 +1431,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb, /* skb: hdr + (possible reassembled) full plaintext payload */ payload = skb->data + hdrlen; rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC); - if (rxb == NULL) + if (!rxb) goto rx_dropped; /* to parse amsdu packets */ diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index da74dc49b95ec7fa498c3225447b4f3875dbdc47..1430ba27b0494803242f42bf9a2bc32ed9eddb2d 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -1524,6 +1524,7 @@ static void rtllib_associate_complete_wq(void *data) struct rtllib_device, associate_complete_wq); struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl); + netdev_info(ieee->dev, "Associated successfully\n"); if (!ieee->is_silent_reset) { netdev_info(ieee->dev, "normal associate\n"); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c index 6fa96d57d31629aebfd49ec81cd6f13e0f46cf81..e68850897adf8a8656d1c96ea3b139364282f121 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c @@ -553,7 +553,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr) memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */ break; - case 0: + default: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index 89cbc077a48d896b016df1bc3ed29336ca6ef10d..82f654305414cb9ede533b5459842a0ee7e8a5a7 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -129,7 +129,7 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee, 8 /* WEP */ + ETH_ALEN /* WDS */ + (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */); - if (skb == NULL) + if (!skb) return NULL; entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; @@ -1079,7 +1079,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, memcpy(src, hdr->addr4, ETH_ALEN); memcpy(bssid, ieee->current_network.bssid, ETH_ALEN); break; - case 0: + default: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); memcpy(bssid, hdr->addr3, ETH_ALEN); diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 457eeb5f5239d502c8407c692d9a0ef2df90bf48..fdb03dccb449031b07231c24ade33fc933b6d77c 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -4930,7 +4930,6 @@ static 
const struct net_device_ops rtl8192_netdev_ops = { .ndo_set_rx_mode = r8192_set_multicast, .ndo_set_mac_address = r8192_set_mac_adr, .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, .ndo_start_xmit = ieee80211_xmit, }; diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h index c9ea50daffff0a28ca919404cc270380d45ee676..b8a17097843487594e3f6f7d506ad0305db76e45 100644 --- a/drivers/staging/rtl8712/osdep_service.h +++ b/drivers/staging/rtl8712/osdep_service.h @@ -63,15 +63,6 @@ static inline u32 end_of_queue_search(struct list_head *head, return (head == plist); } -static inline void sleep_schedulable(int ms) -{ - u32 delta; - - delta = msecs_to_jiffies(ms);/*(ms)*/ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(delta); -} - static inline void flush_signals_thread(void) { if (signal_pending(current)) diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h index 57d5d2db3c7755035c1584b06ad27b0d1f3c9e08..84456bb560ef70a416034ded9cc2582cca19710d 100644 --- a/drivers/staging/rtl8712/rtl8712_hal.h +++ b/drivers/staging/rtl8712/rtl8712_hal.h @@ -68,14 +68,14 @@ struct fw_priv { /*8-bytes alignment required*/ unsigned char signature_0; /*0x12: CE product, 0x92: IT product*/ unsigned char signature_1; /*0x87: CE product, 0x81: IT product*/ unsigned char hci_sel; /*0x81: PCI-AP, 01:PCIe, 02: 92S-U, 0x82: USB-AP, - * 0x12: 72S-U, 03:SDIO - */ + * 0x12: 72S-U, 03:SDIO + */ unsigned char chip_version; /*the same value as register value*/ unsigned char customer_ID_0; /*customer ID low byte*/ unsigned char customer_ID_1; /*customer ID high byte*/ unsigned char rf_config; /*0x11: 1T1R, 0x12: 1T2R, 0x92: 1T2R turbo, - * 0x22: 2T2R - */ + * 0x22: 2T2R + */ unsigned char usb_ep_num; /* 4: 4EP, 6: 6EP, 11: 11EP*/ /*--- long word 1 ----*/ unsigned char regulatory_class_0; /*regulatory class bit map 0*/ @@ -99,8 +99,8 @@ struct fw_priv { /*8-bytes alignment required*/ unsigned char qos_en; /*1: QoS enable*/ unsigned char bw_40MHz_en; /*1: 40MHz BW enable*/ unsigned char AMSDU2AMPDU_en; /*1: 4181 convert AMSDU to AMPDU, - * 0: disable - */ + * 0: disable + */ unsigned char AMPDU_en; /*1: 11n AMPDU enable*/ unsigned char rate_control_offload; /*1: FW offloads,0: driver handles*/ unsigned char aggregation_offload; /*1: FW offloads,0: driver handles*/ diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c index a8e237e480c9eb6db5ae23f509705534936c1d7d..317aeeed38e8c5cf0fe03057bf6a250572de3b14 100644 --- a/drivers/staging/rtl8712/rtl8712_led.c +++ b/drivers/staging/rtl8712/rtl8712_led.c @@ -355,7 +355,7 @@ static void SwLedBlink1(struct LED_871x *pLed) } pLed->bLedScanBlinkInProgress = false; } else { - if (pLed->bLedOn) + if (pLed->bLedOn) pLed->BlinkingLedState = LED_STATE_OFF; else pLed->BlinkingLedState = LED_STATE_ON; @@ -390,7 +390,7 @@ static void SwLedBlink1(struct LED_871x *pLed) pLed->BlinkTimes = 0; pLed->bLedBlinkInProgress = false; } else { - if (pLed->bLedOn) + if (pLed->bLedOn) pLed->BlinkingLedState = LED_STATE_OFF; else pLed->BlinkingLedState = LED_STATE_ON; @@ -460,7 +460,7 @@ static void SwLedBlink2(struct LED_871x *pLed) } pLed->bLedScanBlinkInProgress = false; } else { - if (pLed->bLedOn) + if (pLed->bLedOn) pLed->BlinkingLedState = LED_STATE_OFF; else pLed->BlinkingLedState = LED_STATE_ON; @@ -667,7 +667,7 @@ static void SwLedBlink4(struct LED_871x *pLed) msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); pLed->bLedBlinkInProgress = false; } 
else { - if (pLed->bLedOn) + if (pLed->bLedOn) pLed->BlinkingLedState = LED_STATE_OFF; else pLed->BlinkingLedState = LED_STATE_ON; @@ -764,7 +764,7 @@ static void SwLedBlink5(struct LED_871x *pLed) msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA)); pLed->bLedBlinkInProgress = false; } else { - if (pLed->bLedOn) + if (pLed->bLedOn) pLed->BlinkingLedState = LED_STATE_OFF; else pLed->BlinkingLedState = LED_STATE_ON; @@ -946,7 +946,7 @@ static void SwLedControlMode1(struct _adapter *padapter, if (psitesurveyctrl->traffic_busy && check_fwstate(pmlmepriv, _FW_LINKED)) ; /* dummy branch */ - else if (!pLed->bLedScanBlinkInProgress) { + else if (!pLed->bLedScanBlinkInProgress) { if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress) { @@ -970,7 +970,7 @@ static void SwLedControlMode1(struct _adapter *padapter, pLed->BlinkingLedState = LED_STATE_ON; mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA)); - } + } break; case LED_CTL_TX: case LED_CTL_RX: @@ -1000,7 +1000,7 @@ static void SwLedControlMode1(struct _adapter *padapter, case LED_CTL_START_WPS: /*wait until xinpin finish */ case LED_CTL_START_WPS_BOTTON: - if (!pLed->bLedWPSBlinkInProgress) { + if (!pLed->bLedWPSBlinkInProgress) { if (pLed->bLedNoLinkBlinkInProgress) { del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; @@ -1113,9 +1113,9 @@ static void SwLedControlMode2(struct _adapter *padapter, switch (LedAction) { case LED_CTL_SITE_SURVEY: - if (pmlmepriv->sitesurveyctrl.traffic_busy) + if (pmlmepriv->sitesurveyctrl.traffic_busy) ; /* dummy branch */ - else if (!pLed->bLedScanBlinkInProgress) { + else if (!pLed->bLedScanBlinkInProgress) { if (IS_LED_WPS_BLINKING(pLed)) return; @@ -1132,7 +1132,7 @@ static void SwLedControlMode2(struct _adapter *padapter, pLed->BlinkingLedState = LED_STATE_ON; mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA)); - } + } break; case LED_CTL_TX: @@ -1186,7 +1186,7 @@ static void SwLedControlMode2(struct _adapter *padapter, pLed->BlinkingLedState = LED_STATE_ON; mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(0)); - } + } break; case LED_CTL_STOP_WPS: diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index b7ee5e63af33ac20870b4b2bc93b2bacd0df0809..04638f1e4e881da9b6105977367b70ee2a3cef52 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -72,8 +72,11 @@ static sint _init_cmd_priv(struct cmd_priv *pcmdpriv) ((addr_t)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ - 1)); pcmdpriv->rsp_allocated_buf = kmalloc(MAX_RSPSZ + 4, GFP_ATOMIC); - if (!pcmdpriv->rsp_allocated_buf) + if (!pcmdpriv->rsp_allocated_buf) { + kfree(pcmdpriv->cmd_allocated_buf); + pcmdpriv->cmd_allocated_buf = NULL; return _FAIL; + } pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((addr_t)(pcmdpriv->rsp_allocated_buf) & 3); pcmdpriv->cmd_issued_cnt = 0; diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index 475e7904fe45fa1463ddefe56100ae76a76ae2c5..590acb5aea3d609c34432dc685cdf3e952aba98d 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -588,9 +588,9 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie, netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n"); cnt += buf[cnt + 1] + 2; break; - } else { - cnt += buf[cnt + 1] + 2; } + + cnt += buf[cnt + 1] + 2; } } } diff --git 
a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c index 0aaf2aab6dd09bf5e94447b297a2fa1bb550ad3d..01a150446f5a2df7e1ff9fb499db82d917cb99ee 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c @@ -139,9 +139,10 @@ u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid) if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) { if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE)) - goto _Abort_Set_BSSID; /* driver is in - * WIFI_ADHOC_MASTER_STATE - */ + /* driver is in + * WIFI_ADHOC_MASTER_STATE + */ + goto _Abort_Set_BSSID; } else { r8712_disassoc_cmd(padapter); if (check_fwstate(pmlmepriv, _FW_LINKED)) @@ -203,9 +204,10 @@ void r8712_set_802_11_ssid(struct _adapter *padapter, WIFI_ADHOC_STATE); } } else { - goto _Abort_Set_SSID; /* driver is in - * WIFI_ADHOC_MASTER_STATE - */ + /* driver is in + * WIFI_ADHOC_MASTER_STATE + */ + goto _Abort_Set_SSID; } } } else { diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c index c1feef3da26c4d651b856e9aa3da5ba553fb5001..35cbdc71cad448b5c0a2e3fe776eb1356068b2bf 100644 --- a/drivers/staging/rtl8712/rtl871x_mlme.c +++ b/drivers/staging/rtl8712/rtl871x_mlme.c @@ -137,11 +137,10 @@ static void free_network_nolock(struct mlme_priv *pmlmepriv, } -/* - return the wlan_network with the matching addr - Shall be called under atomic context... - to avoid possible racing condition... -*/ +/* return the wlan_network with the matching addr + * Shall be called under atomic context... + * to avoid possible racing condition... + */ static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue, u8 *addr) { @@ -239,11 +238,10 @@ void r8712_free_network_queue(struct _adapter *dev) } /* - return the wlan_network with the matching addr - - Shall be called under atomic context... - to avoid possible racing condition... -*/ + * return the wlan_network with the matching addr + * Shall be called under atomic context... + * to avoid possible racing condition... + */ static struct wlan_network *r8712_find_network(struct __queue *scanned_queue, u8 *addr) { @@ -369,9 +367,7 @@ static void update_current_network(struct _adapter *adapter, } } -/* -Caller must hold pmlmepriv->lock first. 
-*/ +/* Caller must hold pmlmepriv->lock first */ static void update_scanned_network(struct _adapter *adapter, struct wlan_bssid_ex *target) { @@ -651,8 +647,8 @@ void r8712_free_assoc_resources(struct _adapter *adapter) } /* -*r8712_indicate_connect: the caller has to lock pmlmepriv->lock -*/ + * r8712_indicate_connect: the caller has to lock pmlmepriv->lock + */ void r8712_indicate_connect(struct _adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ -668,8 +664,8 @@ void r8712_indicate_connect(struct _adapter *padapter) /* -*r8712_ind_disconnect: the caller has to lock pmlmepriv->lock -*/ + * r8712_ind_disconnect: the caller has to lock pmlmepriv->lock + */ void r8712_ind_disconnect(struct _adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ -1347,8 +1343,8 @@ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid) (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) break; - else - i++; + i++; + } while (i < NUM_PMKID_CACHE); if (i == NUM_PMKID_CACHE) { diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h index ddaaab058b2f020ca03f61d6c0454e8b74505f36..53a23234c5987d3f0c447a1e2e42091715f05a64 100644 --- a/drivers/staging/rtl8712/rtl871x_mlme.h +++ b/drivers/staging/rtl8712/rtl871x_mlme.h @@ -162,24 +162,6 @@ static inline void clr_fwstate(struct mlme_priv *pmlmepriv, sint state) spin_unlock_irqrestore(&pmlmepriv->lock, irqL); } -static inline void up_scanned_network(struct mlme_priv *pmlmepriv) -{ - unsigned long irqL; - - spin_lock_irqsave(&pmlmepriv->lock, irqL); - pmlmepriv->num_of_scanned++; - spin_unlock_irqrestore(&pmlmepriv->lock, irqL); -} - -static inline void down_scanned_network(struct mlme_priv *pmlmepriv) -{ - unsigned long irqL; - - spin_lock_irqsave(&pmlmepriv->lock, irqL); - pmlmepriv->num_of_scanned--; - spin_unlock_irqrestore(&pmlmepriv->lock, irqL); -} - static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, sint val) { diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c index d464c136dd987ff2ae89be879e90d1bb4103633d..e42fc1404c3539ffcf9a0c144388075613a738ab 100644 --- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c +++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c @@ -190,19 +190,15 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter) } /* -Caller: r8712_cmd_thread - -Check if the fw_pwrstate is okay for issuing cmd. -If not (cpwm should be is less than P2 state), then the sub-routine -will raise the cpwm to be greater than or equal to P2. - -Calling Context: Passive - -Return Value: - -_SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards. -_FAIL: r8712_cmd_thread can not do anything. -*/ + * Caller: r8712_cmd_thread + * Check if the fw_pwrstate is okay for issuing cmd. + * If not (cpwm should be is less than P2 state), then the sub-routine + * will raise the cpwm to be greater than or equal to P2. + * Calling Context: Passive + * Return Value: + * _SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards. + * _FAIL: r8712_cmd_thread can not do anything. + */ sint r8712_register_cmd_alive(struct _adapter *padapter) { uint res = _SUCCESS; @@ -219,13 +215,11 @@ sint r8712_register_cmd_alive(struct _adapter *padapter) } /* -Caller: ISR - -If ISR's txdone, -No more pkts for TX, -Then driver shall call this fun. to power down firmware again. -*/ - + * Caller: ISR + * If ISR's txdone, + * No more pkts for TX, + * Then driver shall call this fun. to power down firmware again. 
+ */ void r8712_unregister_cmd_alive(struct _adapter *padapter) { struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv; diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c index cbd2e51ba42ba64972cdd72b9a937c96518bb625..35c721a505985cea9d8b4fa5249f8b7537e18288 100644 --- a/drivers/staging/rtl8712/rtl871x_recv.c +++ b/drivers/staging/rtl8712/rtl871x_recv.c @@ -125,13 +125,10 @@ union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue) } /* -caller : defrag; recvframe_chk_defrag in recv_thread (passive) -pframequeue: defrag_queue : will be accessed in recv_thread (passive) - -using spin_lock to protect - -*/ - + * caller : defrag; recvframe_chk_defrag in recv_thread (passive) + * pframequeue: defrag_queue : will be accessed in recv_thread (passive) + * using spin_lock to protect + */ void r8712_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue) { @@ -405,7 +402,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter, } /* filter packets that SA is myself or multicast or broadcast */ - if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) + if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) return _FAIL; /* da should be for me */ diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c index 09242425dad4d622bd7baad5322c0eaf890ba843..a7f04a4b089dd3c84658bd508fb4b92a14f05385 100644 --- a/drivers/staging/rtl8712/rtl871x_security.c +++ b/drivers/staging/rtl8712/rtl871x_security.c @@ -159,8 +159,8 @@ static u32 getcrc32(u8 *buf, u32 len) } /* - Need to consider the fragment situation -*/ + * Need to consider the fragment situation + */ void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe) { /* exclude ICV */ unsigned char crc[4]; @@ -467,22 +467,22 @@ static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */ }; /* -********************************************************************** -* Routine: Phase 1 -- generate P1K, given TA, TK, IV32 -* -* Inputs: -* tk[] = temporal key [128 bits] -* ta[] = transmitter's MAC address [ 48 bits] -* iv32 = upper 32 bits of IV [ 32 bits] -* Output: -* p1k[] = Phase 1 key [ 80 bits] -* -* Note: -* This function only needs to be called every 2**16 packets, -* although in theory it could be called every packet. -* -********************************************************************** -*/ + ********************************************************************** + * Routine: Phase 1 -- generate P1K, given TA, TK, IV32 + * + * Inputs: + * tk[] = temporal key [128 bits] + * ta[] = transmitter's MAC address [ 48 bits] + * iv32 = upper 32 bits of IV [ 32 bits] + * Output: + * p1k[] = Phase 1 key [ 80 bits] + * + * Note: + * This function only needs to be called every 2**16 packets, + * although in theory it could be called every packet. 
+ * + ********************************************************************** + */ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32) { sint i; @@ -506,28 +506,28 @@ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32) } /* -********************************************************************** -* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16 -* -* Inputs: -* tk[] = Temporal key [128 bits] -* p1k[] = Phase 1 output key [ 80 bits] -* iv16 = low 16 bits of IV counter [ 16 bits] -* Output: -* rc4key[] = the key used to encrypt the packet [128 bits] -* -* Note: -* The value {TA,IV32,IV16} for Phase1/Phase2 must be unique -* across all packets using the same key TK value. Then, for a -* given value of TK[], this TKIP48 construction guarantees that -* the final RC4KEY value is unique across all packets. -* -* Suggested implementation optimization: if PPK[] is "overlaid" -* appropriately on RC4KEY[], there is no need for the final -* for loop below that copies the PPK[] result into RC4KEY[]. -* -********************************************************************** -*/ + ********************************************************************** + * Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16 + * + * Inputs: + * tk[] = Temporal key [128 bits] + * p1k[] = Phase 1 output key [ 80 bits] + * iv16 = low 16 bits of IV counter [ 16 bits] + * Output: + * rc4key[] = the key used to encrypt the packet [128 bits] + * + * Note: + * The value {TA,IV32,IV16} for Phase1/Phase2 must be unique + * across all packets using the same key TK value. Then, for a + * given value of TK[], this TKIP48 construction guarantees that + * the final RC4KEY value is unique across all packets. + * + * Suggested implementation optimization: if PPK[] is "overlaid" + * appropriately on RC4KEY[], there is no need for the final + * for loop below that copies the PPK[] result into RC4KEY[]. + * + ********************************************************************** + */ static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16) { sint i; diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c index be38364c8a7c6684139e24fe287ac215a8133b01..4ab82ba9bb3f5e9a9f177edc9219a3995fbb30fb 100644 --- a/drivers/staging/rtl8712/rtl871x_xmit.c +++ b/drivers/staging/rtl8712/rtl871x_xmit.c @@ -71,8 +71,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); spin_lock_init(&pxmitpriv->lock); /* - Please insert all the queue initialization using _init_queue below - */ + *Please insert all the queue initialization using _init_queue below + */ pxmitpriv->adapter = padapter; _init_queue(&pxmitpriv->be_pending); _init_queue(&pxmitpriv->bk_pending); @@ -83,10 +83,10 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, _init_queue(&pxmitpriv->apsd_queue); _init_queue(&pxmitpriv->free_xmit_queue); /* - Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME, - and initialize free_xmit_frame below. - Please also apply free_txobj to link_up all the xmit_frames... - */ + * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME, + * and initialize free_xmit_frame below. + * Please also apply free_txobj to link_up all the xmit_frames... 
+ */ pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4, GFP_ATOMIC); if (!pxmitpriv->pallocated_frame_buf) { @@ -109,8 +109,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, } pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME; /* - init xmit hw_txqueue - */ + * init xmit hw_txqueue + */ _r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX); _r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX); _r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX); @@ -128,8 +128,11 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, _init_queue(&pxmitpriv->pending_xmitbuf_queue); pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, GFP_ATOMIC); - if (!pxmitpriv->pallocated_xmitbuf) + if (!pxmitpriv->pallocated_xmitbuf) { + kfree(pxmitpriv->pallocated_frame_buf); + pxmitpriv->pallocated_frame_buf = NULL; return _FAIL; + } pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3); pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; @@ -777,24 +780,23 @@ int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) } /* -Calling context: -1. OS_TXENTRY -2. RXENTRY (rx_thread or RX_ISR/RX_CallBack) - -If we turn on USE_RXTHREAD, then, no need for critical section. -Otherwise, we must use _enter/_exit critical to protect free_xmit_queue... - -Must be very very cautious... - -*/ - + * Calling context: + * 1. OS_TXENTRY + * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack) + * + * If we turn on USE_RXTHREAD, then, no need for critical section. + * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue... + * + * Must be very very cautious... + * + */ struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv) { /* - Please remember to use all the osdep_service api, - and lock/unlock or _enter/_exit critical to protect - pfree_xmit_queue - */ + * Please remember to use all the osdep_service api, + * and lock/unlock or _enter/_exit critical to protect + * pfree_xmit_queue + */ unsigned long irqL; struct xmit_frame *pxframe; struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h index d899d0c6d3a63eaf286842a8bfa1661a9e0dd22b..40927277f498447f752740976d5a531266c2abf4 100644 --- a/drivers/staging/rtl8712/rtl871x_xmit.h +++ b/drivers/staging/rtl8712/rtl871x_xmit.h @@ -261,12 +261,6 @@ struct xmit_priv { uint free_xmitbuf_cnt; }; -static inline struct __queue *get_free_xmit_queue( - struct xmit_priv *pxmitpriv) -{ - return &(pxmitpriv->free_xmit_queue); -} - int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf); struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv); diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c index f27df0b4cb444757e38fb615c7ee8ce3a3ab00e1..28d56c5d144999891dc45fc0a7bb7b1196ba2589 100644 --- a/drivers/staging/rts5208/ms.c +++ b/drivers/staging/rts5208/ms.c @@ -432,31 +432,36 @@ static int ms_pull_ctl_disable(struct rtsx_chip *chip) if (CHECK_PID(chip, 0x5208)) { retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF, - MS_D1_PD | MS_D2_PD | MS_CLK_PD | MS_D6_PD); + MS_D1_PD | MS_D2_PD | MS_CLK_PD | + MS_D6_PD); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF, - MS_D3_PD | MS_D0_PD | MS_BS_PD | XD_D4_PD); + MS_D3_PD | MS_D0_PD | MS_BS_PD | + XD_D4_PD); if (retval) { rtsx_trace(chip); return retval; } 
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF, - MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); + MS_D7_PD | XD_CE_PD | XD_CLE_PD | + XD_CD_PU); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF, - XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD); + XD_RDY_PD | SD_D3_PD | SD_D2_PD | + XD_ALE_PD); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF, - MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); + MS_INS_PU | SD_WP_PD | SD_CD_PU | + SD_CMD_PD); if (retval) { rtsx_trace(chip); return retval; @@ -507,17 +512,17 @@ static int ms_pull_ctl_enable(struct rtsx_chip *chip) if (CHECK_PID(chip, 0x5208)) { rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, - MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD); + MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, - MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD); + MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, - MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); + MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, - XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD); + XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, - MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); + MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, - MS_D5_PD | MS_D4_PD); + MS_D5_PD | MS_D4_PD); } else if (CHECK_PID(chip, 0x5288)) { if (CHECK_BARO_PKG(chip, QFN)) { rtsx_add_cmd(chip, WRITE_REG_CMD, @@ -616,14 +621,20 @@ static int ms_prepare_reset(struct rtsx_chip *chip) if (chip->asic_code) { retval = rtsx_write_register(chip, MS_CFG, 0xFF, - SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1); + SAMPLE_TIME_RISING | + PUSH_TIME_DEFAULT | + NO_EXTEND_TOGGLE | + MS_BUS_WIDTH_1); if (retval) { rtsx_trace(chip); return retval; } } else { retval = rtsx_write_register(chip, MS_CFG, 0xFF, - SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1); + SAMPLE_TIME_FALLING | + PUSH_TIME_DEFAULT | + NO_EXTEND_TOGGLE | + MS_BUS_WIDTH_1); if (retval) { rtsx_trace(chip); return retval; @@ -665,7 +676,7 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus) for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG, - 6, NO_WAIT_INT); + 6, NO_WAIT_INT); if (retval == STATUS_SUCCESS) break; } @@ -765,7 +776,7 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip) for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_read_bytes(chip, GET_INT, 1, - NO_WAIT_INT, &val, 1); + NO_WAIT_INT, &val, 1); if (retval == STATUS_SUCCESS) break; } @@ -794,9 +805,9 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip) } if (val & INT_REG_ERR) { - if (val & INT_REG_CMDNK) + if (val & INT_REG_CMDNK) { chip->card_wp |= (MS_CARD); - else { + } else { rtsx_trace(chip); return STATUS_FAIL; } @@ -861,7 +872,7 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip) for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, - 1, NO_WAIT_INT); + 1, NO_WAIT_INT); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1061,8 +1072,8 @@ static int ms_read_attribute_info(struct rtsx_chip *chip) return STATUS_FAIL; } retval = ms_transfer_data(chip, MS_TM_AUTO_READ, - PRO_READ_LONG_DATA, 0x40, 
WAIT_INT, - 0, 0, buf, 64 * 512); + PRO_READ_LONG_DATA, 0x40, WAIT_INT, + 0, 0, buf, 64 * 512); if (retval == STATUS_SUCCESS) break; @@ -1087,7 +1098,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip) break; retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, - PRO_READ_LONG_DATA, 0, WAIT_INT); + PRO_READ_LONG_DATA, 0, WAIT_INT); if (retval != STATUS_SUCCESS) { kfree(buf); rtsx_trace(chip); @@ -1121,7 +1132,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip) #ifdef SUPPORT_MSXC if ((buf[cur_addr_off + 8] == 0x10) || - (buf[cur_addr_off + 8] == 0x13)) { + (buf[cur_addr_off + 8] == 0x13)) { #else if (buf[cur_addr_off + 8] == 0x10) { #endif @@ -1264,7 +1275,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip) if (device_type != 0x00) { if ((device_type == 0x01) || (device_type == 0x02) || - (device_type == 0x03)) { + (device_type == 0x03)) { chip->card_wp |= MS_CARD; } else { rtsx_trace(chip); @@ -1298,7 +1309,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip) #ifdef SUPPORT_MAGIC_GATE static int mg_set_tpc_para_sub(struct rtsx_chip *chip, - int type, u8 mg_entry_num); + int type, u8 mg_entry_num); #endif static int reset_ms_pro(struct rtsx_chip *chip) @@ -1317,7 +1328,7 @@ static int reset_ms_pro(struct rtsx_chip *chip) #endif #ifdef XC_POWERCLASS -Retry: +retry: #endif retval = ms_pro_reset_flow(chip, 1); if (retval != STATUS_SUCCESS) { @@ -1365,10 +1376,10 @@ static int reset_ms_pro(struct rtsx_chip *chip) change_power_class = power_class_mode; if (change_power_class) { retval = msxc_change_power(chip, - change_power_class); + change_power_class); if (retval != STATUS_SUCCESS) { change_power_class--; - goto Retry; + goto retry; } } } @@ -1418,14 +1429,14 @@ static int ms_read_status_reg(struct rtsx_chip *chip) } static int ms_read_extra_data(struct rtsx_chip *chip, - u16 block_addr, u8 page_num, u8 *buf, int buf_len) + u16 block_addr, u8 page_num, u8 *buf, int buf_len) { struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 val, data[10]; retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, - SystemParm, 6); + SystemParm, 6); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1488,7 +1499,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip, } retval = ms_set_rw_reg_addr(chip, OverwriteFlag, - MS_EXTRA_SIZE, SystemParm, 6); + MS_EXTRA_SIZE, SystemParm, + 6); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1497,7 +1509,7 @@ static int ms_read_extra_data(struct rtsx_chip *chip, } retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT, - data, MS_EXTRA_SIZE); + data, MS_EXTRA_SIZE); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1512,8 +1524,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip, return STATUS_SUCCESS; } -static int ms_write_extra_data(struct rtsx_chip *chip, - u16 block_addr, u8 page_num, u8 *buf, int buf_len) +static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr, + u8 page_num, u8 *buf, int buf_len) { struct ms_info *ms_card = &chip->ms_card; int retval, i; @@ -1525,7 +1537,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip, } retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, - SystemParm, 6 + MS_EXTRA_SIZE); + SystemParm, 6 + MS_EXTRA_SIZE); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1588,7 +1600,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num) u8 val, data[6]; retval = ms_set_rw_reg_addr(chip, OverwriteFlag, 
MS_EXTRA_SIZE, - SystemParm, 6); + SystemParm, 6); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1651,7 +1663,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num) } retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA, - 0, NO_WAIT_INT); + 0, NO_WAIT_INT); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1678,7 +1690,7 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk) } retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, - SystemParm, 7); + SystemParm, 7); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1742,7 +1754,7 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk) u8 val, data[6]; retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, - SystemParm, 6); + SystemParm, 6); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1844,7 +1856,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk, } retval = ms_write_extra_data(chip, phy_blk, i, - extra, MS_EXTRA_SIZE); + extra, MS_EXTRA_SIZE); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1855,7 +1867,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk, } static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, - u16 log_blk, u8 start_page, u8 end_page) + u16 log_blk, u8 start_page, u8 end_page) { struct ms_info *ms_card = &chip->ms_card; bool uncorrect_flag = false; @@ -1915,7 +1927,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE); retval = ms_set_rw_reg_addr(chip, OverwriteFlag, - MS_EXTRA_SIZE, SystemParm, 6); + MS_EXTRA_SIZE, SystemParm, 6); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1971,9 +1983,9 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, } retval = ms_transfer_tpc(chip, - MS_TM_NORMAL_READ, - READ_PAGE_DATA, - 0, NO_WAIT_INT); + MS_TM_NORMAL_READ, + READ_PAGE_DATA, + 0, NO_WAIT_INT); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1981,20 +1993,24 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, if (uncorrect_flag) { ms_set_page_status(log_blk, setPS_NG, - extra, MS_EXTRA_SIZE); + extra, + MS_EXTRA_SIZE); if (i == 0) extra[0] &= 0xEF; ms_write_extra_data(chip, old_blk, i, - extra, MS_EXTRA_SIZE); + extra, + MS_EXTRA_SIZE); dev_dbg(rtsx_dev(chip), "page %d : extra[0] = 0x%x\n", i, extra[0]); MS_SET_BAD_BLOCK_FLG(ms_card); ms_set_page_status(log_blk, setPS_Error, - extra, MS_EXTRA_SIZE); + extra, + MS_EXTRA_SIZE); ms_write_extra_data(chip, new_blk, i, - extra, MS_EXTRA_SIZE); + extra, + MS_EXTRA_SIZE); continue; } @@ -2021,8 +2037,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, } } - retval = ms_set_rw_reg_addr(chip, OverwriteFlag, - MS_EXTRA_SIZE, SystemParm, (6 + MS_EXTRA_SIZE)); + retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, + SystemParm, (6 + MS_EXTRA_SIZE)); ms_set_err_code(chip, MS_NO_ERROR); @@ -2085,7 +2101,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, if (i == 0) { retval = ms_set_rw_reg_addr(chip, OverwriteFlag, - MS_EXTRA_SIZE, SystemParm, 7); + MS_EXTRA_SIZE, SystemParm, + 7); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -2121,7 +2138,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, 
ms_set_err_code(chip, MS_NO_ERROR); retval = ms_read_bytes(chip, GET_INT, 1, - NO_WAIT_INT, &val, 1); + NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -2361,7 +2378,7 @@ static int reset_ms(struct rtsx_chip *chip) } retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1, - NO_WAIT_INT); + NO_WAIT_INT); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -2369,7 +2386,9 @@ static int reset_ms(struct rtsx_chip *chip) retval = rtsx_write_register(chip, MS_CFG, 0x58 | MS_NO_CHECK_INT, - MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT); + MS_BUS_WIDTH_4 | + PUSH_TIME_ODD | + MS_NO_CHECK_INT); if (retval) { rtsx_trace(chip); return retval; @@ -2474,7 +2493,7 @@ static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off) } static void ms_set_l2p_tbl(struct rtsx_chip *chip, - int seg_no, u16 log_off, u16 phy_blk) + int seg_no, u16 log_off, u16 phy_blk) { struct ms_info *ms_card = &chip->ms_card; struct zone_entry *segment; @@ -2530,7 +2549,7 @@ static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478, 7934}; static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk, - u16 log_off, u8 us1, u8 us2) + u16 log_off, u8 us1, u8 us2) { struct ms_info *ms_card = &chip->ms_card; struct zone_entry *segment; @@ -2627,7 +2646,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) disable_cnt = segment->disable_count; - segment->get_index = segment->set_index = 0; + segment->get_index = 0; + segment->set_index = 0; segment->unused_blk_cnt = 0; for (phy_blk = start; phy_blk < end; phy_blk++) { @@ -2646,7 +2666,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) } retval = ms_read_extra_data(chip, phy_blk, 0, - extra, MS_EXTRA_SIZE); + extra, MS_EXTRA_SIZE); if (retval != STATUS_SUCCESS) { dev_dbg(rtsx_dev(chip), "read extra data fail\n"); ms_set_bad_block(chip, phy_blk); @@ -2685,7 +2705,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) } if ((log_blk < ms_start_idx[seg_no]) || - (log_blk >= ms_start_idx[seg_no + 1])) { + (log_blk >= ms_start_idx[seg_no + 1])) { if (!(chip->card_wp & MS_CARD)) { retval = ms_erase_block(chip, phy_blk); if (retval != STATUS_SUCCESS) @@ -2705,7 +2725,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) us1 = extra[0] & 0x10; tmp_blk = segment->l2p_table[idx]; retval = ms_read_extra_data(chip, tmp_blk, 0, - extra, MS_EXTRA_SIZE); + extra, MS_EXTRA_SIZE); if (retval != STATUS_SUCCESS) continue; us2 = extra[0] & 0x10; @@ -2774,7 +2794,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) phy_blk = ms_get_unused_block(chip, 0); retval = ms_copy_page(chip, tmp_blk, phy_blk, - log_blk, 0, ms_card->page_off + 1); + log_blk, 0, + ms_card->page_off + 1); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -2861,7 +2882,7 @@ int reset_ms_card(struct rtsx_chip *chip) } static int mspro_set_rw_cmd(struct rtsx_chip *chip, - u32 start_sec, u16 sec_cnt, u8 cmd) + u32 start_sec, u16 sec_cnt, u8 cmd) { int retval, i; u8 data[8]; @@ -2932,8 +2953,8 @@ static inline int ms_auto_tune_clock(struct rtsx_chip *chip) } static int mspro_rw_multi_sector(struct scsi_cmnd *srb, - struct rtsx_chip *chip, u32 start_sector, - u16 sector_cnt) + struct rtsx_chip *chip, u32 start_sector, + u16 sector_cnt) { struct ms_info *ms_card = &chip->ms_card; bool mode_2k = false; @@ -2992,12 +3013,13 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb, } if (ms_card->seq_mode) { - if 
((ms_card->pre_dir != srb->sc_data_direction) - || ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) != start_sector) - || (mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) - || (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ)) - || !(val & MS_INT_BREQ) - || ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) { + if ((ms_card->pre_dir != srb->sc_data_direction) || + ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) != + start_sector) || + (mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) || + (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ)) || + !(val & MS_INT_BREQ) || + ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) { ms_card->seq_mode = 0; ms_card->total_sec_cnt = 0; if (val & MS_INT_BREQ) { @@ -3007,7 +3029,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb, return STATUS_FAIL; } - rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH); + rtsx_write_register(chip, RBCTL, RB_FLUSH, + RB_FLUSH); } } } @@ -3038,8 +3061,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb, } retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt, - WAIT_INT, mode_2k, scsi_sg_count(srb), - scsi_sglist(srb), scsi_bufflen(srb)); + WAIT_INT, mode_2k, scsi_sg_count(srb), + scsi_sglist(srb), scsi_bufflen(srb)); if (retval != STATUS_SUCCESS) { ms_card->seq_mode = 0; rtsx_read_register(chip, MS_TRANS_CFG, &val); @@ -3076,7 +3099,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb, } static int mspro_read_format_progress(struct rtsx_chip *chip, - const int short_data_len) + const int short_data_len) { struct ms_info *ms_card = &chip->ms_card; int retval, i; @@ -3102,7 +3125,8 @@ static int mspro_read_format_progress(struct rtsx_chip *chip, } if (!(tmp & MS_INT_BREQ)) { - if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) { + if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK | + MS_INT_ERR)) == MS_INT_CED) { ms_card->format_status = FORMAT_SUCCESS; return STATUS_SUCCESS; } @@ -3117,7 +3141,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip, cnt = (u8)short_data_len; retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT, - MS_NO_CHECK_INT); + MS_NO_CHECK_INT); if (retval != STATUS_SUCCESS) { ms_card->format_status = FORMAT_FAIL; rtsx_trace(chip); @@ -3125,7 +3149,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip, } retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT, - data, 8); + data, 8); if (retval != STATUS_SUCCESS) { ms_card->format_status = FORMAT_FAIL; rtsx_trace(chip); @@ -3204,7 +3228,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip) int i; if (ms_card->pro_under_formatting && - (rtsx_get_stat(chip) != RTSX_STAT_SS)) { + (rtsx_get_stat(chip) != RTSX_STAT_SS)) { rtsx_set_stat(chip, RTSX_STAT_RUN); for (i = 0; i < 65535; i++) { @@ -3216,7 +3240,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip) } int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip, - int short_data_len, bool quick_format) + int short_data_len, bool quick_format) { struct ms_info *ms_card = &chip->ms_card; int retval, i; @@ -3305,9 +3329,9 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip, } static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, - u16 log_blk, u8 start_page, u8 end_page, - u8 *buf, unsigned int *index, - unsigned int *offset) + u16 log_blk, u8 start_page, u8 end_page, + u8 *buf, unsigned int *index, + unsigned int *offset) { struct ms_info *ms_card = &chip->ms_card; int retval, i; @@ -3315,7 +3339,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 
phy_blk, u8 *ptr; retval = ms_read_extra_data(chip, phy_blk, start_page, - extra, MS_EXTRA_SIZE); + extra, MS_EXTRA_SIZE); if (retval == STATUS_SUCCESS) { if ((extra[1] & 0x30) != 0x30) { ms_set_err_code(chip, MS_FLASH_READ_ERROR); @@ -3325,7 +3349,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, } retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, - SystemParm, 6); + SystemParm, 6); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3389,11 +3413,17 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, if (retval != STATUS_SUCCESS) { if (!(chip->card_wp & MS_CARD)) { reset_ms(chip); - ms_set_page_status(log_blk, setPS_NG, extra, MS_EXTRA_SIZE); - ms_write_extra_data(chip, phy_blk, - page_addr, extra, MS_EXTRA_SIZE); + ms_set_page_status + (log_blk, setPS_NG, + extra, + MS_EXTRA_SIZE); + ms_write_extra_data + (chip, phy_blk, + page_addr, extra, + MS_EXTRA_SIZE); } - ms_set_err_code(chip, MS_FLASH_READ_ERROR); + ms_set_err_code(chip, + MS_FLASH_READ_ERROR); rtsx_trace(chip); return STATUS_FAIL; } @@ -3420,7 +3450,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, } retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, - &val, 1); + &val, 1); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3441,23 +3471,24 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, - 0xFF, trans_cfg); + 0xFF, trans_cfg); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, RING_BUFFER); + 0x01, RING_BUFFER); trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, - MS_TRANSFER_START | MS_TM_NORMAL_READ); + MS_TRANSFER_START | MS_TM_NORMAL_READ); rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, - MS_TRANSFER_END, MS_TRANSFER_END); + MS_TRANSFER_END, MS_TRANSFER_END); rtsx_send_cmd_no_wait(chip); - retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, - 512, scsi_sg_count(chip->srb), - index, offset, DMA_FROM_DEVICE, - chip->ms_timeout); + retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512, + scsi_sg_count(chip->srb), + index, offset, + DMA_FROM_DEVICE, + chip->ms_timeout); if (retval < 0) { if (retval == -ETIMEDOUT) { ms_set_err_code(chip, MS_TO_ERROR); @@ -3489,7 +3520,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, } static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, - u16 new_blk, u16 log_blk, u8 start_page, + u16 new_blk, u16 log_blk, u8 start_page, u8 end_page, u8 *buf, unsigned int *index, unsigned int *offset) { @@ -3500,7 +3531,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, if (!start_page) { retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, - SystemParm, 7); + SystemParm, 7); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3534,7 +3565,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, ms_set_err_code(chip, MS_NO_ERROR); retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1, - NO_WAIT_INT); + NO_WAIT_INT); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3542,7 +3573,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, } retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, - SystemParm, (6 + MS_EXTRA_SIZE)); + SystemParm, (6 + MS_EXTRA_SIZE)); if (retval != 
STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3630,25 +3661,26 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, - 0xFF, WRITE_PAGE_DATA); + 0xFF, WRITE_PAGE_DATA); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, - 0xFF, WAIT_INT); + 0xFF, WAIT_INT); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, RING_BUFFER); + 0x01, RING_BUFFER); trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, - MS_TRANSFER_START | MS_TM_NORMAL_WRITE); + MS_TRANSFER_START | MS_TM_NORMAL_WRITE); rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, - MS_TRANSFER_END, MS_TRANSFER_END); + MS_TRANSFER_END, MS_TRANSFER_END); rtsx_send_cmd_no_wait(chip); - retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, - 512, scsi_sg_count(chip->srb), - index, offset, DMA_TO_DEVICE, - chip->ms_timeout); + retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512, + scsi_sg_count(chip->srb), + index, offset, + DMA_TO_DEVICE, + chip->ms_timeout); if (retval < 0) { ms_set_err_code(chip, MS_TO_ERROR); rtsx_clear_ms_error(chip); @@ -3677,7 +3709,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, if (page_addr == (end_page - 1)) { if (!(val & INT_REG_CED)) { retval = ms_send_cmd(chip, BLOCK_END, - WAIT_INT); + WAIT_INT); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3685,7 +3717,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, } retval = ms_read_bytes(chip, GET_INT, 1, - NO_WAIT_INT, &val, 1); + NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3693,7 +3725,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, } if ((page_addr == (end_page - 1)) || - (page_addr == ms_card->page_off)) { + (page_addr == ms_card->page_off)) { if (!(val & INT_REG_CED)) { ms_set_err_code(chip, MS_FLASH_WRITE_ERROR); @@ -3711,13 +3743,13 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, } static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, - u16 log_blk, u8 page_off) + u16 log_blk, u8 page_off) { struct ms_info *ms_card = &chip->ms_card; int retval, seg_no; retval = ms_copy_page(chip, old_blk, new_blk, log_blk, - page_off, ms_card->page_off + 1); + page_off, ms_card->page_off + 1); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3740,13 +3772,13 @@ static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, } static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, - u16 log_blk, u8 start_page) + u16 log_blk, u8 start_page) { int retval; if (start_page) { retval = ms_copy_page(chip, old_blk, new_blk, log_blk, - 0, start_page); + 0, start_page); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3772,7 +3804,7 @@ int ms_delay_write(struct rtsx_chip *chip) delay_write->delay_write_flag = 0; retval = ms_finish_write(chip, - delay_write->old_phyblock, + delay_write->old_phyblock, delay_write->new_phyblock, delay_write->logblock, delay_write->pageoff); @@ -3790,13 +3822,13 @@ static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip) { if (srb->sc_data_direction == DMA_FROM_DEVICE) set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); else set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR); } static int ms_rw_multi_sector(struct 
scsi_cmnd *srb, struct rtsx_chip *chip, - u32 start_sector, u16 sector_cnt) + u32 start_sector, u16 sector_cnt) { struct ms_info *ms_card = &chip->ms_card; unsigned int lun = SCSI_LUN(srb); @@ -3843,16 +3875,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, if (srb->sc_data_direction == DMA_TO_DEVICE) { #ifdef MS_DELAY_WRITE if (delay_write->delay_write_flag && - (delay_write->logblock == log_blk) && - (start_page > delay_write->pageoff)) { + (delay_write->logblock == log_blk) && + (start_page > delay_write->pageoff)) { delay_write->delay_write_flag = 0; retval = ms_copy_page(chip, - delay_write->old_phyblock, - delay_write->new_phyblock, log_blk, - delay_write->pageoff, start_page); + delay_write->old_phyblock, + delay_write->new_phyblock, + log_blk, + delay_write->pageoff, start_page); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -3868,32 +3901,35 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, retval = ms_delay_write(chip); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } #endif - old_blk = ms_get_l2p_tbl(chip, seg_no, - log_blk - ms_start_idx[seg_no]); + old_blk = ms_get_l2p_tbl + (chip, seg_no, + log_blk - ms_start_idx[seg_no]); new_blk = ms_get_unused_block(chip, seg_no); if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } retval = ms_prepare_write(chip, old_blk, new_blk, - log_blk, start_page); + log_blk, start_page); if (retval != STATUS_SUCCESS) { - if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { - set_sense_type(chip, lun, + if (detect_card_cd(chip, MS_CARD) != + STATUS_SUCCESS) { + set_sense_type + (chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; } set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -3906,21 +3942,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, if (retval != STATUS_SUCCESS) { if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_NOT_PRESENT); + SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; } set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return STATUS_FAIL; } #endif old_blk = ms_get_l2p_tbl(chip, seg_no, - log_blk - ms_start_idx[seg_no]); + log_blk - ms_start_idx[seg_no]); if (old_blk == 0xFFFF) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -3942,19 +3978,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, if (srb->sc_data_direction == DMA_FROM_DEVICE) { retval = ms_read_multiple_pages(chip, - old_blk, log_blk, start_page, end_page, - ptr, &index, &offset); + old_blk, log_blk, + start_page, end_page, + ptr, &index, &offset); } else { - retval = ms_write_multiple_pages(chip, old_blk, - new_blk, log_blk, start_page, end_page, - ptr, &index, &offset); + retval = ms_write_multiple_pages(chip, old_blk, new_blk, + log_blk, start_page, + end_page, ptr, &index, + &offset); } if (retval != STATUS_SUCCESS) { toggle_gpio(chip, 1); if 
(detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_NOT_PRESENT); + SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; } @@ -3970,8 +4008,8 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, ms_set_unused_block(chip, old_blk); ms_set_l2p_tbl(chip, seg_no, - log_blk - ms_start_idx[seg_no], - new_blk); + log_blk - ms_start_idx[seg_no], + new_blk); } } @@ -3995,14 +4033,14 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, if (retval != STATUS_SUCCESS) { chip->card_fail |= MS_CARD; set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_NOT_PRESENT); + SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; } } old_blk = ms_get_l2p_tbl(chip, seg_no, - log_blk - ms_start_idx[seg_no]); + log_blk - ms_start_idx[seg_no]); if (old_blk == 0xFFFF) { ms_rw_fail(srb, chip); rtsx_trace(chip); @@ -4034,10 +4072,12 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, delay_write->pageoff = end_page; #else retval = ms_finish_write(chip, old_blk, new_blk, - log_blk, end_page); + log_blk, end_page); if (retval != STATUS_SUCCESS) { - if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { - set_sense_type(chip, lun, + if (detect_card_cd(chip, MS_CARD) != + STATUS_SUCCESS) { + set_sense_type + (chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; @@ -4057,17 +4097,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, } int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, - u32 start_sector, u16 sector_cnt) + u32 start_sector, u16 sector_cnt) { struct ms_info *ms_card = &chip->ms_card; int retval; if (CHK_MSPRO(ms_card)) retval = mspro_rw_multi_sector(srb, chip, start_sector, - sector_cnt); + sector_cnt); else retval = ms_rw_multi_sector(srb, chip, start_sector, - sector_cnt); + sector_cnt); return retval; } @@ -4189,7 +4229,7 @@ static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num) } static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type, - u8 mg_entry_num) + u8 mg_entry_num) { int retval; u8 buf[6]; @@ -4306,7 +4346,7 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip) } retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA, - 3, WAIT_INT, 0, 0, buf + 4, 1536); + 3, WAIT_INT, 0, 0, buf + 4, 1536); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); rtsx_clear_ms_error(chip); @@ -4354,7 +4394,7 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip) } retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT, - buf, 32); + buf, 32); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); rtsx_trace(chip); @@ -4437,7 +4477,7 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip) } retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT, - buf1, 32); + buf1, 32); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); rtsx_trace(chip); @@ -4560,7 +4600,7 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) } retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA, - 2, WAIT_INT, 0, 0, buf + 4, 1024); + 2, WAIT_INT, 0, 0, buf + 4, 1024); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_clear_ms_error(chip); @@ -4615,11 +4655,12 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { if 
(ms_card->mg_auth == 0) { if ((buf[5] & 0xC0) != 0) - set_sense_type(chip, lun, + set_sense_type + (chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); else set_sense_type(chip, lun, - SENSE_TYPE_MG_WRITE_ERR); + SENSE_TYPE_MG_WRITE_ERR); } else { set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); } @@ -4634,17 +4675,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, - 0xFF, PRO_WRITE_LONG_DATA); + 0xFF, PRO_WRITE_LONG_DATA); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, RING_BUFFER); + 0x01, RING_BUFFER); trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, - MS_TRANSFER_START | MS_TM_NORMAL_WRITE); + MS_TRANSFER_START | MS_TM_NORMAL_WRITE); rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, - MS_TRANSFER_END, MS_TRANSFER_END); + MS_TRANSFER_END, MS_TRANSFER_END); rtsx_send_cmd_no_wait(chip); @@ -4654,13 +4695,15 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_clear_ms_error(chip); if (ms_card->mg_auth == 0) { if ((buf[5] & 0xC0) != 0) - set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); + set_sense_type + (chip, lun, + SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); else set_sense_type(chip, lun, - SENSE_TYPE_MG_WRITE_ERR); + SENSE_TYPE_MG_WRITE_ERR); } else { set_sense_type(chip, lun, - SENSE_TYPE_MG_WRITE_ERR); + SENSE_TYPE_MG_WRITE_ERR); } retval = STATUS_FAIL; rtsx_trace(chip); @@ -4669,16 +4712,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) } #else retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA, - 2, WAIT_INT, 0, 0, buf + 4, 1024); + 2, WAIT_INT, 0, 0, buf + 4, 1024); if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) { rtsx_clear_ms_error(chip); if (ms_card->mg_auth == 0) { if ((buf[5] & 0xC0) != 0) - set_sense_type(chip, lun, - SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); + set_sense_type + (chip, lun, + SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); else set_sense_type(chip, lun, - SENSE_TYPE_MG_WRITE_ERR); + SENSE_TYPE_MG_WRITE_ERR); } else { set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); } @@ -4706,11 +4750,12 @@ void ms_cleanup_work(struct rtsx_chip *chip) } if (CHK_MSHG(ms_card)) { rtsx_write_register(chip, MS_CFG, - MS_2K_SECTOR_MODE, 0x00); + MS_2K_SECTOR_MODE, 0x00); } } #ifdef MS_DELAY_WRITE - else if ((!CHK_MSPRO(ms_card)) && ms_card->delay_write.delay_write_flag) { + else if ((!CHK_MSPRO(ms_card)) && + ms_card->delay_write.delay_write_flag) { dev_dbg(rtsx_dev(chip), "MS: delay write\n"); ms_delay_write(chip); ms_card->cleanup_counter = 0; diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h index d7686399df97d8fd2abcdb704f6ab887b2dfad1c..71f98cc03eed64afd62acfaa4cc282c3d96834eb 100644 --- a/drivers/staging/rts5208/ms.h +++ b/drivers/staging/rts5208/ms.h @@ -202,9 +202,9 @@ void mspro_polling_format_status(struct rtsx_chip *chip); void mspro_stop_seq_mode(struct rtsx_chip *chip); int reset_ms_card(struct rtsx_chip *chip); int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, - u32 start_sector, u16 sector_cnt); + u32 start_sector, u16 sector_cnt); int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip, - int short_data_len, bool quick_format); + int short_data_len, bool quick_format); void ms_free_l2p_tbl(struct rtsx_chip *chip); void ms_cleanup_work(struct rtsx_chip *chip); int ms_power_off_card3v3(struct rtsx_chip *chip); diff --git a/drivers/staging/rts5208/rtsx.c 
b/drivers/staging/rts5208/rtsx.c index 5d65a5cdc748b555bb429766e7add2664a155919..68d75d0d5efd0c394145a2df802d0a12c490b459 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -107,8 +107,10 @@ static int slave_configure(struct scsi_device *sdev) * the actual value or the modified one, depending on where the * data comes from. */ - if (sdev->scsi_level < SCSI_2) - sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2; + if (sdev->scsi_level < SCSI_2) { + sdev->scsi_level = SCSI_2; + sdev->sdev_target->scsi_level = SCSI_2; + } return 0; } @@ -120,12 +122,15 @@ static int slave_configure(struct scsi_device *sdev) /* we use this macro to help us write into the buffer */ #undef SPRINTF #define SPRINTF(args...) \ - do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0) + do { \ + if (pos < buffer + length) \ + pos += sprintf(pos, ## args); \ + } while (0) /* queue a command */ /* This is always called with scsi_lock(host) held */ static int queuecommand_lck(struct scsi_cmnd *srb, - void (*done)(struct scsi_cmnd *)) + void (*done)(struct scsi_cmnd *)) { struct rtsx_dev *dev = host_to_rtsx(srb->device->host); struct rtsx_chip *chip = dev->chip; @@ -313,7 +318,7 @@ static int rtsx_suspend(struct pci_dev *pci, pm_message_t state) return 0; /* lock the device pointers */ - mutex_lock(&(dev->dev_mutex)); + mutex_lock(&dev->dev_mutex); chip = dev->chip; @@ -349,7 +354,7 @@ static int rtsx_resume(struct pci_dev *pci) chip = dev->chip; /* lock the device pointers */ - mutex_lock(&(dev->dev_mutex)); + mutex_lock(&dev->dev_mutex); pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); @@ -418,7 +423,7 @@ static int rtsx_control_thread(void *__dev) break; /* lock the device pointers */ - mutex_lock(&(dev->dev_mutex)); + mutex_lock(&dev->dev_mutex); /* if the device has disconnected, we are free to exit */ if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) { @@ -433,7 +438,7 @@ static int rtsx_control_thread(void *__dev) /* has the command aborted ? 
*/ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) { chip->srb->result = DID_ABORT << 16; - goto SkipForAbort; + goto skip_for_abort; } scsi_unlock(host); @@ -480,12 +485,12 @@ static int rtsx_control_thread(void *__dev) else if (chip->srb->result != DID_ABORT << 16) { chip->srb->scsi_done(chip->srb); } else { -SkipForAbort: +skip_for_abort: dev_err(&dev->pci->dev, "scsi command aborted\n"); } if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) { - complete(&(dev->notify)); + complete(&dev->notify); rtsx_set_stat(chip, RTSX_STAT_IDLE); } @@ -519,9 +524,9 @@ static int rtsx_polling_thread(void *__dev) { struct rtsx_dev *dev = __dev; struct rtsx_chip *chip = dev->chip; - struct sd_info *sd_card = &(chip->sd_card); - struct xd_info *xd_card = &(chip->xd_card); - struct ms_info *ms_card = &(chip->ms_card); + struct sd_info *sd_card = &chip->sd_card; + struct xd_info *xd_card = &chip->xd_card; + struct ms_info *ms_card = &chip->ms_card; sd_card->cleanup_counter = 0; xd_card->cleanup_counter = 0; @@ -531,12 +536,11 @@ static int rtsx_polling_thread(void *__dev) wait_timeout((delay_use + 5) * 1000); for (;;) { - set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL)); /* lock the device pointers */ - mutex_lock(&(dev->dev_mutex)); + mutex_lock(&dev->dev_mutex); /* if the device has disconnected, we are free to exit */ if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) { @@ -550,7 +554,7 @@ static int rtsx_polling_thread(void *__dev) mspro_polling_format_status(chip); /* lock the device pointers */ - mutex_lock(&(dev->dev_mutex)); + mutex_lock(&dev->dev_mutex); rtsx_polling_func(chip); @@ -597,7 +601,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id) dev->trans_result = TRANS_RESULT_FAIL; if (dev->done) complete(dev->done); - goto Exit; + goto exit; } } @@ -619,7 +623,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id) } } -Exit: +exit: spin_unlock(&dev->reg_lock); return IRQ_HANDLED; } @@ -724,9 +728,10 @@ static int rtsx_scan_thread(void *__dev) dev_info(&dev->pci->dev, "%s: waiting for device to settle before scanning\n", CR_DRIVER_NAME); - wait_event_interruptible_timeout(dev->delay_wait, - rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT), - delay_use * HZ); + wait_event_interruptible_timeout + (dev->delay_wait, + rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT), + delay_use * HZ); } /* If the device is still connected, perform the scanning */ @@ -844,7 +849,7 @@ static void rtsx_init_options(struct rtsx_chip *chip) } static int rtsx_probe(struct pci_dev *pci, - const struct pci_device_id *pci_id) + const struct pci_device_id *pci_id) { struct Scsi_Host *host; struct rtsx_dev *dev; @@ -879,18 +884,18 @@ static int rtsx_probe(struct pci_dev *pci, dev = host_to_rtsx(host); memset(dev, 0, sizeof(struct rtsx_dev)); - dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL); + dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL); if (!dev->chip) { err = -ENOMEM; goto errout; } spin_lock_init(&dev->reg_lock); - mutex_init(&(dev->dev_mutex)); + mutex_init(&dev->dev_mutex); init_completion(&dev->cmnd_ready); init_completion(&dev->control_exit); init_completion(&dev->polling_exit); - init_completion(&(dev->notify)); + init_completion(&dev->notify); init_completion(&dev->scanning_done); init_waitqueue_head(&dev->delay_wait); diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h index e725b10ed08788d07b3942421eb0d3b9501f074d..575e5734f2a5ca61e2313871103fa51d36b7b93f 100644 --- a/drivers/staging/rts5208/rtsx.h +++ b/drivers/staging/rts5208/rtsx.h @@ -149,7 
+149,7 @@ static inline void get_current_time(u8 *timeval_buf, int buf_len) getnstimeofday64(&ts64); - tv_usec = ts64.tv_nsec/NSEC_PER_USEC; + tv_usec = ts64.tv_nsec / NSEC_PER_USEC; timeval_buf[0] = (u8)(ts64.tv_sec >> 24); timeval_buf[1] = (u8)(ts64.tv_sec >> 16); diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c index 97717744962de13f6b7a0fd00ddd1cec7882ac4f..a6b7bffc67149f4c16591df46ad3ca32bd2cbf62 100644 --- a/drivers/staging/rts5208/rtsx_card.c +++ b/drivers/staging/rts5208/rtsx_card.c @@ -33,11 +33,11 @@ void do_remaining_work(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; #ifdef XD_DELAY_WRITE - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; #endif - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; if (chip->card_ready & SD_CARD) { if (sd_card->seq_mode) { @@ -100,9 +100,9 @@ void try_to_switch_sdio_ctrl(struct rtsx_chip *chip) if ((reg1 & 0xC0) && (reg2 & 0xC0)) { chip->sd_int = 1; rtsx_write_register(chip, SDIO_CTRL, 0xFF, - SDIO_BUS_CTRL | SDIO_CD_CTRL); + SDIO_BUS_CTRL | SDIO_CD_CTRL); rtsx_write_register(chip, PWR_GATE_CTRL, - LDO3318_PWR_MASK, LDO_ON); + LDO3318_PWR_MASK, LDO_ON); } } @@ -133,7 +133,7 @@ void dynamic_configure_sdio_aspm(struct rtsx_chip *chip) if (!chip->sdio_aspm) { dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n"); rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC, - 0x30 | (chip->aspm_level[1] << 2)); + 0x30 | (chip->aspm_level[1] << 2)); chip->sdio_aspm = 1; } } else { @@ -154,7 +154,7 @@ void do_reset_sd_card(struct rtsx_chip *chip) chip->sd_reset_counter, chip->card2lun[SD_CARD]); if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) { - clear_bit(SD_NR, &(chip->need_reset)); + clear_bit(SD_NR, &chip->need_reset); chip->sd_reset_counter = 0; chip->sd_show_cnt = 0; return; @@ -169,7 +169,7 @@ void do_reset_sd_card(struct rtsx_chip *chip) if (chip->need_release & SD_CARD) return; if (retval == STATUS_SUCCESS) { - clear_bit(SD_NR, &(chip->need_reset)); + clear_bit(SD_NR, &chip->need_reset); chip->sd_reset_counter = 0; chip->sd_show_cnt = 0; chip->card_ready |= SD_CARD; @@ -177,7 +177,7 @@ void do_reset_sd_card(struct rtsx_chip *chip) chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw; } else { if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) { - clear_bit(SD_NR, &(chip->need_reset)); + clear_bit(SD_NR, &chip->need_reset); chip->sd_reset_counter = 0; chip->sd_show_cnt = 0; } else { @@ -208,7 +208,7 @@ void do_reset_xd_card(struct rtsx_chip *chip) chip->xd_reset_counter, chip->card2lun[XD_CARD]); if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT) { - clear_bit(XD_NR, &(chip->need_reset)); + clear_bit(XD_NR, &chip->need_reset); chip->xd_reset_counter = 0; chip->xd_show_cnt = 0; return; @@ -223,14 +223,14 @@ void do_reset_xd_card(struct rtsx_chip *chip) if (chip->need_release & XD_CARD) return; if (retval == STATUS_SUCCESS) { - clear_bit(XD_NR, &(chip->need_reset)); + clear_bit(XD_NR, &chip->need_reset); chip->xd_reset_counter = 0; chip->card_ready |= XD_CARD; chip->card_fail &= ~XD_CARD; chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw; } else { if (chip->xd_reset_counter >= MAX_RESET_CNT) { - clear_bit(XD_NR, &(chip->need_reset)); + clear_bit(XD_NR, &chip->need_reset); chip->xd_reset_counter = 0; chip->xd_show_cnt = 0; } else { @@ -256,7 +256,7 @@ void do_reset_ms_card(struct rtsx_chip *chip) chip->ms_reset_counter, chip->card2lun[MS_CARD]); if (chip->card2lun[MS_CARD] >= 
MAX_ALLOWED_LUN_CNT) { - clear_bit(MS_NR, &(chip->need_reset)); + clear_bit(MS_NR, &chip->need_reset); chip->ms_reset_counter = 0; chip->ms_show_cnt = 0; return; @@ -271,14 +271,14 @@ void do_reset_ms_card(struct rtsx_chip *chip) if (chip->need_release & MS_CARD) return; if (retval == STATUS_SUCCESS) { - clear_bit(MS_NR, &(chip->need_reset)); + clear_bit(MS_NR, &chip->need_reset); chip->ms_reset_counter = 0; chip->card_ready |= MS_CARD; chip->card_fail &= ~MS_CARD; chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw; } else { if (chip->ms_reset_counter >= MAX_RESET_CNT) { - clear_bit(MS_NR, &(chip->need_reset)); + clear_bit(MS_NR, &chip->need_reset); chip->ms_reset_counter = 0; chip->ms_show_cnt = 0; } else { @@ -300,7 +300,7 @@ static void release_sdio(struct rtsx_chip *chip) { if (chip->sd_io) { rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR, - SD_STOP | SD_CLR_ERR); + SD_STOP | SD_CLR_ERR); if (chip->chip_insert_with_sdio) { chip->chip_insert_with_sdio = 0; @@ -369,7 +369,7 @@ void rtsx_reset_cards(struct rtsx_chip *chip) rtsx_disable_aspm(chip); if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio) - clear_bit(SD_NR, &(chip->need_reset)); + clear_bit(SD_NR, &chip->need_reset); if (chip->need_reset & XD_CARD) { chip->card_exist |= XD_CARD; @@ -381,8 +381,8 @@ void rtsx_reset_cards(struct rtsx_chip *chip) } if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) { if (chip->card_exist & XD_CARD) { - clear_bit(SD_NR, &(chip->need_reset)); - clear_bit(MS_NR, &(chip->need_reset)); + clear_bit(SD_NR, &chip->need_reset); + clear_bit(MS_NR, &chip->need_reset); } } if (chip->need_reset & SD_CARD) { @@ -449,7 +449,7 @@ void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip) #ifdef DISABLE_CARD_INT void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, - unsigned long *need_release) + unsigned long *need_release) { u8 release_map = 0, reset_map = 0; @@ -502,13 +502,13 @@ void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, reset_map = 0; if (!(chip->card_exist & XD_CARD) && - (xd_cnt > (DEBOUNCE_CNT-1))) + (xd_cnt > (DEBOUNCE_CNT - 1))) reset_map |= XD_CARD; if (!(chip->card_exist & SD_CARD) && - (sd_cnt > (DEBOUNCE_CNT-1))) + (sd_cnt > (DEBOUNCE_CNT - 1))) reset_map |= SD_CARD; if (!(chip->card_exist & MS_CARD) && - (ms_cnt > (DEBOUNCE_CNT-1))) + (ms_cnt > (DEBOUNCE_CNT - 1))) reset_map |= MS_CARD; } @@ -531,23 +531,23 @@ void rtsx_init_cards(struct rtsx_chip *chip) } #ifdef DISABLE_CARD_INT - card_cd_debounce(chip, &(chip->need_reset), &(chip->need_release)); + card_cd_debounce(chip, &chip->need_reset, &chip->need_release); #endif if (chip->need_release) { if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) { if (chip->int_reg & XD_EXIST) { - clear_bit(SD_NR, &(chip->need_release)); - clear_bit(MS_NR, &(chip->need_release)); + clear_bit(SD_NR, &chip->need_release); + clear_bit(MS_NR, &chip->need_release); } } if (!(chip->card_exist & SD_CARD) && !chip->sd_io) - clear_bit(SD_NR, &(chip->need_release)); + clear_bit(SD_NR, &chip->need_release); if (!(chip->card_exist & XD_CARD)) - clear_bit(XD_NR, &(chip->need_release)); + clear_bit(XD_NR, &chip->need_release); if (!(chip->card_exist & MS_CARD)) - clear_bit(MS_NR, &(chip->need_release)); + clear_bit(MS_NR, &chip->need_release); dev_dbg(rtsx_dev(chip), "chip->need_release = 0x%x\n", (unsigned int)(chip->need_release)); @@ -556,8 +556,10 @@ void rtsx_init_cards(struct rtsx_chip *chip) if (chip->need_release) { if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER)) 
rtsx_write_register(chip, OCPCLR, - CARD_OC_INT_CLR | CARD_OC_CLR, - CARD_OC_INT_CLR | CARD_OC_CLR); + CARD_OC_INT_CLR | + CARD_OC_CLR, + CARD_OC_INT_CLR | + CARD_OC_CLR); chip->ocp_stat = 0; } #endif @@ -567,7 +569,7 @@ void rtsx_init_cards(struct rtsx_chip *chip) } if (chip->need_release & SD_CARD) { - clear_bit(SD_NR, &(chip->need_release)); + clear_bit(SD_NR, &chip->need_release); chip->card_exist &= ~SD_CARD; chip->card_ejected &= ~SD_CARD; chip->card_fail &= ~SD_CARD; @@ -580,7 +582,7 @@ void rtsx_init_cards(struct rtsx_chip *chip) } if (chip->need_release & XD_CARD) { - clear_bit(XD_NR, &(chip->need_release)); + clear_bit(XD_NR, &chip->need_release); chip->card_exist &= ~XD_CARD; chip->card_ejected &= ~XD_CARD; chip->card_fail &= ~XD_CARD; @@ -590,13 +592,13 @@ void rtsx_init_cards(struct rtsx_chip *chip) release_xd_card(chip); if (CHECK_PID(chip, 0x5288) && - CHECK_BARO_PKG(chip, QFN)) + CHECK_BARO_PKG(chip, QFN)) rtsx_write_register(chip, HOST_SLEEP_STATE, - 0xC0, 0xC0); + 0xC0, 0xC0); } if (chip->need_release & MS_CARD) { - clear_bit(MS_NR, &(chip->need_release)); + clear_bit(MS_NR, &chip->need_release); chip->card_exist &= ~MS_CARD; chip->card_ejected &= ~MS_CARD; chip->card_fail &= ~MS_CARD; @@ -650,7 +652,7 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk) return STATUS_FAIL; } - mcu_cnt = (u8)(125/clk + 3); + mcu_cnt = (u8)(125 / clk + 3); if (mcu_cnt > 7) mcu_cnt = 7; @@ -681,9 +683,9 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk) rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); if (sd_vpclk_phase_reset) { rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, - PHASE_NOT_RESET, 0); + PHASE_NOT_RESET, 0); rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, - PHASE_NOT_RESET, PHASE_NOT_RESET); + PHASE_NOT_RESET, PHASE_NOT_RESET); } retval = rtsx_send_cmd(chip, 0, WAIT_TIME); @@ -850,7 +852,7 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk) } void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, - u32 byte_cnt, u8 pack_size) + u32 byte_cnt, u8 pack_size) { if (pack_size > DMA_1024) pack_size = DMA_512; @@ -864,11 +866,11 @@ void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, if (dir == DMA_FROM_DEVICE) { rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, - 0x03 | DMA_PACK_SIZE_MASK, + 0x03 | DMA_PACK_SIZE_MASK, DMA_DIR_FROM_CARD | DMA_EN | pack_size); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, - 0x03 | DMA_PACK_SIZE_MASK, + 0x03 | DMA_PACK_SIZE_MASK, DMA_DIR_TO_CARD | DMA_EN | pack_size); } @@ -978,13 +980,13 @@ int card_power_off(struct rtsx_chip *chip, u8 card) } int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, - u32 sec_addr, u16 sec_cnt) + u32 sec_addr, u16 sec_cnt) { int retval; unsigned int lun = SCSI_LUN(srb); int i; - if (chip->rw_card[lun] == NULL) { + if (!chip->rw_card[lun]) { rtsx_trace(chip); return STATUS_FAIL; } @@ -1115,7 +1117,7 @@ void turn_on_led(struct rtsx_chip *chip, u8 gpio) { if (CHECK_PID(chip, 0x5288)) rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), - (u8)(1 << gpio)); + (u8)(1 << gpio)); else rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0); } @@ -1126,7 +1128,7 @@ void turn_off_led(struct rtsx_chip *chip, u8 gpio) rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0); else rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), - (u8)(1 << gpio)); + (u8)(1 << gpio)); } int detect_card_cd(struct rtsx_chip *chip, int card) diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h index 
56df9a431d6d5105d6f002eb898aa2c4eb586866..aa37705bae395195e09baaa10833e032c2c630a6 100644 --- a/drivers/staging/rts5208/rtsx_card.h +++ b/drivers/staging/rts5208/rtsx_card.h @@ -1011,9 +1011,9 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk); int enable_card_clock(struct rtsx_chip *chip, u8 card); int disable_card_clock(struct rtsx_chip *chip, u8 card); int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, - u32 sec_addr, u16 sec_cnt); + u32 sec_addr, u16 sec_cnt); void trans_dma_enable(enum dma_data_direction dir, - struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size); + struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size); void toggle_gpio(struct rtsx_chip *chip, u8 gpio); void turn_on_led(struct rtsx_chip *chip, u8 gpio); void turn_off_led(struct rtsx_chip *chip, u8 gpio); @@ -1030,10 +1030,10 @@ u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun); static inline u32 get_card_size(struct rtsx_chip *chip, unsigned int lun) { #ifdef SUPPORT_SD_LOCK - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; if ((get_lun_card(chip, lun) == SD_CARD) && - (sd_card->sd_lock_status & SD_LOCKED)) + (sd_card->sd_lock_status & SD_LOCKED)) return 0; return chip->capacity[lun]; @@ -1073,25 +1073,25 @@ static inline int card_power_off_all(struct rtsx_chip *chip) static inline void rtsx_clear_xd_error(struct rtsx_chip *chip) { rtsx_write_register(chip, CARD_STOP, XD_STOP | XD_CLR_ERR, - XD_STOP | XD_CLR_ERR); + XD_STOP | XD_CLR_ERR); } static inline void rtsx_clear_sd_error(struct rtsx_chip *chip) { rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR, - SD_STOP | SD_CLR_ERR); + SD_STOP | SD_CLR_ERR); } static inline void rtsx_clear_ms_error(struct rtsx_chip *chip) { rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR, - MS_STOP | MS_CLR_ERR); + MS_STOP | MS_CLR_ERR); } static inline void rtsx_clear_spi_error(struct rtsx_chip *chip) { rtsx_write_register(chip, CARD_STOP, SPI_STOP | SPI_CLR_ERR, - SPI_STOP | SPI_CLR_ERR); + SPI_STOP | SPI_CLR_ERR); } #ifdef SUPPORT_SDIO_ASPM diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c index a10dd6220a7ba02c4e0bd8d26ac7532a60077a07..3511157a2c787d8ed7394113f49abc073a4996e7 100644 --- a/drivers/staging/rts5208/rtsx_chip.c +++ b/drivers/staging/rts5208/rtsx_chip.c @@ -114,7 +114,8 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip) if (chip->asic_code) { retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF, - MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU); + MS_INS_PU | SD_WP_PU | + SD_CD_PU | SD_CMD_PU); if (retval) { rtsx_trace(chip); return retval; @@ -240,10 +241,10 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip) return STATUS_FAIL; } } else { - retval = rtsx_write_register(chip, - FPGA_PULL_CTL, - FPGA_SD_PULL_CTL_BIT | 0x20, - 0); + retval = rtsx_write_register + (chip, FPGA_PULL_CTL, + FPGA_SD_PULL_CTL_BIT | 0x20, + 0); if (retval) { rtsx_trace(chip); return retval; @@ -713,7 +714,8 @@ int rtsx_reset_chip(struct rtsx_chip *chip) if (chip->ft2_fast_mode) { retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF, - MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON); + MS_PARTIAL_POWER_ON | + SD_PARTIAL_POWER_ON); if (retval) { rtsx_trace(chip); return retval; @@ -1567,7 +1569,8 @@ int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask, } retval = rtsx_write_register(chip, CFGRWCTL, 0xFF, - 0x80 | mode | ((func_no & 0x03) << 4)); + 0x80 | mode | + ((func_no & 0x03) << 4)); if (retval) { rtsx_trace(chip); return retval; diff 
--git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h index f36642817c6e8133b492b83f015c8dcdb032094b..4f6e3c1c4621618c93005e8dd5a8d05541ae3fd7 100644 --- a/drivers/staging/rts5208/rtsx_chip.h +++ b/drivers/staging/rts5208/rtsx_chip.h @@ -130,16 +130,20 @@ #define PRDCT_REV_LEN 4 /* Product LOT Length */ /* Dynamic flag definitions: used in set_bit() etc. */ -#define RTSX_FLIDX_TRANS_ACTIVE 18 /* 0x00040000 transfer is active */ -#define RTSX_FLIDX_ABORTING 20 /* 0x00100000 abort is in progress */ -#define RTSX_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect in progress */ +/* 0x00040000 transfer is active */ +#define RTSX_FLIDX_TRANS_ACTIVE 18 +/* 0x00100000 abort is in progress */ +#define RTSX_FLIDX_ABORTING 20 +/* 0x00200000 disconnect in progress */ +#define RTSX_FLIDX_DISCONNECTING 21 #define ABORTING_OR_DISCONNECTING ((1UL << US_FLIDX_ABORTING) | \ (1UL << US_FLIDX_DISCONNECTING)) -#define RTSX_FLIDX_RESETTING 22 /* 0x00400000 device reset in progress */ -#define RTSX_FLIDX_TIMED_OUT 23 /* 0x00800000 SCSI midlayer timed out */ - +/* 0x00400000 device reset in progress */ +#define RTSX_FLIDX_RESETTING 22 +/* 0x00800000 SCSI midlayer timed out */ +#define RTSX_FLIDX_TIMED_OUT 23 #define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */ #define RMB_DISC 0x80 /* The Device is Removable */ #define ANSI_SCSI2 0x02 /* Based on ANSI-SCSI2 */ @@ -285,23 +289,24 @@ struct sense_data_t { #define CARD_INT (XD_INT | MS_INT | SD_INT) #define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT) -#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | GPIO0_INT | OC_INT) +#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | \ + GPIO0_INT | OC_INT) #define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST) /* Bus interrupt enable register */ -#define CMD_DONE_INT_EN (1 << 31) -#define DATA_DONE_INT_EN (1 << 30) -#define TRANS_OK_INT_EN (1 << 29) -#define TRANS_FAIL_INT_EN (1 << 28) -#define XD_INT_EN (1 << 27) -#define MS_INT_EN (1 << 26) -#define SD_INT_EN (1 << 25) -#define GPIO0_INT_EN (1 << 24) -#define OC_INT_EN (1 << 23) +#define CMD_DONE_INT_EN BIT(31) +#define DATA_DONE_INT_EN BIT(30) +#define TRANS_OK_INT_EN BIT(29) +#define TRANS_FAIL_INT_EN BIT(28) +#define XD_INT_EN BIT(27) +#define MS_INT_EN BIT(26) +#define SD_INT_EN BIT(25) +#define GPIO0_INT_EN BIT(24) +#define OC_INT_EN BIT(23) #define DELINK_INT_EN GPIO0_INT_EN -#define MS_OC_INT_EN (1 << 23) -#define SD_OC_INT_EN (1 << 22) +#define MS_OC_INT_EN BIT(23) +#define SD_OC_INT_EN BIT(22) #define READ_REG_CMD 0 #define WRITE_REG_CMD 1 @@ -318,10 +323,10 @@ struct sense_data_t { #define MS_NR 3 #define XD_NR 4 #define SPI_NR 7 -#define SD_CARD (1 << SD_NR) -#define MS_CARD (1 << MS_NR) -#define XD_CARD (1 << XD_NR) -#define SPI_CARD (1 << SPI_NR) +#define SD_CARD BIT(SD_NR) +#define MS_CARD BIT(MS_NR) +#define XD_CARD BIT(XD_NR) +#define SPI_CARD BIT(SPI_NR) #define MAX_ALLOWED_LUN_CNT 8 @@ -393,14 +398,23 @@ struct zone_entry { /* SD card */ #define CHK_SD(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_SD) -#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HS)) -#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR50)) -#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_DDR50)) -#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR104)) -#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HCXC)) -#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity <= 
0x4000000)) -#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity > 0x4000000)) -#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || CHK_SD_DDR50(sd_card) || CHK_SD_SDR104(sd_card)) +#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && \ + ((sd_card)->sd_type & SD_HS)) +#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && \ + ((sd_card)->sd_type & SD_SDR50)) +#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && \ + ((sd_card)->sd_type & SD_DDR50)) +#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && \ + ((sd_card)->sd_type & SD_SDR104)) +#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && \ + ((sd_card)->sd_type & SD_HCXC)) +#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && \ + ((sd_card)->capacity <= 0x4000000)) +#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && \ + ((sd_card)->capacity > 0x4000000)) +#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || \ + CHK_SD_DDR50(sd_card) || \ + CHK_SD_SDR104(sd_card)) #define SET_SD(sd_card) ((sd_card)->sd_type = TYPE_SD) #define SET_SD_HS(sd_card) ((sd_card)->sd_type |= SD_HS) @@ -416,13 +430,20 @@ struct zone_entry { #define CLR_SD_HCXC(sd_card) ((sd_card)->sd_type &= ~SD_HCXC) /* MMC card */ -#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_MMC) -#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_26M)) -#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_52M)) -#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_4BIT)) -#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_8BIT)) -#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_SECTOR_MODE)) -#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_DDR52)) +#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == \ + TYPE_MMC) +#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && \ + ((sd_card)->sd_type & MMC_26M)) +#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && \ + ((sd_card)->sd_type & MMC_52M)) +#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && \ + ((sd_card)->sd_type & MMC_4BIT)) +#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && \ + ((sd_card)->sd_type & MMC_8BIT)) +#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && \ + ((sd_card)->sd_type & MMC_SECTOR_MODE)) +#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && \ + ((sd_card)->sd_type & MMC_DDR52)) #define SET_MMC(sd_card) ((sd_card)->sd_type = TYPE_MMC) #define SET_MMC_26M(sd_card) ((sd_card)->sd_type |= MMC_26M) @@ -439,7 +460,8 @@ struct zone_entry { #define CLR_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type &= ~MMC_SECTOR_MODE) #define CLR_MMC_DDR52(sd_card) ((sd_card)->sd_type &= ~MMC_DDR52) -#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && CHK_MMC_26M(sd_card)) +#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && \ + CHK_MMC_26M(sd_card)) #define CLR_MMC_HS(sd_card) \ do { \ CLR_MMC_DDR52(sd_card); \ @@ -450,12 +472,18 @@ do { \ #define SD_SUPPORT_CLASS_TEN 0x01 #define SD_SUPPORT_1V8 0x02 -#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_CLASS_TEN) -#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_CLASS_TEN) -#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_CLASS_TEN) -#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_1V8) -#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_1V8) -#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_1V8) +#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= \ + SD_SUPPORT_CLASS_TEN) +#define 
SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & \ + SD_SUPPORT_CLASS_TEN) +#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= \ + ~SD_SUPPORT_CLASS_TEN) +#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= \ + SD_SUPPORT_1V8) +#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & \ + SD_SUPPORT_1V8) +#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= \ + ~SD_SUPPORT_1V8) struct sd_info { u16 sd_type; @@ -544,9 +572,12 @@ struct xd_info { #define HG8BIT (MS_HG | MS_8BIT) #define CHK_MSPRO(ms_card) (((ms_card)->ms_type & 0xFF) == TYPE_MSPRO) -#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && (((ms_card)->ms_type & HG8BIT) == HG8BIT)) -#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_XC)) -#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_HG)) +#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && \ + (((ms_card)->ms_type & HG8BIT) == HG8BIT)) +#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && \ + ((ms_card)->ms_type & MS_XC)) +#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && \ + ((ms_card)->ms_type & MS_HG)) #define CHK_MS8BIT(ms_card) (((ms_card)->ms_type & MS_8BIT)) #define CHK_MS4BIT(ms_card) (((ms_card)->ms_type & MS_4BIT)) @@ -679,8 +710,10 @@ struct trace_msg_t { #define CLR_SDIO_EXIST(chip) ((chip)->sdio_func_exist &= ~SDIO_EXIST) #define CHK_SDIO_IGNORED(chip) ((chip)->sdio_func_exist & SDIO_IGNORED) -#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= SDIO_IGNORED) -#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= ~SDIO_IGNORED) +#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= \ + SDIO_IGNORED) +#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= \ + ~SDIO_IGNORED) struct rtsx_chip { struct rtsx_dev *rtsx; @@ -957,12 +990,12 @@ void rtsx_stop_cmd(struct rtsx_chip *chip, int card); int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data); int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data); int rtsx_write_cfg_dw(struct rtsx_chip *chip, - u8 func_no, u16 addr, u32 mask, u32 val); + u8 func_no, u16 addr, u32 mask, u32 val); int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val); int rtsx_write_cfg_seq(struct rtsx_chip *chip, - u8 func, u16 addr, u8 *buf, int len); + u8 func, u16 addr, u8 *buf, int len); int rtsx_read_cfg_seq(struct rtsx_chip *chip, - u8 func, u16 addr, u8 *buf, int len); + u8 func, u16 addr, u8 *buf, int len); int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val); int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val); int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val); diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c index becb4bba166c98a2fe8d0aa57b4d1a9f7cf4d99a..a95c5de1aa006107e2288788b2176d4ff63974bd 100644 --- a/drivers/staging/rts5208/rtsx_scsi.c +++ b/drivers/staging/rts5208/rtsx_scsi.c @@ -354,7 +354,7 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type) case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD: set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0, - ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1); + ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1); break; case SENSE_TYPE_FORMAT_IN_PROGRESS: @@ -397,10 +397,10 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type) } void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code, - u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0, + u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0, u16 sns_key_info1) { - 
struct sense_data_t *sense = &(chip->sense_buffer[lun]); + struct sense_data_t *sense = &chip->sense_buffer[lun]; sense->err_code = err_code; sense->sense_key = sense_key; @@ -436,7 +436,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip) #ifdef SUPPORT_SD_LOCK if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; if (sd_card->sd_lock_notify) { sd_card->sd_lock_notify = 0; @@ -444,7 +444,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip) return TRANSPORT_FAILED; } else if (sd_card->sd_lock_status & SD_LOCKED) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_READ_FORBIDDEN); + SENSE_TYPE_MEDIA_READ_FORBIDDEN); return TRANSPORT_FAILED; } } @@ -514,7 +514,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip) #ifdef SUPPORT_MAGIC_GATE if ((chip->mspro_formatter_enable) && - (chip->lun2card[lun] & MS_CARD)) + (chip->lun2card[lun] & MS_CARD)) #else if (chip->mspro_formatter_enable) #endif @@ -603,7 +603,7 @@ static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (prevent) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -615,13 +615,13 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct sense_data_t *sense; unsigned int lun = SCSI_LUN(srb); - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; unsigned char *tmp, *buf; - sense = &(chip->sense_buffer[lun]); + sense = &chip->sense_buffer[lun]; if ((get_lun_card(chip, lun) == MS_CARD) && - ms_card->pro_under_formatting) { + ms_card->pro_under_formatting) { if (ms_card->format_status == FORMAT_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); ms_card->pro_under_formatting = 0; @@ -629,7 +629,7 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else if (ms_card->format_status == FORMAT_IN_PROGRESS) { /* Logical Unit Not Ready Format in Progress */ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, - 0, (u16)(ms_card->progress)); + 0, (u16)(ms_card->progress)); } else { /* Format Command Failed */ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED); @@ -659,9 +659,9 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) } static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd, - int lun, u8 *buf, int buf_len) + int lun, u8 *buf, int buf_len) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int sys_info_offset; int data_size = buf_len; bool support_format = false; @@ -754,10 +754,10 @@ static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd, static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) { unsigned int lun = SCSI_LUN(srb); - unsigned int dataSize; + unsigned int data_size; int status; bool pro_formatter_flag; - unsigned char pageCode, *buf; + unsigned char page_code, *buf; u8 card = get_lun_card(chip, lun); #ifndef SUPPORT_MAGIC_GATE @@ -770,11 +770,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) #endif pro_formatter_flag = false; - dataSize = 8; + data_size = 8; #ifdef SUPPORT_MAGIC_GATE if ((chip->lun2card[lun] & MS_CARD)) { if (!card || (card == MS_CARD)) { - dataSize = 108; + data_size = 108; if (chip->mspro_formatter_enable) pro_formatter_flag = true; } @@ -783,28 +783,28 @@ static int mode_sense(struct scsi_cmnd *srb, struct 
rtsx_chip *chip) if (card == MS_CARD) { if (chip->mspro_formatter_enable) { pro_formatter_flag = true; - dataSize = 108; + data_size = 108; } } #endif - buf = kmalloc(dataSize, GFP_KERNEL); + buf = kmalloc(data_size, GFP_KERNEL); if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } - pageCode = srb->cmnd[2] & 0x3f; + page_code = srb->cmnd[2] & 0x3f; - if ((pageCode == 0x3F) || (pageCode == 0x1C) || - (pageCode == 0x00) || - (pro_formatter_flag && (pageCode == 0x20))) { + if ((page_code == 0x3F) || (page_code == 0x1C) || + (page_code == 0x00) || + (pro_formatter_flag && (page_code == 0x20))) { if (srb->cmnd[0] == MODE_SENSE) { - if ((pageCode == 0x3F) || (pageCode == 0x20)) { + if ((page_code == 0x3F) || (page_code == 0x20)) { ms_mode_sense(chip, srb->cmnd[0], - lun, buf, dataSize); + lun, buf, data_size); } else { - dataSize = 4; + data_size = 4; buf[0] = 0x03; buf[1] = 0x00; if (check_card_wp(chip, lun)) @@ -815,11 +815,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) buf[3] = 0x00; } } else { - if ((pageCode == 0x3F) || (pageCode == 0x20)) { + if ((page_code == 0x3F) || (page_code == 0x20)) { ms_mode_sense(chip, srb->cmnd[0], - lun, buf, dataSize); + lun, buf, data_size); } else { - dataSize = 8; + data_size = 8; buf[0] = 0x00; buf[1] = 0x06; buf[2] = 0x00; @@ -842,7 +842,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (status == TRANSPORT_GOOD) { unsigned int len = min_t(unsigned int, scsi_bufflen(srb), - dataSize); + data_size); rtsx_stor_set_xfer_buf(buf, len, srb); scsi_set_resid(srb, scsi_bufflen(srb) - len); } @@ -854,7 +854,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip) { #ifdef SUPPORT_SD_LOCK - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; #endif unsigned int lun = SCSI_LUN(srb); int retval; @@ -896,7 +896,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (sd_card->sd_lock_status & SD_LOCKED) { dev_dbg(rtsx_dev(chip), "SD card locked!\n"); set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_READ_FORBIDDEN); + SENSE_TYPE_MEDIA_READ_FORBIDDEN); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -932,7 +932,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip) * need to judge start_sec at first */ if ((start_sec > get_card_size(chip, lun)) || - ((start_sec + sec_cnt) > get_card_size(chip, lun))) { + ((start_sec + sec_cnt) > get_card_size(chip, lun))) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -947,7 +947,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip) dev_dbg(rtsx_dev(chip), "read/write fail three times in succession\n"); if (srb->sc_data_direction == DMA_FROM_DEVICE) set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); else set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR); @@ -959,7 +959,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (check_card_wp(chip, lun)) { dev_dbg(rtsx_dev(chip), "Write protected card!\n"); set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_PROTECT); + SENSE_TYPE_MEDIA_WRITE_PROTECT); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -973,15 +973,16 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else { chip->rw_fail_cnt[lun]++; if (srb->sc_data_direction == DMA_FROM_DEVICE) - set_sense_type(chip, lun, - 
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + set_sense_type + (chip, lun, + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); else set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); } retval = TRANSPORT_FAILED; rtsx_trace(chip); - goto Exit; + goto exit; } else { chip->rw_fail_cnt[lun] = 0; retval = TRANSPORT_GOOD; @@ -989,7 +990,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip) scsi_set_resid(srb, 0); -Exit: +exit: return retval; } @@ -1025,8 +1026,8 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip) /* Capacity List Length */ if ((buf_len > 12) && chip->mspro_formatter_enable && - (chip->lun2card[lun] & MS_CARD) && - (!card || (card == MS_CARD))) { + (chip->lun2card[lun] & MS_CARD) && + (!card || (card == MS_CARD))) { buf[i++] = 0x10; desc_cnt = 2; } else { @@ -1143,7 +1144,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1153,7 +1154,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1195,7 +1196,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = spi_erase_eeprom_chip(chip); if (retval != STATUS_SUCCESS) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1216,7 +1217,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1247,7 +1248,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (addr < 0xFC00) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1271,7 +1272,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1305,7 +1306,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (addr < 0xFC00) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1333,7 +1334,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1346,7 +1347,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip) static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; unsigned int lun = SCSI_LUN(srb); if (!check_card_ready(chip, lun)) { @@ -1399,7 +1400,7 @@ static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip) if 
((scsi_bufflen(srb) < buf_len) || !scsi_sglist(srb)) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1522,9 +1523,9 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (srb->cmnd[3] == 1) { /* Variable Clock */ - struct xd_info *xd_card = &(chip->xd_card); - struct sd_info *sd_card = &(chip->sd_card); - struct ms_info *ms_card = &(chip->ms_card); + struct xd_info *xd_card = &chip->xd_card; + struct sd_info *sd_card = &chip->sd_card; + struct ms_info *ms_card = &chip->ms_card; switch (srb->cmnd[4]) { case XD_CARD: @@ -1541,7 +1542,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip) default: set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1556,7 +1557,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_disable_aspm(chip); if (chip->ss_en && - (rtsx_get_stat(chip) == RTSX_STAT_SS)) { + (rtsx_get_stat(chip) == RTSX_STAT_SS)) { rtsx_exit_ss(chip); wait_timeout(100); } @@ -1565,7 +1566,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = rtsx_force_power_on(chip, SSC_PDCTL); if (retval != STATUS_SUCCESS) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1586,9 +1587,9 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip) unsigned int lun = SCSI_LUN(srb); if (srb->cmnd[3] == 1) { - struct xd_info *xd_card = &(chip->xd_card); - struct sd_info *sd_card = &(chip->sd_card); - struct ms_info *ms_card = &(chip->ms_card); + struct xd_info *xd_card = &chip->xd_card; + struct sd_info *sd_card = &chip->sd_card; + struct ms_info *ms_card = &chip->ms_card; u8 tmp; switch (srb->cmnd[4]) { @@ -1606,7 +1607,7 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip) default: set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1648,14 +1649,15 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip) dev_dbg(rtsx_dev(chip), "Write to device\n"); retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len, - scsi_sg_count(srb), srb->sc_data_direction, 1000); + scsi_sg_count(srb), srb->sc_data_direction, + 1000); if (retval < 0) { if (srb->sc_data_direction == DMA_FROM_DEVICE) set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); else set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -1667,8 +1669,8 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip) static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); - struct ms_info *ms_card = &(chip->ms_card); + struct sd_info *sd_card = &chip->sd_card; + struct ms_info *ms_card = &chip->ms_card; int buf_len; unsigned int lun = SCSI_LUN(srb); u8 card = get_lun_card(chip, lun); @@ -1699,8 +1701,8 @@ static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) #ifdef SUPPORT_OCP status[8] = 0; - if (CHECK_LUN_MODE(chip, - SD_MS_2LUN) && (chip->lun2card[lun] == MS_CARD)) { + if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && + (chip->lun2card[lun] == MS_CARD)) { 
oc_now_mask = MS_OC_NOW; oc_ever_mask = MS_OC_EVER; } else { @@ -1804,7 +1806,7 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (!CHECK_PID(chip, 0x5208)) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1884,7 +1886,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip) cmd_type = srb->cmnd[4]; if (cmd_type > 2) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1903,7 +1905,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip) value = *(rtsx_get_cmd_data(chip) + idx); if (scsi_bufflen(srb) < 1) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1971,7 +1973,7 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -1980,8 +1982,9 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = rtsx_read_phy_register(chip, addr + i, &val); if (retval != STATUS_SUCCESS) { vfree(buf); - set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + set_sense_type + (chip, SCSI_LUN(srb), + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2039,7 +2042,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2050,7 +2053,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2090,7 +2093,7 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = spi_erase_eeprom_chip(chip); if (retval != STATUS_SUCCESS) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2098,13 +2101,13 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = spi_erase_eeprom_byte(chip, addr); if (retval != STATUS_SUCCESS) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } } else { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2139,7 +2142,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2149,7 +2152,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + 
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2204,7 +2207,7 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2242,7 +2245,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2252,7 +2255,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { vfree(buf); set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2311,7 +2314,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) } retval = rtsx_write_register(chip, PWR_GATE_CTRL, - LDO3318_PWR_MASK, LDO_OFF); + LDO3318_PWR_MASK, LDO_OFF); if (retval != STATUS_SUCCESS) { vfree(buf); rtsx_trace(chip); @@ -2321,7 +2324,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) wait_timeout(600); retval = rtsx_write_phy_register(chip, 0x08, - 0x4C00 | chip->phy_voltage); + 0x4C00 | chip->phy_voltage); if (retval != STATUS_SUCCESS) { vfree(buf); rtsx_trace(chip); @@ -2329,7 +2332,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) } retval = rtsx_write_register(chip, PWR_GATE_CTRL, - LDO3318_PWR_MASK, LDO_ON); + LDO3318_PWR_MASK, LDO_ON); if (retval != STATUS_SUCCESS) { vfree(buf); rtsx_trace(chip); @@ -2352,14 +2355,14 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = rtsx_write_efuse(chip, addr + i, buf[i]); if (retval != STATUS_SUCCESS) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); result = TRANSPORT_FAILED; rtsx_trace(chip); - goto Exit; + goto exit; } } -Exit: +exit: vfree(buf); retval = card_power_off(chip, SPI_CARD); @@ -2370,7 +2373,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (chip->asic_code) { retval = rtsx_write_register(chip, PWR_GATE_CTRL, - LDO3318_PWR_MASK, LDO_OFF); + LDO3318_PWR_MASK, LDO_OFF); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return TRANSPORT_ERROR; @@ -2385,7 +2388,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) } retval = rtsx_write_register(chip, PWR_GATE_CTRL, - LDO3318_PWR_MASK, LDO_ON); + LDO3318_PWR_MASK, LDO_ON); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return TRANSPORT_ERROR; @@ -2425,7 +2428,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (func > func_max) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2439,7 +2442,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = rtsx_read_cfg_seq(chip, func, addr, buf, len); if (retval != STATUS_SUCCESS) { set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); vfree(buf); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -2484,7 +2487,7 @@ static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (func > func_max) { set_sense_type(chip, SCSI_LUN(srb), - 
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2593,7 +2596,7 @@ static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip) default: set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2670,7 +2673,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (get_lun_card(chip, lun) == XD_CARD) { rtsx_status[13] = 0x40; } else if (get_lun_card(chip, lun) == SD_CARD) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; rtsx_status[13] = 0x20; if (CHK_SD(sd_card)) { @@ -2686,7 +2689,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_status[13] |= 0x04; } } else if (get_lun_card(chip, lun) == MS_CARD) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; if (CHK_MSPRO(ms_card)) { rtsx_status[13] = 0x38; @@ -2881,7 +2884,7 @@ static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) default: set_sense_type(chip, SCSI_LUN(srb), - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -2895,14 +2898,15 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip) unsigned int lun = SCSI_LUN(srb); u16 sec_cnt; - if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) + if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) { sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8]; - else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) { + } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) { sec_cnt = srb->cmnd[4]; if (sec_cnt == 0) sec_cnt = 256; - } else + } else { return; + } if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) { toggle_gpio(chip, LED_GPIO); @@ -2915,7 +2919,7 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip) static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; unsigned int lun = SCSI_LUN(srb); bool quick_format; int retval; @@ -2927,7 +2931,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) } if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) || - (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) || + (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) || (srb->cmnd[7] != 0x74)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); @@ -2941,7 +2945,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) wait_timeout(100); if (!check_card_ready(chip, lun) || - (get_card_size(chip, lun) == 0)) { + (get_card_size(chip, lun) == 0)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -2986,7 +2990,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) #ifdef SUPPORT_PCGL_1P18 static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; unsigned int lun = SCSI_LUN(srb); u8 dev_info_id, data_len; u8 *buf; @@ -3005,8 +3009,8 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip) } if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) || - (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) || - (srb->cmnd[7] != 0x44)) { + (srb->cmnd[5] != 0x53) || (srb->cmnd[6] 
!= 0x49) || + (srb->cmnd[7] != 0x44)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -3014,17 +3018,20 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip) dev_info_id = srb->cmnd[3]; if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) || - (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) || - !CHK_MSPRO(ms_card)) { + (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) || + !CHK_MSPRO(ms_card)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } - if (dev_info_id == 0x15) - buf_len = data_len = 0x3A; - else - buf_len = data_len = 0x6A; + if (dev_info_id == 0x15) { + buf_len = 0x3A; + data_len = 0x3A; + } else { + buf_len = 0x6A; + data_len = 0x6A; + } buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) { @@ -3100,7 +3107,7 @@ static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) } #ifdef SUPPORT_CPRM -static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) +static int sd_extension_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) { unsigned int lun = SCSI_LUN(srb); int result; @@ -3164,7 +3171,7 @@ static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip) #ifdef SUPPORT_MAGIC_GATE static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; unsigned int lun = SCSI_LUN(srb); int retval; u8 key_format; @@ -3208,8 +3215,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) switch (key_format) { case KF_GET_LOC_EKB: if ((scsi_bufflen(srb) == 0x41C) && - (srb->cmnd[8] == 0x04) && - (srb->cmnd[9] == 0x1C)) { + (srb->cmnd[8] == 0x04) && + (srb->cmnd[9] == 0x1C)) { retval = mg_get_local_EKB(srb, chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -3218,7 +3225,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -3226,8 +3233,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) case KF_RSP_CHG: if ((scsi_bufflen(srb) == 0x24) && - (srb->cmnd[8] == 0x00) && - (srb->cmnd[9] == 0x24)) { + (srb->cmnd[8] == 0x00) && + (srb->cmnd[9] == 0x24)) { retval = mg_get_rsp_chg(srb, chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -3236,7 +3243,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -3245,12 +3252,12 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) case KF_GET_ICV: ms_card->mg_entry_num = srb->cmnd[5]; if ((scsi_bufflen(srb) == 0x404) && - (srb->cmnd[8] == 0x04) && - (srb->cmnd[9] == 0x04) && - (srb->cmnd[2] == 0x00) && - (srb->cmnd[3] == 0x00) && - (srb->cmnd[4] == 0x00) && - (srb->cmnd[5] < 32)) { + (srb->cmnd[8] == 0x04) && + (srb->cmnd[9] == 0x04) && + (srb->cmnd[2] == 0x00) && + (srb->cmnd[3] == 0x00) && + (srb->cmnd[4] == 0x00) && + (srb->cmnd[5] < 32)) { retval = mg_get_ICV(srb, chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -3259,7 +3266,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); 
return TRANSPORT_FAILED; } @@ -3277,7 +3284,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; unsigned int lun = SCSI_LUN(srb); int retval; u8 key_format; @@ -3326,8 +3333,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) switch (key_format) { case KF_SET_LEAF_ID: if ((scsi_bufflen(srb) == 0x0C) && - (srb->cmnd[8] == 0x00) && - (srb->cmnd[9] == 0x0C)) { + (srb->cmnd[8] == 0x00) && + (srb->cmnd[9] == 0x0C)) { retval = mg_set_leaf_id(srb, chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -3336,7 +3343,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -3344,8 +3351,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) case KF_CHG_HOST: if ((scsi_bufflen(srb) == 0x0C) && - (srb->cmnd[8] == 0x00) && - (srb->cmnd[9] == 0x0C)) { + (srb->cmnd[8] == 0x00) && + (srb->cmnd[9] == 0x0C)) { retval = mg_chg(srb, chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -3354,7 +3361,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -3362,8 +3369,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) case KF_RSP_HOST: if ((scsi_bufflen(srb) == 0x0C) && - (srb->cmnd[8] == 0x00) && - (srb->cmnd[9] == 0x0C)) { + (srb->cmnd[8] == 0x00) && + (srb->cmnd[9] == 0x0C)) { retval = mg_rsp(srb, chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -3372,7 +3379,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -3381,12 +3388,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) case KF_SET_ICV: ms_card->mg_entry_num = srb->cmnd[5]; if ((scsi_bufflen(srb) == 0x404) && - (srb->cmnd[8] == 0x04) && - (srb->cmnd[9] == 0x04) && - (srb->cmnd[2] == 0x00) && - (srb->cmnd[3] == 0x00) && - (srb->cmnd[4] == 0x00) && - (srb->cmnd[5] < 32)) { + (srb->cmnd[8] == 0x04) && + (srb->cmnd[9] == 0x04) && + (srb->cmnd[2] == 0x00) && + (srb->cmnd[3] == 0x00) && + (srb->cmnd[4] == 0x00) && + (srb->cmnd[5] < 32)) { retval = mg_set_ICV(srb, chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -3395,7 +3402,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; } @@ -3415,9 +3422,9 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip) int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip) { #ifdef SUPPORT_SD_LOCK - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; #endif - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; unsigned int lun = SCSI_LUN(srb); int result; @@ -3427,9 +3434,9 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip) * REQUEST_SENSE and rs_ppstatus */ if (!((srb->cmnd[0] == 
VENDOR_CMND) &&
-		(srb->cmnd[1] == SCSI_APP_CMD) &&
-		(srb->cmnd[2] == GET_DEV_STATUS)) &&
-		(srb->cmnd[0] != REQUEST_SENSE)) {
+	      (srb->cmnd[1] == SCSI_APP_CMD) &&
+	      (srb->cmnd[2] == GET_DEV_STATUS)) &&
+	    (srb->cmnd[0] != REQUEST_SENSE)) {
 			/* Logical Unit Not Ready Format in Progress */
 			set_sense_data(chip, lun, CUR_ERR,
 				       0x02, 0, 0x04, 0x04, 0, 0);
@@ -3440,12 +3447,12 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 #endif
 
 	if ((get_lun_card(chip, lun) == MS_CARD) &&
-			(ms_card->format_status == FORMAT_IN_PROGRESS)) {
+	    (ms_card->format_status == FORMAT_IN_PROGRESS)) {
 		if ((srb->cmnd[0] != REQUEST_SENSE) &&
-				(srb->cmnd[0] != INQUIRY)) {
+		    (srb->cmnd[0] != INQUIRY)) {
 			/* Logical Unit Not Ready Format in Progress */
 			set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
-					0, (u16)(ms_card->progress));
+				       0, (u16)(ms_card->progress));
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3510,7 +3517,7 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	case SD_EXECUTE_WRITE:
 	case SD_GET_RSP:
 	case SD_HW_RST:
-		result = sd_extention_cmnd(srb, chip);
+		result = sd_extension_cmnd(srb, chip);
 		break;
 #endif
 
diff --git a/drivers/staging/rts5208/rtsx_scsi.h b/drivers/staging/rts5208/rtsx_scsi.h
index 03dd76d6c85924aefbc04c5ceb59af051225ef71..30f3724848fe57f3bfb5823a7ae204b698ef031b 100644
--- a/drivers/staging/rts5208/rtsx_scsi.h
+++ b/drivers/staging/rts5208/rtsx_scsi.h
@@ -136,8 +136,8 @@ void scsi_show_command(struct rtsx_chip *chip);
 void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type);
 void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
-		u8 sense_key, u32 info, u8 asc, u8 ascq,
-		u8 sns_key_info0, u16 sns_key_info1);
+		    u8 sense_key, u32 info, u8 asc, u8 ascq,
+		    u8 sns_key_info0, u16 sns_key_info1);
 int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip);
 
 #endif /* __REALTEK_RTSX_SCSI_H */
diff --git a/drivers/staging/rts5208/rtsx_sys.h b/drivers/staging/rts5208/rtsx_sys.h
index f49bed9ec76a6ea0e2980e9e4288f94f228f925a..817700c0d79464c64e4f271aed9afec8c79e9689 100644
--- a/drivers/staging/rts5208/rtsx_sys.h
+++ b/drivers/staging/rts5208/rtsx_sys.h
@@ -32,9 +32,9 @@ static inline void rtsx_exclusive_enter_ss(struct rtsx_chip *chip)
 {
 	struct rtsx_dev *dev = chip->rtsx;
 
-	spin_lock(&(dev->reg_lock));
+	spin_lock(&dev->reg_lock);
 	rtsx_enter_ss(chip);
-	spin_unlock(&(dev->reg_lock));
+	spin_unlock(&dev->reg_lock);
 }
 
 static inline void rtsx_reset_detected_cards(struct rtsx_chip *chip, int flag)
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
index 479137398c3d65e66859114e4d892566dbc2149a..99740c33f2fbcb994e2770dccad8b079d52e9688 100644
--- a/drivers/staging/rts5208/rtsx_transport.h
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -30,18 +30,21 @@
 #define WAIT_TIME 2000
 
 unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
-	unsigned int *offset, enum xfer_buf_dir dir);
-void rtsx_stor_set_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb);
-void rtsx_stor_get_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb);
+				       unsigned int buflen,
+				       struct scsi_cmnd *srb,
+				       unsigned int *index,
+				       unsigned int *offset,
+				       enum xfer_buf_dir dir);
+void rtsx_stor_set_xfer_buf(unsigned char *buffer, unsigned int buflen,
+			    struct scsi_cmnd *srb);
+void rtsx_stor_get_xfer_buf(unsigned char *buffer, unsigned int buflen,
+			    struct scsi_cmnd *srb);
 
 void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip);
 
 #define rtsx_init_cmd(chip) ((chip)->ci = 0)
 
-void rtsx_add_cmd(struct rtsx_chip *chip,
-	u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+void rtsx_add_cmd(struct rtsx_chip *chip, u8 cmd_type, u16 reg_addr, u8 mask,
+		  u8 data);
 void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
 int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
 
@@ -55,11 +58,12 @@ static inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
 }
 
 int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
-	int use_sg, enum dma_data_direction dma_dir, int timeout);
+		       int use_sg, enum dma_data_direction dma_dir,
+		       int timeout);
 
-int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
-	void *buf, size_t len,
-	int use_sg, unsigned int *index, unsigned int *offset,
-	enum dma_data_direction dma_dir, int timeout);
+int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, void *buf,
+			       size_t len, int use_sg, unsigned int *index,
+			       unsigned int *offset,
+			       enum dma_data_direction dma_dir, int timeout);
 
 #endif /* __REALTEK_RTSX_TRANSPORT_H */
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index b0bbb36f89889003b4972f5e84893d17f50e3c33..bdd35b611f2736d10bcc91cfd7188b886d261444 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -56,21 +56,21 @@ static u16 REG_SD_DCMPS1_CTL;
 
 static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	sd_card->err_code |= err_code;
 }
 
 static inline void sd_clr_err_code(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	sd_card->err_code = 0;
 }
 
 static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	return sd_card->err_code & err_code;
 }
@@ -124,9 +124,9 @@ static int sd_check_data0_status(struct rtsx_chip *chip)
 }
 
 static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
-	u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
+			       u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int timeout = 100;
 	u16 reg_addr;
@@ -153,11 +153,12 @@ static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-		0x01, PINGPONG_BUFFER);
+		     0x01, PINGPONG_BUFFER);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
-		0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+		     0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
-		SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END | SD_STAT_IDLE);
+		     SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END |
+		     SD_STAT_IDLE);
 
 	if (rsp_type == SD_RSP_TYPE_R2) {
 		for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
@@ -238,7 +239,7 @@ static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
 	if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
 		if ((cmd_idx != SEND_RELATIVE_ADDR) &&
-			(cmd_idx != SEND_IF_COND)) {
+		    (cmd_idx != SEND_IF_COND)) {
 			if (cmd_idx != STOP_TRANSMISSION) {
 				if (ptr[1] & 0x80) {
 					rtsx_trace(chip);
@@ -285,7 +286,7 @@ static int sd_read_data(struct rtsx_chip *chip,
 			u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
 			int timeout)
 {
-	struct sd_info *sd_card = 
&(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; int i; @@ -308,27 +309,27 @@ static int sd_read_data(struct rtsx_chip *chip, 0xFF, cmd[i]); } rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, - (u8)byte_cnt); + (u8)byte_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, - (u8)(byte_cnt >> 8)); + (u8)(byte_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, - (u8)blk_cnt); + (u8)blk_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, - (u8)(blk_cnt >> 8)); + (u8)(blk_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, - SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END| - SD_CHECK_CRC7 | SD_RSP_LEN_6); + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | + SD_CHECK_CRC7 | SD_RSP_LEN_6); if (trans_mode != SD_TM_AUTO_TUNING) rtsx_add_cmd(chip, WRITE_REG_CMD, - CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, - trans_mode | SD_TRANSFER_START); + trans_mode | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, - SD_TRANSFER_END); + SD_TRANSFER_END); retval = rtsx_send_cmd(chip, SD_CARD, timeout); if (retval < 0) { @@ -353,10 +354,10 @@ static int sd_read_data(struct rtsx_chip *chip, } static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode, - u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width, - u8 *buf, int buf_len, int timeout) + u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, + u8 bus_width, u8 *buf, int buf_len, int timeout) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; int i; @@ -389,30 +390,30 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode, } } rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, - (u8)byte_cnt); + (u8)byte_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, - (u8)(byte_cnt >> 8)); + (u8)(byte_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, - (u8)blk_cnt); + (u8)blk_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, - (u8)(blk_cnt >> 8)); + (u8)(blk_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, - SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | - SD_CHECK_CRC7 | SD_RSP_LEN_6); + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | + SD_CHECK_CRC7 | SD_RSP_LEN_6); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, - trans_mode | SD_TRANSFER_START); + trans_mode | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, - SD_TRANSFER_END); + SD_TRANSFER_END); retval = rtsx_send_cmd(chip, SD_CARD, timeout); if (retval < 0) { if (retval == -ETIMEDOUT) { - sd_send_cmd_get_rsp(chip, SEND_STATUS, - sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); + sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, + SD_RSP_TYPE_R1, NULL, 0); } rtsx_trace(chip); @@ -424,7 +425,7 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode, static int sd_check_csd(struct rtsx_chip *chip, char check_wp) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; int i; u8 csd_ver, trans_speed; @@ -438,7 +439,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp) } retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr, - SD_RSP_TYPE_R2, rsp, 
16); + SD_RSP_TYPE_R2, rsp, 16); if (retval == STATUS_SUCCESS) break; } @@ -534,7 +535,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp) static int sd_set_sample_push_timing(struct rtsx_chip *chip) { int retval; - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; u8 val = 0; if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY) @@ -573,7 +574,7 @@ static int sd_set_sample_push_timing(struct rtsx_chip *chip) static void sd_choose_proper_clock(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; if (CHK_SD_SDR104(sd_card)) { if (chip->asic_code) @@ -637,7 +638,7 @@ static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div) static int sd_set_init_para(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; retval = sd_set_sample_push_timing(chip); @@ -659,7 +660,7 @@ static int sd_set_init_para(struct rtsx_chip *chip) int sd_select_card(struct rtsx_chip *chip, int select) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 cmd_idx, cmd_type; u32 addr; @@ -686,12 +687,12 @@ int sd_select_card(struct rtsx_chip *chip, int select) #ifdef SUPPORT_SD_LOCK static int sd_update_lock_status(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 rsp[5]; retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, - SD_RSP_TYPE_R1, rsp, 5); + SD_RSP_TYPE_R1, rsp, 5); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -715,23 +716,23 @@ static int sd_update_lock_status(struct rtsx_chip *chip) #endif static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state, - u8 data_ready, int polling_cnt) + u8 data_ready, int polling_cnt) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval, i; u8 rsp[5]; for (i = 0; i < polling_cnt; i++) { retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, - sd_card->sd_addr, SD_RSP_TYPE_R1, rsp, - 5); + sd_card->sd_addr, SD_RSP_TYPE_R1, + rsp, 5); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; } if (((rsp[3] & 0x1E) == state) && - ((rsp[3] & 0x01) == data_ready)) + ((rsp[3] & 0x01) == data_ready)) return STATUS_SUCCESS; } @@ -746,8 +747,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage) if (voltage == SD_IO_3V3) { if (chip->asic_code) { retval = rtsx_write_phy_register(chip, 0x08, - 0x4FC0 | - chip->phy_voltage); + 0x4FC0 | + chip->phy_voltage); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -763,8 +764,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage) } else if (voltage == SD_IO_1V8) { if (chip->asic_code) { retval = rtsx_write_phy_register(chip, 0x08, - 0x4C40 | - chip->phy_voltage); + 0x4C40 | + chip->phy_voltage); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -800,7 +801,7 @@ static int sd_voltage_switch(struct rtsx_chip *chip) } retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1, - NULL, 0); + NULL, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -851,8 +852,8 @@ static int sd_voltage_switch(struct rtsx_chip *chip) (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | SD_DAT1_STATUS | SD_DAT0_STATUS)) { dev_dbg(rtsx_dev(chip), "SD_BUS_STAT: 0x%x\n", stat); - rtsx_write_register(chip, 
SD_BUS_STAT, - SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); + rtsx_write_register(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN | + SD_CLK_FORCE_STOP, 0); rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0); rtsx_trace(chip); return STATUS_FAIL; @@ -903,7 +904,7 @@ static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir) static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; u16 SD_VP_CTL, SD_DCMPS_CTL; u8 val; int retval; @@ -968,7 +969,9 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) } udelay(50); retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF, - PHASE_CHANGE | PHASE_NOT_RESET | sample_point); + PHASE_CHANGE | + PHASE_NOT_RESET | + sample_point); if (retval) { rtsx_trace(chip); return retval; @@ -982,7 +985,8 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) } udelay(50); retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF, - PHASE_NOT_RESET | sample_point); + PHASE_NOT_RESET | + sample_point); if (retval) { rtsx_trace(chip); return retval; @@ -992,24 +996,24 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE, - DCMPS_CHANGE); + DCMPS_CHANGE); rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL, - DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE); + DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE); retval = rtsx_send_cmd(chip, SD_CARD, 100); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto Fail; + goto fail; } val = *rtsx_get_cmd_data(chip); if (val & DCMPS_ERROR) { rtsx_trace(chip); - goto Fail; + goto fail; } if ((val & DCMPS_CURRENT_PHASE) != sample_point) { rtsx_trace(chip); - goto Fail; + goto fail; } retval = rtsx_write_register(chip, SD_DCMPS_CTL, @@ -1045,7 +1049,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) return STATUS_SUCCESS; -Fail: +fail: rtsx_read_register(chip, SD_VP_CTL, &val); dev_dbg(rtsx_dev(chip), "SD_VP_CTL: 0x%x\n", val); rtsx_read_register(chip, SD_DCMPS_CTL, &val); @@ -1060,12 +1064,12 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 cmd[5], buf[8]; retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1078,7 +1082,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width) cmd[4] = 0; retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width, - buf, 8, 250); + buf, 8, 250); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); rtsx_trace(chip); @@ -1096,7 +1100,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width) } static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group, - u8 func_to_switch, u8 *buf, int buf_len) + u8 func_to_switch, u8 *buf, int buf_len) { u8 support_mask = 0, query_switch = 0, switch_busy = 0; int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0; @@ -1198,7 +1202,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group, if (func_group == SD_FUNC_GROUP_1) { if (!(buf[support_offset] & support_mask) || - ((buf[query_switch_offset] & 0x0F) != query_switch)) { + ((buf[query_switch_offset] & 0x0F) != 
query_switch)) { rtsx_trace(chip); return STATUS_FAIL; } @@ -1206,7 +1210,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group, /* Check 'Busy Status' */ if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) && - ((buf[check_busy_offset] & switch_busy) == switch_busy)) { + ((buf[check_busy_offset] & switch_busy) == switch_busy)) { rtsx_trace(chip); return STATUS_FAIL; } @@ -1214,10 +1218,10 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group, return STATUS_SUCCESS; } -static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, - u8 func_group, u8 func_to_switch, u8 bus_width) +static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group, + u8 func_to_switch, u8 bus_width) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 cmd[5], buf[64]; @@ -1247,7 +1251,7 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, } retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width, - buf, 64, 250); + buf, 64, 250); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); rtsx_trace(chip); @@ -1326,7 +1330,7 @@ static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch) } static int sd_check_switch(struct rtsx_chip *chip, - u8 func_group, u8 func_to_switch, u8 bus_width) + u8 func_group, u8 func_to_switch, u8 bus_width) { int retval; int i; @@ -1340,12 +1344,14 @@ static int sd_check_switch(struct rtsx_chip *chip, } retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group, - func_to_switch, bus_width); + func_to_switch, bus_width); if (retval == STATUS_SUCCESS) { u8 stat; retval = sd_check_switch_mode(chip, SD_SWITCH_MODE, - func_group, func_to_switch, bus_width); + func_group, + func_to_switch, + bus_width); if (retval == STATUS_SUCCESS) { switch_good = true; break; @@ -1364,7 +1370,7 @@ static int sd_check_switch(struct rtsx_chip *chip, } func_to_switch = downgrade_switch_mode(func_group, - func_to_switch); + func_to_switch); wait_timeout(20); } @@ -1379,14 +1385,14 @@ static int sd_check_switch(struct rtsx_chip *chip, static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; int i; u8 func_to_switch = 0; /* Get supported functions */ - retval = sd_check_switch_mode(chip, SD_CHECK_MODE, - NO_ARGUMENT, NO_ARGUMENT, bus_width); + retval = sd_check_switch_mode(chip, SD_CHECK_MODE, NO_ARGUMENT, + NO_ARGUMENT, bus_width); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1396,24 +1402,24 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width) /* Function Group 1: Access Mode */ for (i = 0; i < 4; i++) { - switch ((u8)(chip->sd_speed_prior >> (i*8))) { + switch ((u8)(chip->sd_speed_prior >> (i * 8))) { case SDR104_SUPPORT: - if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) - && chip->sdr104_en) { + if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) && + chip->sdr104_en) { func_to_switch = SDR104_SUPPORT; } break; case DDR50_SUPPORT: - if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) - && chip->ddr50_en) { + if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) && + chip->ddr50_en) { func_to_switch = DDR50_SUPPORT; } break; case SDR50_SUPPORT: - if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) - && chip->sdr50_en) { + if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) && + chip->sdr50_en) { func_to_switch = SDR50_SUPPORT; } break; @@ -1430,7 +1436,6 @@ static int 
sd_switch_function(struct rtsx_chip *chip, u8 bus_width) if (func_to_switch) break; - } dev_dbg(rtsx_dev(chip), "SD_FUNC_GROUP_1: func_to_switch = 0x%02x", func_to_switch); @@ -1446,7 +1451,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width) if (func_to_switch) { retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch, - bus_width); + bus_width); if (retval != STATUS_SUCCESS) { if (func_to_switch == SDR104_SUPPORT) { sd_card->sd_switch_fail = SDR104_SUPPORT_MASK; @@ -1496,7 +1501,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width) func_to_switch = 0xFF; for (i = 0; i < 4; i++) { - switch ((u8)(chip->sd_current_prior >> (i*8))) { + switch ((u8)(chip->sd_current_prior >> (i * 8))) { case CURRENT_LIMIT_800: if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK) func_to_switch = CURRENT_LIMIT_800; @@ -1534,7 +1539,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width) if (func_to_switch <= CURRENT_LIMIT_800) { retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch, - bus_width); + bus_width); if (retval != STATUS_SUCCESS) { if (sd_check_err_code(chip, SD_NO_CARD)) { rtsx_trace(chip); @@ -1596,8 +1601,8 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) cmd[3] = 0; cmd[4] = 0; - retval = sd_read_data(chip, SD_TM_AUTO_TUNING, - cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100); + retval = sd_read_data(chip, SD_TM_AUTO_TUNING, cmd, 5, 0x40, 1, + SD_BUS_WIDTH_4, NULL, 0, 100); if (retval != STATUS_SUCCESS) { (void)sd_wait_data_idle(chip); @@ -1611,7 +1616,7 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 cmd[5]; @@ -1624,7 +1629,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) dev_dbg(rtsx_dev(chip), "sd ddr tuning rx\n"); retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1636,8 +1641,8 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) cmd[3] = 0; cmd[4] = 0; - retval = sd_read_data(chip, SD_TM_NORMAL_READ, - cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100); + retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, + SD_BUS_WIDTH_4, NULL, 0, 100); if (retval != STATUS_SUCCESS) { (void)sd_wait_data_idle(chip); @@ -1651,7 +1656,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 cmd[5], bus_width; @@ -1676,8 +1681,8 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) cmd[3] = 0; cmd[4] = 0; - retval = sd_read_data(chip, SD_TM_NORMAL_READ, - cmd, 5, 0x200, 1, bus_width, NULL, 0, 100); + retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 0x200, 1, + bus_width, NULL, 0, 100); if (retval != STATUS_SUCCESS) { (void)sd_wait_data_idle(chip); @@ -1691,7 +1696,7 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point) static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; retval = sd_change_phase(chip, sample_point, TUNE_TX); @@ -1708,11 
+1713,11 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) } retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) { if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) { rtsx_write_register(chip, SD_CFG3, - SD_RSP_80CLK_TIMEOUT_EN, 0); + SD_RSP_80CLK_TIMEOUT_EN, 0); rtsx_trace(chip); return STATUS_FAIL; } @@ -1730,7 +1735,7 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 cmd[5], bus_width; @@ -1770,8 +1775,8 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) cmd[3] = 0; cmd[4] = 0; - retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, - cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100); + retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, cmd, 5, 16, 1, + bus_width, sd_card->raw_csd, 16, 100); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); @@ -1787,7 +1792,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) } sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1, - NULL, 0); + NULL, 0); return STATUS_SUCCESS; } @@ -1795,7 +1800,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point) static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map, u8 tune_dir) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; struct timing_phase_path path[MAX_PHASE + 1]; int i, j, cont_path_cnt; bool new_block; @@ -1808,7 +1813,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map, else final_phase = (u8)chip->sd_default_tx_phase; - goto Search_Finish; + goto search_finish; } cont_path_cnt = 0; @@ -1839,7 +1844,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map, if (cont_path_cnt == 0) { dev_dbg(rtsx_dev(chip), "No continuous phase path\n"); - goto Search_Finish; + goto search_finish; } else { int idx = cont_path_cnt - 1; @@ -1848,7 +1853,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map, } if ((path[0].start == 0) && - (path[cont_path_cnt - 1].end == MAX_PHASE)) { + (path[cont_path_cnt - 1].end == MAX_PHASE)) { path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1; path[0].len += path[cont_path_cnt - 1].len; path[0].mid = path[0].start + path[0].len / 2; @@ -1906,14 +1911,14 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map, } } -Search_Finish: +search_finish: dev_dbg(rtsx_dev(chip), "Final chosen phase: %d\n", final_phase); return final_phase; } static int sd_tuning_rx(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; int i, j; u32 raw_phase_map[3], phase_map; @@ -1974,7 +1979,7 @@ static int sd_tuning_rx(struct rtsx_chip *chip) static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; int i; u32 phase_map; @@ -1992,7 +1997,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip) if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { sd_set_err_code(chip, SD_NO_CARD); rtsx_write_register(chip, SD_CFG3, - SD_RSP_80CLK_TIMEOUT_EN, 0); + SD_RSP_80CLK_TIMEOUT_EN, 0); rtsx_trace(chip); return 
STATUS_FAIL; } @@ -2002,10 +2007,10 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip) continue; retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, - sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, - 0); + sd_card->sd_addr, SD_RSP_TYPE_R1, + NULL, 0); if ((retval == STATUS_SUCCESS) || - !sd_check_err_code(chip, SD_RSP_TIMEOUT)) + !sd_check_err_code(chip, SD_RSP_TIMEOUT)) phase_map |= 1 << i; } @@ -2039,7 +2044,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip) static int sd_tuning_tx(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; int i, j; u32 raw_phase_map[3], phase_map; @@ -2131,7 +2136,7 @@ static int sd_ddr_tuning(struct rtsx_chip *chip) } } else { retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase, - TUNE_TX); + TUNE_TX); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -2167,7 +2172,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip) } } else { retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase, - TUNE_TX); + TUNE_TX); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -2193,7 +2198,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip) int sd_switch_clock(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; int re_tuning = 0; @@ -2231,7 +2236,7 @@ int sd_switch_clock(struct rtsx_chip *chip) static int sd_prepare_reset(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; if (chip->asic_code) @@ -2286,31 +2291,36 @@ static int sd_pull_ctl_disable(struct rtsx_chip *chip) if (CHECK_PID(chip, 0x5208)) { retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF, - XD_D3_PD | SD_D7_PD | SD_CLK_PD | SD_D5_PD); + XD_D3_PD | SD_D7_PD | SD_CLK_PD | + SD_D5_PD); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF, - SD_D6_PD | SD_D0_PD | SD_D1_PD | XD_D5_PD); + SD_D6_PD | SD_D0_PD | SD_D1_PD | + XD_D5_PD); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF, - SD_D4_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); + SD_D4_PD | XD_CE_PD | XD_CLE_PD | + XD_CD_PU); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF, - XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD); + XD_RDY_PD | SD_D3_PD | SD_D2_PD | + XD_ALE_PD); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF, - MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); + MS_INS_PU | SD_WP_PD | SD_CD_PU | + SD_CMD_PD); if (retval) { rtsx_trace(chip); return retval; @@ -2361,27 +2371,27 @@ int sd_pull_ctl_enable(struct rtsx_chip *chip) if (CHECK_PID(chip, 0x5208)) { rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, - XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU); + XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, - SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD); + SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, - SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU); + SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, - XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD); + XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, - MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU); + 
MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, - MS_D5_PD | MS_D4_PD); + MS_D5_PD | MS_D4_PD); } else if (CHECK_PID(chip, 0x5288)) { if (CHECK_BARO_PKG(chip, QFN)) { rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, - 0xA8); + 0xA8); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, - 0x5A); + 0x5A); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, - 0x95); + 0x95); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, - 0xAA); + 0xAA); } } @@ -2478,7 +2488,7 @@ static int sd_dummy_clock(struct rtsx_chip *chip) static int sd_read_lba0(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 cmd[5], bus_width; @@ -2499,8 +2509,8 @@ static int sd_read_lba0(struct rtsx_chip *chip) bus_width = SD_BUS_WIDTH_1; } - retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, - 5, 512, 1, bus_width, NULL, 0, 100); + retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 512, 1, + bus_width, NULL, 0, 100); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); rtsx_trace(chip); @@ -2512,14 +2522,14 @@ static int sd_read_lba0(struct rtsx_chip *chip) static int sd_check_wp_state(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u32 val; u16 sd_card_type; u8 cmd[5], buf[64]; - retval = sd_send_cmd_get_rsp(chip, APP_CMD, - sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); + retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -2532,12 +2542,12 @@ static int sd_check_wp_state(struct rtsx_chip *chip) cmd[4] = 0; retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, - SD_BUS_WIDTH_4, buf, 64, 250); + SD_BUS_WIDTH_4, buf, 64, 250); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); rtsx_trace(chip); return STATUS_FAIL; } @@ -2562,7 +2572,7 @@ static int sd_check_wp_state(struct rtsx_chip *chip) static int reset_sd(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; bool hi_cap_flow = false; int retval, i = 0, j = 0, k = 0; bool sd_dont_switch = false; @@ -2575,7 +2585,7 @@ static int reset_sd(struct rtsx_chip *chip) SET_SD(sd_card); -Switch_Fail: +switch_fail: i = 0; j = 0; @@ -2589,11 +2599,11 @@ static int reset_sd(struct rtsx_chip *chip) retval = sd_prepare_reset(chip); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; retval = sd_dummy_clock(chip); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) { int rty_cnt = 0; @@ -2601,11 +2611,11 @@ static int reset_sd(struct rtsx_chip *chip) for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) { if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { sd_set_err_code(chip, SD_NO_CARD); - goto Status_Fail; + goto status_fail; } retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, - SD_RSP_TYPE_R4, rsp, 5); + SD_RSP_TYPE_R4, rsp, 5); if (retval == STATUS_SUCCESS) { int func_num = (rsp[1] >> 4) & 0x07; @@ -2613,7 +2623,7 @@ static int reset_sd(struct rtsx_chip *chip) dev_dbg(rtsx_dev(chip), "SD_IO card (Function number: %d)!\n", func_num); chip->sd_io = 1; - goto Status_Fail; + goto status_fail; } break; @@ -2630,14 +2640,14 @@ static int 
reset_sd(struct rtsx_chip *chip) /* Start Initialization Process of SD Card */ RTY_SD_RST: retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0, - NULL, 0); + NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; wait_timeout(20); retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA, - SD_RSP_TYPE_R7, rsp, 5); + SD_RSP_TYPE_R7, rsp, 5); if (retval == STATUS_SUCCESS) { if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) { hi_cap_flow = true; @@ -2649,37 +2659,37 @@ static int reset_sd(struct rtsx_chip *chip) voltage = SUPPORT_VOLTAGE; retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, - SD_RSP_TYPE_R0, NULL, 0); + SD_RSP_TYPE_R0, NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; wait_timeout(20); } do { retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1, - NULL, 0); + NULL, 0); if (retval != STATUS_SUCCESS) { if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { sd_set_err_code(chip, SD_NO_CARD); - goto Status_Fail; + goto status_fail; } j++; if (j < 3) goto RTY_SD_RST; else - goto Status_Fail; + goto status_fail; } retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage, - SD_RSP_TYPE_R3, rsp, 5); + SD_RSP_TYPE_R3, rsp, 5); if (retval != STATUS_SUCCESS) { k++; if (k < 3) goto RTY_SD_RST; else - goto Status_Fail; + goto status_fail; } i++; @@ -2687,7 +2697,7 @@ static int reset_sd(struct rtsx_chip *chip) } while (!(rsp[1] & 0x80) && (i < 255)); if (i == 255) - goto Status_Fail; + goto status_fail; if (hi_cap_flow) { if (rsp[1] & 0x40) @@ -2705,19 +2715,19 @@ static int reset_sd(struct rtsx_chip *chip) if (support_1v8) { retval = sd_voltage_switch(chip); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; } retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2, - NULL, 0); + NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; for (i = 0; i < 3; i++) { retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0, - SD_RSP_TYPE_R6, rsp, 5); + SD_RSP_TYPE_R6, rsp, 5); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; sd_card->sd_addr = (u32)rsp[1] << 24; sd_card->sd_addr += (u32)rsp[2] << 16; @@ -2728,17 +2738,17 @@ static int reset_sd(struct rtsx_chip *chip) retval = sd_check_csd(chip, 1); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; retval = sd_select_card(chip, 1); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; #ifdef SUPPORT_SD_LOCK SD_UNLOCK_ENTRY: retval = sd_update_lock_status(chip); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; if (sd_card->sd_lock_status & SD_LOCKED) { sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST); @@ -2749,25 +2759,25 @@ static int reset_sd(struct rtsx_chip *chip) #endif retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; if (support_1v8) { retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; 
switch_bus_width = SD_BUS_WIDTH_4; } else { @@ -2775,13 +2785,13 @@ static int reset_sd(struct rtsx_chip *chip) } retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1, - NULL, 0); + NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; if (!(sd_card->raw_csd[4] & 0x40)) sd_dont_switch = true; @@ -2804,7 +2814,7 @@ static int reset_sd(struct rtsx_chip *chip) sd_dont_switch = true; try_sdio = false; - goto Switch_Fail; + goto switch_fail; } } else { if (support_1v8) { @@ -2812,21 +2822,21 @@ static int reset_sd(struct rtsx_chip *chip) sd_dont_switch = true; try_sdio = false; - goto Switch_Fail; + goto switch_fail; } } } if (!support_1v8) { retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; } #ifdef SUPPORT_SD_LOCK @@ -2845,7 +2855,7 @@ static int reset_sd(struct rtsx_chip *chip) retval = sd_set_init_para(chip); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; if (CHK_SD_DDR50(sd_card)) retval = sd_ddr_tuning(chip); @@ -2854,20 +2864,20 @@ static int reset_sd(struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { if (sd20_mode) { - goto Status_Fail; + goto status_fail; } else { retval = sd_init_power(chip); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; try_sdio = false; sd20_mode = true; - goto Switch_Fail; + goto switch_fail; } } sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); if (CHK_SD_DDR50(sd_card)) { retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000); @@ -2879,15 +2889,15 @@ static int reset_sd(struct rtsx_chip *chip) retval = sd_read_lba0(chip); if (retval != STATUS_SUCCESS) { if (sd20_mode) { - goto Status_Fail; + goto status_fail; } else { retval = sd_init_power(chip); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; try_sdio = false; sd20_mode = true; - goto Switch_Fail; + goto switch_fail; } } } @@ -2895,7 +2905,7 @@ static int reset_sd(struct rtsx_chip *chip) retval = sd_check_wp_state(chip); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; chip->card_bus_width[chip->card2lun[SD_CARD]] = 4; @@ -2918,21 +2928,21 @@ static int reset_sd(struct rtsx_chip *chip) return STATUS_SUCCESS; -Status_Fail: +status_fail: rtsx_trace(chip); return STATUS_FAIL; } static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 buf[8] = {0}, bus_width, *ptr; u16 byte_cnt; int len; retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL, - 0); + 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return SWITCH_FAIL; @@ -2957,8 +2967,8 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width) return SWITCH_ERR; } - retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, - NULL, 0, byte_cnt, 1, bus_width, buf, len, 100); + retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, NULL, 0, byte_cnt, 1, + bus_width, buf, len, 100); if (retval != STATUS_SUCCESS) { rtsx_clear_sd_error(chip); rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0); @@ -2980,23 +2990,23 
@@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width) if (width == MMC_8BIT_BUS) rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, - 0xFF, 0x08); + 0xFF, 0x08); else rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, - 0xFF, 0x04); + 0xFF, 0x04); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0); - rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, - SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END| - SD_CHECK_CRC7 | SD_RSP_LEN_6); + rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, SD_CALCULATE_CRC7 | + SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | + SD_CHECK_CRC7 | SD_RSP_LEN_6); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, - PINGPONG_BUFFER); + PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, - SD_TM_NORMAL_READ | SD_TRANSFER_START); + SD_TM_NORMAL_READ | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, - SD_TRANSFER_END); + SD_TRANSFER_END); rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0); if (width == MMC_8BIT_BUS) @@ -3024,9 +3034,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width) arg = 0x03B70200; retval = sd_send_cmd_get_rsp(chip, SWITCH, arg, - SD_RSP_TYPE_R1b, rsp, 5); + SD_RSP_TYPE_R1b, rsp, 5); if ((retval == STATUS_SUCCESS) && - !(rsp[4] & MMC_SWITCH_ERR)) + !(rsp[4] & MMC_SWITCH_ERR)) return SWITCH_SUCCESS; } } else { @@ -3041,9 +3051,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width) arg = 0x03B70100; retval = sd_send_cmd_get_rsp(chip, SWITCH, arg, - SD_RSP_TYPE_R1b, rsp, 5); + SD_RSP_TYPE_R1b, rsp, 5); if ((retval == STATUS_SUCCESS) && - !(rsp[4] & MMC_SWITCH_ERR)) + !(rsp[4] & MMC_SWITCH_ERR)) return SWITCH_SUCCESS; } } @@ -3054,7 +3064,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width) static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; u8 *ptr, card_type, card_type_mask = 0; @@ -3065,7 +3075,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, - 0x40 | SEND_EXT_CSD); + 0x40 | SEND_EXT_CSD); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0); @@ -3077,14 +3087,14 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr) rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, - SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END| - SD_CHECK_CRC7 | SD_RSP_LEN_6); + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | + SD_CHECK_CRC7 | SD_RSP_LEN_6); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, - PINGPONG_BUFFER); + PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, - SD_TM_NORMAL_READ | SD_TRANSFER_START); + SD_TM_NORMAL_READ | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, - SD_TRANSFER_END); + SD_TRANSFER_END); rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0); rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0); @@ -3097,7 +3107,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr) if (retval == -ETIMEDOUT) { rtsx_clear_sd_error(chip); sd_send_cmd_get_rsp(chip, SEND_STATUS, 
sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); } rtsx_trace(chip); return STATUS_FAIL; @@ -3106,7 +3116,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr) ptr = rtsx_get_cmd_data(chip); if (ptr[0] & SD_TRANSFER_ERR) { sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); rtsx_trace(chip); return STATUS_FAIL; } @@ -3132,8 +3142,8 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr) SET_MMC_26M(sd_card); } - retval = sd_send_cmd_get_rsp(chip, SWITCH, - 0x03B90100, SD_RSP_TYPE_R1b, rsp, 5); + retval = sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100, + SD_RSP_TYPE_R1b, rsp, 5); if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR)) CLR_MMC_HS(sd_card); } @@ -3178,7 +3188,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr) static int reset_mmc(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval, i = 0, j = 0, k = 0; bool switch_ddr = true; u8 rsp[16]; @@ -3190,7 +3200,7 @@ static int reset_mmc(struct rtsx_chip *chip) goto MMC_UNLOCK_ENTRY; #endif -Switch_Fail: +switch_fail: retval = sd_prepare_reset(chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -3201,7 +3211,7 @@ static int reset_mmc(struct rtsx_chip *chip) RTY_MMC_RST: retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0, - NULL, 0); + NULL, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3215,11 +3225,11 @@ static int reset_mmc(struct rtsx_chip *chip) } retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND, - (SUPPORT_VOLTAGE | 0x40000000), - SD_RSP_TYPE_R3, rsp, 5); + (SUPPORT_VOLTAGE | 0x40000000), + SD_RSP_TYPE_R3, rsp, 5); if (retval != STATUS_SUCCESS) { if (sd_check_err_code(chip, SD_BUSY) || - sd_check_err_code(chip, SD_TO_ERR)) { + sd_check_err_code(chip, SD_TO_ERR)) { k++; if (k < 20) { sd_clr_err_code(chip); @@ -3255,7 +3265,7 @@ static int reset_mmc(struct rtsx_chip *chip) CLR_MMC_SECTOR_MODE(sd_card); retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2, - NULL, 0); + NULL, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3263,7 +3273,7 @@ static int reset_mmc(struct rtsx_chip *chip) sd_card->sd_addr = 0x00100000; retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr, - SD_RSP_TYPE_R6, rsp, 5); + SD_RSP_TYPE_R6, rsp, 5); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3284,7 +3294,7 @@ static int reset_mmc(struct rtsx_chip *chip) } retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1, - NULL, 0); + NULL, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3319,7 +3329,7 @@ static int reset_mmc(struct rtsx_chip *chip) } sd_card->mmc_dont_switch_bus = 1; rtsx_trace(chip); - goto Switch_Fail; + goto switch_fail; } } @@ -3345,7 +3355,7 @@ static int reset_mmc(struct rtsx_chip *chip) switch_ddr = false; rtsx_trace(chip); - goto Switch_Fail; + goto switch_fail; } retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000); @@ -3360,7 +3370,7 @@ static int reset_mmc(struct rtsx_chip *chip) switch_ddr = false; rtsx_trace(chip); - goto Switch_Fail; + goto switch_fail; } } } @@ -3392,7 +3402,7 @@ static int reset_mmc(struct rtsx_chip *chip) int reset_sd_card(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; sd_init_reg_addr(chip); @@ -3407,7 +3417,7 @@ 
int reset_sd_card(struct rtsx_chip *chip) } if (chip->ignore_sd && CHK_SDIO_EXIST(chip) && - !CHK_SDIO_IGNORED(chip)) { + !CHK_SDIO_IGNORED(chip)) { if (chip->asic_code) { retval = sd_pull_ctl_enable(chip); if (retval != STATUS_SUCCESS) { @@ -3416,7 +3426,8 @@ int reset_sd_card(struct rtsx_chip *chip) } } else { retval = rtsx_write_register(chip, FPGA_PULL_CTL, - FPGA_SD_PULL_CTL_BIT | 0x20, 0); + FPGA_SD_PULL_CTL_BIT | + 0x20, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3505,7 +3516,7 @@ int reset_sd_card(struct rtsx_chip *chip) static int reset_mmc_only(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; sd_card->sd_type = 0; @@ -3574,7 +3585,7 @@ static int reset_mmc_only(struct rtsx_chip *chip) static int wait_data_buf_ready(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int i, retval; for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) { @@ -3587,7 +3598,8 @@ static int wait_data_buf_ready(struct rtsx_chip *chip) sd_card->sd_data_buf_ready = 0; retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, - sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0); + sd_card->sd_addr, SD_RSP_TYPE_R1, + NULL, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -3607,7 +3619,7 @@ static int wait_data_buf_ready(struct rtsx_chip *chip) void sd_stop_seq_mode(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; if (sd_card->seq_mode) { @@ -3616,7 +3628,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip) return; retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, - SD_RSP_TYPE_R1b, NULL, 0); + SD_RSP_TYPE_R1b, NULL, 0); if (retval != STATUS_SUCCESS) sd_set_err_code(chip, SD_STS_ERR); @@ -3632,7 +3644,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip) static inline int sd_auto_tune_clock(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; if (chip->asic_code) { @@ -3679,9 +3691,9 @@ static inline int sd_auto_tune_clock(struct rtsx_chip *chip) } int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, - u16 sector_cnt) + u16 sector_cnt) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; u32 data_addr; u8 cfg2; int retval; @@ -3730,20 +3742,20 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, } if (sd_card->seq_mode && - ((sd_card->pre_dir != srb->sc_data_direction) || - ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) != - start_sector))) { - if ((sd_card->pre_sec_cnt < 0x80) - && (sd_card->pre_dir == DMA_FROM_DEVICE) - && !CHK_SD30_SPEED(sd_card) - && !CHK_SD_HS(sd_card) - && !CHK_MMC_HS(sd_card)) { + ((sd_card->pre_dir != srb->sc_data_direction) || + ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) != + start_sector))) { + if ((sd_card->pre_sec_cnt < 0x80) && + (sd_card->pre_dir == DMA_FROM_DEVICE) && + !CHK_SD30_SPEED(sd_card) && + !CHK_SD_HS(sd_card) && + !CHK_MMC_HS(sd_card)) { sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); } - retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, - 0, SD_RSP_TYPE_R1b, NULL, 0); + retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, + SD_RSP_TYPE_R1b, NULL, 0); if (retval != STATUS_SUCCESS) { chip->rw_need_retry = 1; sd_set_err_code(chip, SD_STS_ERR); @@ -3760,12 +3772,12 @@ int 
sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, goto RW_FAIL; } - if ((sd_card->pre_sec_cnt < 0x80) - && !CHK_SD30_SPEED(sd_card) - && !CHK_SD_HS(sd_card) - && !CHK_MMC_HS(sd_card)) { + if ((sd_card->pre_sec_cnt < 0x80) && + !CHK_SD30_SPEED(sd_card) && + !CHK_SD_HS(sd_card) && + !CHK_MMC_HS(sd_card)) { sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0); + SD_RSP_TYPE_R1, NULL, 0); } } @@ -3774,30 +3786,30 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, - (u8)sector_cnt); + (u8)sector_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, - (u8)(sector_cnt >> 8)); + (u8)(sector_cnt >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); if (CHK_MMC_8BIT(sd_card)) rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, - 0x03, SD_BUS_WIDTH_8); + 0x03, SD_BUS_WIDTH_8); else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card)) rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, - 0x03, SD_BUS_WIDTH_4); + 0x03, SD_BUS_WIDTH_4); else rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, - 0x03, SD_BUS_WIDTH_1); + 0x03, SD_BUS_WIDTH_1); if (sd_card->seq_mode) { - cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16| + cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2); trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512, - DMA_512); + DMA_512); if (srb->sc_data_direction == DMA_FROM_DEVICE) { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, @@ -3808,7 +3820,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, } rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, - SD_TRANSFER_END, SD_TRANSFER_END); + SD_TRANSFER_END, SD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); } else { @@ -3818,22 +3830,22 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | READ_MULTIPLE_BLOCK); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, - (u8)(data_addr >> 24)); + (u8)(data_addr >> 24)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, - (u8)(data_addr >> 16)); + (u8)(data_addr >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, - (u8)(data_addr >> 8)); + (u8)(data_addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, - (u8)data_addr); + (u8)data_addr); cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6; rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, - cfg2); + cfg2); trans_dma_enable(srb->sc_data_direction, chip, - sector_cnt * 512, DMA_512); + sector_cnt * 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START); @@ -3861,7 +3873,8 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, } retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK, - data_addr, SD_RSP_TYPE_R1, NULL, 0); + data_addr, SD_RSP_TYPE_R1, + NULL, 0); if (retval != STATUS_SUCCESS) { chip->rw_need_retry = 1; rtsx_trace(chip); @@ -3874,10 +3887,10 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, - cfg2); + cfg2); 
trans_dma_enable(srb->sc_data_direction, chip, - sector_cnt * 512, DMA_512); + sector_cnt * 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); @@ -3891,7 +3904,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, } retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), - scsi_bufflen(srb), scsi_sg_count(srb), + scsi_bufflen(srb), scsi_sg_count(srb), srb->sc_data_direction, chip->sd_timeout); if (retval < 0) { u8 stat = 0; @@ -3916,7 +3929,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, chip->rw_need_retry = 1; retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, - SD_RSP_TYPE_R1b, NULL, 0); + SD_RSP_TYPE_R1b, NULL, 0); if (retval != STATUS_SUCCESS) { sd_set_err_code(chip, SD_STS_ERR); rtsx_trace(chip); @@ -3984,8 +3997,9 @@ int soft_reset_sd_card(struct rtsx_chip *chip) return reset_sd(chip); } -int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, - u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check) +int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg, + u8 rsp_type, u8 *rsp, int rsp_len, + bool special_check) { int retval; int timeout = 100; @@ -4011,11 +4025,11 @@ int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, PINGPONG_BUFFER); + 0x01, PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, - 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); + 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END, - SD_TRANSFER_END); + SD_TRANSFER_END); if (rsp_type == SD_RSP_TYPE_R2) { for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16; @@ -4084,7 +4098,7 @@ int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, } if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) || - (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) { + (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) { if ((cmd_idx != STOP_TRANSMISSION) && !special_check) { if (ptr[1] & 0x80) { rtsx_trace(chip); @@ -4172,7 +4186,7 @@ int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type) int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; unsigned int lun = SCSI_LUN(srb); int len; u8 buf[18] = { @@ -4206,9 +4220,9 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip) } if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) || - (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) || - (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) || - (srb->cmnd[8] != 0x64)) { + (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) || + (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) || + (srb->cmnd[8] != 0x64)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -4245,7 +4259,7 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip) } static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type, - int *rsp_len) + int *rsp_len) { if (!rsp_type || !rsp_len) return STATUS_FAIL; @@ -4285,7 +4299,7 @@ static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type, int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; unsigned int lun = 
SCSI_LUN(srb); int retval, rsp_len; u8 cmd_idx, rsp_type; @@ -4339,7 +4353,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) { if (CHK_MMC_8BIT(sd_card)) { retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, - SD_BUS_WIDTH_8); + SD_BUS_WIDTH_8); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return TRANSPORT_FAILED; @@ -4347,7 +4361,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) { retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, - SD_BUS_WIDTH_4); + SD_BUS_WIDTH_4); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return TRANSPORT_FAILED; @@ -4366,32 +4380,33 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = sd_select_card(chip, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Cmd_Failed; + goto sd_execute_cmd_failed; } } if (acmd) { retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, - sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0, false); + sd_card->sd_addr, + SD_RSP_TYPE_R1, NULL, 0, + false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Cmd_Failed; + goto sd_execute_cmd_failed; } } retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type, - sd_card->rsp, rsp_len, false); + sd_card->rsp, rsp_len, false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Cmd_Failed; + goto sd_execute_cmd_failed; } if (standby) { retval = sd_select_card(chip, 1); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Cmd_Failed; + goto sd_execute_cmd_failed; } } @@ -4399,14 +4414,14 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = sd_update_lock_status(chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Cmd_Failed; + goto sd_execute_cmd_failed; } #endif scsi_set_resid(srb, 0); return TRANSPORT_GOOD; -SD_Execute_Cmd_Failed: +sd_execute_cmd_failed: sd_card->pre_cmd_err = 1; set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); release_sd_card(chip); @@ -4420,7 +4435,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; unsigned int lun = SCSI_LUN(srb); int retval, rsp_len, i; bool read_err = false, cmd13_checkbit = false; @@ -4492,10 +4507,11 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (data_len < 512) { retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len, - SD_RSP_TYPE_R1, NULL, 0, false); + SD_RSP_TYPE_R1, NULL, 0, + false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } } @@ -4503,17 +4519,18 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = sd_select_card(chip, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } } if (acmd) { retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, - sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0, false); + sd_card->sd_addr, + SD_RSP_TYPE_R1, NULL, 0, + false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } } @@ -4539,13 +4556,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) } retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt, - blk_cnt, 
bus_width, buf, data_len, 2000); + blk_cnt, bus_width, buf, data_len, 2000); if (retval != STATUS_SUCCESS) { read_err = true; kfree(buf); rtsx_clear_sd_error(chip); rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } min_len = min(data_len, scsi_bufflen(srb)); @@ -4558,24 +4575,24 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, - 0x02); + 0x02); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, - 0x00); + 0x00); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, - 0xFF, (srb->cmnd[7] & 0xFE) >> 1); + 0xFF, (srb->cmnd[7] & 0xFE) >> 1); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, - 0xFF, (u8)((data_len & 0x0001FE00) >> 9)); + 0xFF, (u8)((data_len & 0x0001FE00) >> 9)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, - 0x40 | cmd_idx); + 0x40 | cmd_idx); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, - srb->cmnd[3]); + srb->cmnd[3]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, - srb->cmnd[4]); + srb->cmnd[4]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, - srb->cmnd[5]); + srb->cmnd[5]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, - srb->cmnd[6]); + srb->cmnd[6]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type); @@ -4583,66 +4600,69 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, - SD_TRANSFER_END, SD_TRANSFER_END); + SD_TRANSFER_END, SD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), - scsi_bufflen(srb), scsi_sg_count(srb), - DMA_FROM_DEVICE, 10000); + scsi_bufflen(srb), + scsi_sg_count(srb), + DMA_FROM_DEVICE, 10000); if (retval < 0) { read_err = true; rtsx_clear_sd_error(chip); rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } } else { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } if (standby) { retval = sd_select_card(chip, 1); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } } if (send_cmd12) { - retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, - 0, SD_RSP_TYPE_R1b, NULL, 0, false); + retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, + SD_RSP_TYPE_R1b, NULL, 0, + false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } } if (data_len < 512) { retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, - SD_RSP_TYPE_R1, NULL, 0, false); + SD_RSP_TYPE_R1, NULL, 0, + false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto 
SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } } @@ -4651,7 +4671,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) for (i = 0; i < 3; i++) { retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS, - sd_card->sd_addr, + sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0, cmd13_checkbit); if (retval == STATUS_SUCCESS) @@ -4659,13 +4679,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) } if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Read_Cmd_Failed; + goto sd_execute_read_cmd_failed; } scsi_set_resid(srb, 0); return TRANSPORT_GOOD; -SD_Execute_Read_Cmd_Failed: +sd_execute_read_cmd_failed: sd_card->pre_cmd_err = 1; set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); if (read_err) @@ -4682,7 +4702,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; unsigned int lun = SCSI_LUN(srb); int retval, rsp_len, i; bool write_err = false, cmd13_checkbit = false; @@ -4754,7 +4774,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) { if (CHK_MMC_8BIT(sd_card)) { retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, - SD_BUS_WIDTH_8); + SD_BUS_WIDTH_8); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return TRANSPORT_FAILED; @@ -4762,7 +4782,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) { retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, - SD_BUS_WIDTH_4); + SD_BUS_WIDTH_4); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return TRANSPORT_FAILED; @@ -4779,10 +4799,11 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (data_len < 512) { retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len, - SD_RSP_TYPE_R1, NULL, 0, false); + SD_RSP_TYPE_R1, NULL, 0, + false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } @@ -4790,25 +4811,26 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = sd_select_card(chip, 0); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } if (acmd) { retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD, - sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0, false); + sd_card->sd_addr, + SD_RSP_TYPE_R1, NULL, 0, + false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type, - sd_card->rsp, rsp_len, false); + sd_card->rsp, rsp_len, false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } if (data_len <= 512) { @@ -4832,37 +4854,37 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_init_cmd(chip); for (i = 0; i < 256; i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, - PPBUF_BASE2 + i, 0xFF, buf[i]); + PPBUF_BASE2 + i, 0xFF, buf[i]); } retval = rtsx_send_cmd(chip, 0, 250); if (retval != STATUS_SUCCESS) { kfree(buf); rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } rtsx_init_cmd(chip); for (i = 256; i < data_len; i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, - PPBUF_BASE2 + i, 0xFF, buf[i]); 
+ PPBUF_BASE2 + i, 0xFF, buf[i]); } retval = rtsx_send_cmd(chip, 0, 250); if (retval != STATUS_SUCCESS) { kfree(buf); rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } else { rtsx_init_cmd(chip); for (i = 0; i < data_len; i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, - PPBUF_BASE2 + i, 0xFF, buf[i]); + PPBUF_BASE2 + i, 0xFF, buf[i]); } retval = rtsx_send_cmd(chip, 0, 250); if (retval != STATUS_SUCCESS) { kfree(buf); rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } @@ -4871,20 +4893,20 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, - srb->cmnd[8] & 0x03); + srb->cmnd[8] & 0x03); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, - srb->cmnd[9]); + srb->cmnd[9]); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, - 0x00); + 0x00); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, - 0x01); + 0x01); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, - PINGPONG_BUFFER); + PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, - SD_TRANSFER_END, SD_TRANSFER_END); + SD_TRANSFER_END, SD_TRANSFER_END); retval = rtsx_send_cmd(chip, SD_CARD, 250); } else if (!(data_len & 0x1FF)) { @@ -4893,35 +4915,36 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, - 0x02); + 0x02); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, - 0x00); + 0x00); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, - 0xFF, (srb->cmnd[7] & 0xFE) >> 1); + 0xFF, (srb->cmnd[7] & 0xFE) >> 1); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, - 0xFF, (u8)((data_len & 0x0001FE00) >> 9)); + 0xFF, (u8)((data_len & 0x0001FE00) >> 9)); rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF, - SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); + SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START); rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, - SD_TRANSFER_END, SD_TRANSFER_END); + SD_TRANSFER_END, SD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb), - scsi_bufflen(srb), scsi_sg_count(srb), - DMA_TO_DEVICE, 10000); + scsi_bufflen(srb), + scsi_sg_count(srb), + DMA_TO_DEVICE, 10000); } else { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } if (retval < 0) { write_err = true; rtsx_clear_sd_error(chip); rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } #ifdef SUPPORT_SD_LOCK @@ -4949,37 +4972,39 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) retval = sd_select_card(chip, 1); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } if (send_cmd12) { - retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, - 0, SD_RSP_TYPE_R1b, NULL, 0, false); + retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0, + SD_RSP_TYPE_R1b, NULL, 0, + false); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } if (data_len < 512) { retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, - SD_RSP_TYPE_R1, NULL, 0, false); + SD_RSP_TYPE_R1, NULL, 0, + false); if (retval != 
STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } @@ -4988,15 +5013,15 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) for (i = 0; i < 3; i++) { retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS, - sd_card->sd_addr, - SD_RSP_TYPE_R1, NULL, 0, - cmd13_checkbit); + sd_card->sd_addr, + SD_RSP_TYPE_R1, NULL, 0, + cmd13_checkbit); if (retval == STATUS_SUCCESS) break; } if (retval != STATUS_SUCCESS) { rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } #ifdef SUPPORT_SD_LOCK @@ -5024,7 +5049,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (retval != STATUS_SUCCESS) { sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST); rtsx_trace(chip); - goto SD_Execute_Write_Cmd_Failed; + goto sd_execute_write_cmd_failed; } } @@ -5045,7 +5070,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) scsi_set_resid(srb, 0); return TRANSPORT_GOOD; -SD_Execute_Write_Cmd_Failed: +sd_execute_write_cmd_failed: sd_card->pre_cmd_err = 1; set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); if (write_err) @@ -5062,7 +5087,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; unsigned int lun = SCSI_LUN(srb); int count; u16 data_len; @@ -5104,7 +5129,7 @@ int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip) int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; unsigned int lun = SCSI_LUN(srb); int retval; @@ -5122,9 +5147,9 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip) } if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) || - (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) || - (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) || - (srb->cmnd[8] != 0x64)) { + (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) || + (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) || + (srb->cmnd[8] != 0x64)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -5174,7 +5199,7 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip) void sd_cleanup_work(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; if (sd_card->seq_mode) { dev_dbg(rtsx_dev(chip), "SD: stop transmission\n"); @@ -5230,7 +5255,7 @@ int sd_power_off_card3v3(struct rtsx_chip *chip) int release_sd_card(struct rtsx_chip *chip) { - struct sd_info *sd_card = &(chip->sd_card); + struct sd_info *sd_card = &chip->sd_card; int retval; chip->card_ready &= ~SD_CARD; diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h index 60b79280fb5f909114e2477725cbd130219f3511..55764e16b93a9f1f2ba1e4018d34def2272e746d 100644 --- a/drivers/staging/rts5208/sd.h +++ b/drivers/staging/rts5208/sd.h @@ -280,14 +280,15 @@ int reset_sd_card(struct rtsx_chip *chip); int sd_switch_clock(struct rtsx_chip 
*chip); void sd_stop_seq_mode(struct rtsx_chip *chip); int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, - u32 start_sector, u16 sector_cnt); + u32 start_sector, u16 sector_cnt); void sd_cleanup_work(struct rtsx_chip *chip); int sd_power_off_card3v3(struct rtsx_chip *chip); int release_sd_card(struct rtsx_chip *chip); #ifdef SUPPORT_CPRM int soft_reset_sd_card(struct rtsx_chip *chip); int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, - u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check); + u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, + bool special_check); int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type); int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip); diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c index 13c539c83838ca9a48b40cab900cc79f07850989..8b8cd955dfeb14894aa3321b7312fbdee212ac11 100644 --- a/drivers/staging/rts5208/spi.c +++ b/drivers/staging/rts5208/spi.c @@ -29,7 +29,7 @@ static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code) { - struct spi_info *spi = &(chip->spi); + struct spi_info *spi = &chip->spi; spi->err_code = err_code; } @@ -57,7 +57,7 @@ static int spi_init(struct rtsx_chip *chip) static int spi_set_init_para(struct rtsx_chip *chip) { - struct spi_info *spi = &(chip->spi); + struct spi_info *spi = &chip->spi; int retval; retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF, @@ -117,9 +117,9 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_POLLING_MODE0); + SPI_TRANSFER0_START | SPI_POLLING_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, msec); if (retval < 0) { @@ -134,7 +134,7 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec) static int sf_enable_write(struct rtsx_chip *chip, u8 ins) { - struct spi_info *spi = &(chip->spi); + struct spi_info *spi = &chip->spi; int retval; if (!spi->write_en) @@ -144,11 +144,11 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, - SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); + SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_C_MODE0); + SPI_TRANSFER0_START | SPI_C_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval < 0) { @@ -163,7 +163,7 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins) static int sf_disable_write(struct rtsx_chip *chip, u8 ins) { - struct spi_info *spi = &(chip->spi); + struct spi_info *spi = &chip->spi; int retval; if (!spi->write_en) @@ -173,11 +173,11 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, - SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); + SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_C_MODE0); + SPI_TRANSFER0_START | SPI_C_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if 
(retval < 0) { @@ -191,27 +191,27 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins) } static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr, - u16 len) + u16 len) { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, - SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); + SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8)); if (addr_mode) { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, - (u8)(addr >> 8)); + (u8)(addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, - (u8)(addr >> 16)); + (u8)(addr >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CADO_MODE0); + SPI_TRANSFER0_START | SPI_CADO_MODE0); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CDO_MODE0); + SPI_TRANSFER0_START | SPI_CDO_MODE0); } rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); } static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr) @@ -222,21 +222,21 @@ static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, - SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); + SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); if (addr_mode) { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, - (u8)(addr >> 8)); + (u8)(addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, - (u8)(addr >> 16)); + (u8)(addr >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CA_MODE0); + SPI_TRANSFER0_START | SPI_CA_MODE0); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_C_MODE0); + SPI_TRANSFER0_START | SPI_C_MODE0); } rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval < 0) { @@ -322,9 +322,9 @@ static int spi_eeprom_program_enable(struct rtsx_chip *chip) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CA_MODE0); + SPI_TRANSFER0_START | SPI_CA_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval < 0) { @@ -358,9 +358,9 @@ int spi_erase_eeprom_chip(struct rtsx_chip *chip) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CA_MODE0); + SPI_TRANSFER0_START | SPI_CA_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval < 0) { @@ -402,9 +402,9 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - 
SPI_TRANSFER0_START | SPI_CA_MODE0); + SPI_TRANSFER0_START | SPI_CA_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval < 0) { @@ -442,9 +442,9 @@ int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CADI_MODE0); + SPI_TRANSFER0_START | SPI_CADI_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval < 0) { @@ -497,9 +497,9 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val) rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CA_MODE0); + SPI_TRANSFER0_START | SPI_CA_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval < 0) { @@ -518,12 +518,12 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val) int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct spi_info *spi = &(chip->spi); + struct spi_info *spi = &chip->spi; dev_dbg(rtsx_dev(chip), "spi_get_status: err_code = 0x%x\n", spi->err_code); - rtsx_stor_set_xfer_buf(&(spi->err_code), - min_t(int, scsi_bufflen(srb), 1), srb); + rtsx_stor_set_xfer_buf(&spi->err_code, + min_t(int, scsi_bufflen(srb), 1), srb); scsi_set_resid(srb, scsi_bufflen(srb) - 1); return STATUS_SUCCESS; @@ -531,7 +531,7 @@ int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct spi_info *spi = &(chip->spi); + struct spi_info *spi = &chip->spi; spi_set_err_code(chip, SPI_NO_ERR); @@ -574,37 +574,37 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, - PINGPONG_BUFFER); + PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, - SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); + SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]); if (len == 0) { if (srb->cmnd[9]) { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, - 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0); + 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, - 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0); + 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0); } } else { if (srb->cmnd[9]) { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CADI_MODE0); + SPI_TRANSFER0_START | SPI_CADI_MODE0); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CDI_MODE0); + SPI_TRANSFER0_START | SPI_CDI_MODE0); } } rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - 
SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval < 0) { @@ -682,38 +682,38 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (slow_read) { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, - (u8)addr); + (u8)addr); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, - (u8)(addr >> 8)); + (u8)(addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, - (u8)(addr >> 16)); + (u8)(addr >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, - SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); + SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, - (u8)addr); + (u8)addr); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, - (u8)(addr >> 8)); + (u8)(addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF, - (u8)(addr >> 16)); + (u8)(addr >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, - SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32); + SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32); } rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, - (u8)(pagelen >> 8)); + (u8)(pagelen >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, - (u8)pagelen); + (u8)pagelen); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CADI_MODE0); + SPI_TRANSFER0_START | SPI_CADI_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, - SPI_TRANSFER0_END, SPI_TRANSFER0_END); + SPI_TRANSFER0_END, SPI_TRANSFER0_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0, - DMA_FROM_DEVICE, 10000); + DMA_FROM_DEVICE, 10000); if (retval < 0) { kfree(buf); rtsx_clear_spi_error(chip); @@ -723,7 +723,7 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip) } rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset, - TO_XFER_BUF); + TO_XFER_BUF); addr += pagelen; len -= pagelen; @@ -775,14 +775,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip) } rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset, - FROM_XFER_BUF); + FROM_XFER_BUF); rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, PINGPONG_BUFFER); + 0x01, PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, - buf[0]); + buf[0]); sf_program(chip, ins, 1, addr, 1); retval = rtsx_send_cmd(chip, 0, 100); @@ -824,14 +824,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip) while (len) { rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset, - FROM_XFER_BUF); + FROM_XFER_BUF); rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, PINGPONG_BUFFER); + 0x01, PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, - buf[0]); + buf[0]); if (first_byte) { sf_program(chip, ins, 1, addr, 1); first_byte = 0; @@ -899,10 +899,10 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_send_cmd_no_wait(chip); rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, - &offset, FROM_XFER_BUF); + &offset, FROM_XFER_BUF); retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0, - DMA_TO_DEVICE, 100); + DMA_TO_DEVICE, 100); if (retval < 0) { kfree(buf); rtsx_clear_spi_error(chip); @@ -1010,18 +1010,18 @@ int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, - PINGPONG_BUFFER); + PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, - SPI_COMMAND_BIT_8 | 
SPI_ADDRESS_BIT_24); + SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1); rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status); rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF, - SPI_TRANSFER0_START | SPI_CDO_MODE0); + SPI_TRANSFER0_START | SPI_CDO_MODE0); rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END, - SPI_TRANSFER0_END); + SPI_TRANSFER0_END); retval = rtsx_send_cmd(chip, 0, 100); if (retval != STATUS_SUCCESS) { diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c index 1de02bb988392fc67f99bdd2f3a9d1d624ba7f94..85aba05acbc1ffcff497b81e2a02b177d3eb0a26 100644 --- a/drivers/staging/rts5208/xd.c +++ b/drivers/staging/rts5208/xd.c @@ -37,21 +37,21 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff, static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; xd_card->err_code = err_code; } static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; return (xd_card->err_code == err_code); } static int xd_set_init_para(struct rtsx_chip *chip) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int retval; if (chip->asic_code) @@ -70,7 +70,7 @@ static int xd_set_init_para(struct rtsx_chip *chip) static int xd_switch_clock(struct rtsx_chip *chip) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int retval; retval = select_card(chip, XD_CARD); @@ -97,9 +97,9 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len) rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, - XD_TRANSFER_START | XD_READ_ID); + XD_TRANSFER_START | XD_READ_ID); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END, - XD_TRANSFER_END); + XD_TRANSFER_END); for (i = 0; i < 4; i++) rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0); @@ -122,28 +122,30 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len) static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; switch (mode) { case XD_RW_ADDR: rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2, - 0xFF, (u8)(addr >> 8)); + 0xFF, (u8)(addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3, - 0xFF, (u8)(addr >> 16)); + 0xFF, (u8)(addr >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF, - xd_card->addr_cycle | XD_CALC_ECC | XD_BA_NO_TRANSFORM); + xd_card->addr_cycle | + XD_CALC_ECC | + XD_BA_NO_TRANSFORM); break; case XD_ERASE_ADDR: rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, - 0xFF, (u8)(addr >> 8)); + 0xFF, (u8)(addr >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2, - 0xFF, (u8)(addr >> 16)); + 0xFF, (u8)(addr >> 16)); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF, - (xd_card->addr_cycle - 1) | XD_CALC_ECC | + (xd_card->addr_cycle - 1) | XD_CALC_ECC | XD_BA_NO_TRANSFORM); break; @@ -153,7 +155,7 @@ static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode) } static 
int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr, - u8 *buf, int buf_len) + u8 *buf, int buf_len) { int retval, i; @@ -162,16 +164,16 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr, xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, - 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT); + 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); for (i = 0; i < 6; i++) rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i), - 0, 0); + 0, 0); for (i = 0; i < 4; i++) rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i), - 0, 0); + 0, 0); rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0); retval = rtsx_send_cmd(chip, XD_CARD, 500); @@ -192,7 +194,7 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr, } static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset, - u8 *buf, int buf_len) + u8 *buf, int buf_len) { int retval, i; @@ -205,7 +207,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset, for (i = 0; i < buf_len; i++) rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i, - 0, 0); + 0, 0); retval = rtsx_send_cmd(chip, 0, 250); if (retval < 0) { @@ -220,7 +222,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset, } static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf, - int buf_len) + int buf_len) { int retval; u8 reg; @@ -235,15 +237,15 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf, xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, PINGPONG_BUFFER); + 0x01, PINGPONG_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, - XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS); + XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, - XD_TRANSFER_START | XD_READ_PAGES); + XD_TRANSFER_START | XD_READ_PAGES); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END, - XD_TRANSFER_END); + XD_TRANSFER_END); retval = rtsx_send_cmd(chip, XD_CARD, 250); if (retval == -ETIMEDOUT) { @@ -347,27 +349,27 @@ static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip) { if (CHECK_PID(chip, 0x5208)) { rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, - XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD); + XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, - XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD); + XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, - XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); + XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, - XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD); + XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, - MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); + MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, - MS_D5_PD | MS_D4_PD); + MS_D5_PD | MS_D4_PD); } else if (CHECK_PID(chip, 0x5288)) { if (CHECK_BARO_PKG(chip, QFN)) { rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, - 0xFF, 0x55); + 0xFF, 0x55); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, - 0xFF, 0x55); + 0xFF, 0x55); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, - 0xFF, 0x4B); + 
0xFF, 0x4B); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, - 0xFF, 0x69); + 0xFF, 0x69); } } } @@ -386,27 +388,27 @@ static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip) { if (CHECK_PID(chip, 0x5208)) { rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, - XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD); + XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, - XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD); + XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, - XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU); + XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, - XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD); + XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, - MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); + MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, - MS_D5_PD | MS_D4_PD); + MS_D5_PD | MS_D4_PD); } else if (CHECK_PID(chip, 0x5288)) { if (CHECK_BARO_PKG(chip, QFN)) { rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, - 0xFF, 0x55); + 0xFF, 0x55); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, - 0xFF, 0x55); + 0xFF, 0x55); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, - 0xFF, 0x53); + 0xFF, 0x53); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, - 0xFF, 0xA9); + 0xFF, 0xA9); } } } @@ -417,31 +419,46 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip) if (CHECK_PID(chip, 0x5208)) { retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF, - XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD); + XD_D3_PD | + XD_D2_PD | + XD_D1_PD | + XD_D0_PD); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF, - XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD); + XD_D7_PD | + XD_D6_PD | + XD_D5_PD | + XD_D4_PD); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF, - XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU); + XD_WP_PD | + XD_CE_PD | + XD_CLE_PD | + XD_CD_PU); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF, - XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD); + XD_RDY_PD | + XD_WE_PD | + XD_RE_PD | + XD_ALE_PD); if (retval) { rtsx_trace(chip); return retval; } retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF, - MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD); + MS_INS_PU | + SD_WP_PD | + SD_CD_PU | + SD_CMD_PD); if (retval) { rtsx_trace(chip); return retval; @@ -486,7 +503,7 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip) static int reset_xd(struct rtsx_chip *chip) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int retval, i, j; u8 *ptr, id_buf[4], redunt[11]; @@ -499,7 +516,7 @@ static int reset_xd(struct rtsx_chip *chip) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF, - XD_PGSTS_NOT_FF); + XD_PGSTS_NOT_FF); if (chip->asic_code) { if (!CHECK_PID(chip, 0x5288)) xd_fill_pull_ctl_disable(chip); @@ -507,12 +524,13 @@ static int reset_xd(struct rtsx_chip *chip) xd_fill_pull_ctl_stage1_barossa(chip); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF, - (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) | 0x20); + (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) | + 0x20); } if (!chip->ft2_fast_mode) rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT, - XD_NO_AUTO_PWR_OFF, 0); + XD_NO_AUTO_PWR_OFF, 0); 
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0); @@ -537,8 +555,9 @@ static int reset_xd(struct rtsx_chip *chip) xd_fill_pull_ctl_enable(chip); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF, - (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) | - 0x20); + (FPGA_XD_PULL_CTL_EN1 & + FPGA_XD_PULL_CTL_EN2) | + 0x20); } retval = rtsx_send_cmd(chip, XD_CARD, 100); @@ -571,8 +590,9 @@ static int reset_xd(struct rtsx_chip *chip) xd_fill_pull_ctl_enable(chip); } else { rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF, - (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) | - 0x20); + (FPGA_XD_PULL_CTL_EN1 & + FPGA_XD_PULL_CTL_EN2) | + 0x20); } } @@ -599,16 +619,17 @@ static int reset_xd(struct rtsx_chip *chip) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF, - XD_TIME_SETUP_STEP * 3 + - XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i); + XD_TIME_SETUP_STEP * 3 + + XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF, - XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 + i) + - XD_TIME_RWN_STEP * (3 + i)); + XD_TIME_SETUP_STEP * 3 + + XD_TIME_RW_STEP * (4 + i) + + XD_TIME_RWN_STEP * (3 + i)); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, - XD_TRANSFER_START | XD_RESET); + XD_TRANSFER_START | XD_RESET); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0); rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0); @@ -625,7 +646,7 @@ static int reset_xd(struct rtsx_chip *chip) ptr[0], ptr[1]); if (((ptr[0] & READY_FLAG) != READY_STATE) || - !(ptr[1] & XD_RDY)) + !(ptr[1] & XD_RDY)) continue; retval = xd_read_id(chip, READ_ID, id_buf, 4); @@ -773,7 +794,7 @@ static int reset_xd(struct rtsx_chip *chip) if (redunt[PAGE_STATUS] != XD_GPG) { for (j = 1; j <= 8; j++) { retval = xd_read_redundant(chip, page_addr + j, - redunt, 11); + redunt, 11); if (retval == STATUS_SUCCESS) { if (redunt[PAGE_STATUS] == XD_GPG) break; @@ -786,7 +807,7 @@ static int reset_xd(struct rtsx_chip *chip) /* Check CIS data */ if ((redunt[BLOCK_STATUS] == XD_GBLK) && - (redunt[PARITY] & XD_BA1_ALL0)) { + (redunt[PARITY] & XD_BA1_ALL0)) { u8 buf[10]; page_addr += j; @@ -798,11 +819,11 @@ static int reset_xd(struct rtsx_chip *chip) } if ((buf[0] == 0x01) && (buf[1] == 0x03) && - (buf[2] == 0xD9) - && (buf[3] == 0x01) && (buf[4] == 0xFF) - && (buf[5] == 0x18) && (buf[6] == 0x02) - && (buf[7] == 0xDF) && (buf[8] == 0x01) - && (buf[9] == 0x20)) { + (buf[2] == 0xD9) && + (buf[3] == 0x01) && (buf[4] == 0xFF) && + (buf[5] == 0x18) && (buf[6] == 0x02) && + (buf[7] == 0xDF) && (buf[8] == 0x01) && + (buf[9] == 0x20)) { xd_card->cis_block = (u16)i; } } @@ -861,7 +882,7 @@ static u16 xd_load_log_block_addr(u8 *redunt) static int xd_init_l2p_tbl(struct rtsx_chip *chip) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int size, i; dev_dbg(rtsx_dev(chip), "xd_init_l2p_tbl: zone_cnt = %d\n", @@ -910,7 +931,7 @@ static inline void free_zone(struct zone_entry *zone) static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; struct zone_entry *zone; int zone_no; @@ -920,15 +941,15 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk) zone_no, xd_card->zone_cnt); return; } - zone = &(xd_card->zone[zone_no]); + zone = &xd_card->zone[zone_no]; - if (zone->free_table == NULL) { + if 
(!zone->free_table) { if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS) return; } - if ((zone->set_index >= XD_FREE_TABLE_CNT) - || (zone->set_index < 0)) { + if ((zone->set_index >= XD_FREE_TABLE_CNT) || + (zone->set_index < 0)) { free_zone(zone); dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n"); return; @@ -945,7 +966,7 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk) static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; struct zone_entry *zone; u32 phy_blk; @@ -954,10 +975,10 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no) zone_no, xd_card->zone_cnt); return BLK_NOT_FOUND; } - zone = &(xd_card->zone[zone_no]); + zone = &xd_card->zone[zone_no]; if ((zone->unused_blk_cnt == 0) || - (zone->set_index == zone->get_index)) { + (zone->set_index == zone->get_index)) { free_zone(zone); dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n"); return BLK_NOT_FOUND; @@ -982,22 +1003,22 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no) } static void xd_set_l2p_tbl(struct rtsx_chip *chip, - int zone_no, u16 log_off, u16 phy_off) + int zone_no, u16 log_off, u16 phy_off) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; struct zone_entry *zone; - zone = &(xd_card->zone[zone_no]); + zone = &xd_card->zone[zone_no]; zone->l2p_table[log_off] = phy_off; } static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; struct zone_entry *zone; int retval; - zone = &(xd_card->zone[zone_no]); + zone = &xd_card->zone[zone_no]; if (zone->l2p_table[log_off] == 0xFFFF) { u32 phy_blk = 0; int i; @@ -1023,7 +1044,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off) } retval = xd_init_page(chip, phy_blk, log_off, - 0, xd_card->page_off + 1); + 0, xd_card->page_off + 1); if (retval == STATUS_SUCCESS) break; } @@ -1041,7 +1062,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off) int reset_xd_card(struct rtsx_chip *chip) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int retval; memset(xd_card, 0, sizeof(struct xd_info)); @@ -1077,7 +1098,7 @@ int reset_xd_card(struct rtsx_chip *chip) static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int retval; u32 page_addr; u8 reg = 0; @@ -1107,12 +1128,12 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk) xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, - xd_card->page_off + 1); + xd_card->page_off + 1); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, - XD_TRANSFER_START | XD_WRITE_REDUNDANT); + XD_TRANSFER_START | XD_WRITE_REDUNDANT); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); retval = rtsx_send_cmd(chip, XD_CARD, 500); if (retval < 0) { @@ -1132,7 +1153,7 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk) static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff, u8 start_page, u8 end_page) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int retval; u32 page_addr; u8 reg = 0; @@ -1153,7 
+1174,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, - 0xFF, (u8)(logoff >> 8)); + 0xFF, (u8)(logoff >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff); page_addr = (phy_blk << xd_card->block_shift) + start_page; @@ -1161,15 +1182,15 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, - XD_BA_TRANSFORM, XD_BA_TRANSFORM); + XD_BA_TRANSFORM, XD_BA_TRANSFORM); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, - 0xFF, (end_page - start_page)); + 0xFF, (end_page - start_page)); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, - 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT); + 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); retval = rtsx_send_cmd(chip, XD_CARD, 500); if (retval < 0) { @@ -1191,7 +1212,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk, u8 start_page, u8 end_page) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; u32 old_page, new_page; u8 i, reg = 0; int retval; @@ -1235,11 +1256,11 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk, rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, - XD_AUTO_CHK_DATA_STATUS, 0); + XD_AUTO_CHK_DATA_STATUS, 0); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, - XD_TRANSFER_START | XD_READ_PAGES); + XD_TRANSFER_START | XD_READ_PAGES); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); retval = rtsx_send_cmd(chip, XD_CARD, 500); if (retval < 0) { @@ -1250,22 +1271,24 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk, wait_timeout(100); if (detect_card_cd(chip, - XD_CARD) != STATUS_SUCCESS) { + XD_CARD) != STATUS_SUCCESS) { xd_set_err_code(chip, XD_NO_CARD); rtsx_trace(chip); return STATUS_FAIL; } if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) == - (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) - || ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) == + (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) || + ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) == (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) { rtsx_write_register(chip, - XD_PAGE_STATUS, 0xFF, - XD_BPG); + XD_PAGE_STATUS, + 0xFF, + XD_BPG); rtsx_write_register(chip, - XD_BLOCK_STATUS, 0xFF, - XD_GBLK); + XD_BLOCK_STATUS, + 0xFF, + XD_GBLK); XD_SET_BAD_OLDBLK(xd_card); dev_dbg(rtsx_dev(chip), "old block 0x%x ecc error\n", old_blk); @@ -1287,7 +1310,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk, rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); retval = rtsx_send_cmd(chip, XD_CARD, 300); if (retval < 0) { @@ -1320,9 +1343,9 @@ static int xd_reset_cmd(struct rtsx_chip *chip) rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, - 0xFF, XD_TRANSFER_START | XD_RESET); + 0xFF, XD_TRANSFER_START | XD_RESET); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - 
XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0); rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0); @@ -1342,7 +1365,7 @@ static int xd_reset_cmd(struct rtsx_chip *chip) static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; u32 page_addr; u8 reg = 0, *ptr; int i, retval; @@ -1360,9 +1383,9 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk) xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, - XD_TRANSFER_START | XD_ERASE); + XD_TRANSFER_START | XD_ERASE); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0); retval = rtsx_send_cmd(chip, XD_CARD, 250); @@ -1403,7 +1426,7 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk) static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; struct zone_entry *zone; int retval; u32 start, end, i; @@ -1413,7 +1436,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) dev_dbg(rtsx_dev(chip), "xd_build_l2p_tbl: %d\n", zone_no); - if (xd_card->zone == NULL) { + if (!xd_card->zone) { retval = xd_init_l2p_tbl(chip); if (retval != STATUS_SUCCESS) return retval; @@ -1425,22 +1448,22 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) return STATUS_SUCCESS; } - zone = &(xd_card->zone[zone_no]); + zone = &xd_card->zone[zone_no]; - if (zone->l2p_table == NULL) { + if (!zone->l2p_table) { zone->l2p_table = vmalloc(2000); if (!zone->l2p_table) { rtsx_trace(chip); - goto Build_Fail; + goto build_fail; } } memset((u8 *)(zone->l2p_table), 0xff, 2000); - if (zone->free_table == NULL) { + if (!zone->free_table) { zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2); if (!zone->free_table) { rtsx_trace(chip); - goto Build_Fail; + goto build_fail; } } memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2); @@ -1466,7 +1489,8 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) dev_dbg(rtsx_dev(chip), "start block 0x%x, end block 0x%x\n", start, end); - zone->set_index = zone->get_index = 0; + zone->set_index = 0; + zone->get_index = 0; zone->unused_blk_cnt = 0; for (i = start; i < end; i++) { @@ -1490,7 +1514,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) cur_fst_page_logoff = xd_load_log_block_addr(redunt); if ((cur_fst_page_logoff == 0xFFFF) || - (cur_fst_page_logoff > max_logoff)) { + (cur_fst_page_logoff > max_logoff)) { retval = xd_erase_block(chip, i); if (retval == STATUS_SUCCESS) xd_set_unused_block(chip, i); @@ -1498,7 +1522,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) } if ((zone_no == 0) && (cur_fst_page_logoff == 0) && - (redunt[PAGE_STATUS] != XD_GPG)) + (redunt[PAGE_STATUS] != XD_GPG)) XD_SET_MBR_FAIL(xd_card); if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) { @@ -1524,7 +1548,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) for (m = 0; m < 3; m++) { retval = xd_read_redundant(chip, page_addr, - redunt, 11); + redunt, 11); if (retval == STATUS_SUCCESS) break; } @@ -1581,7 +1605,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) return STATUS_SUCCESS; -Build_Fail: +build_fail: vfree(zone->l2p_table); zone->l2p_table = NULL; vfree(zone->free_table); @@ -1598,9 
+1622,9 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd) rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, - XD_TRANSFER_START | XD_SET_CMD); + XD_TRANSFER_START | XD_SET_CMD); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); retval = rtsx_send_cmd(chip, XD_CARD, 200); if (retval < 0) { @@ -1612,18 +1636,18 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd) } static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk, - u32 log_blk, u8 start_page, u8 end_page, - u8 *buf, unsigned int *index, - unsigned int *offset) + u32 log_blk, u8 start_page, u8 end_page, + u8 *buf, unsigned int *index, + unsigned int *offset) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; u32 page_addr, new_blk; u16 log_off; u8 reg_val, page_cnt; int zone_no, retval, i; if (start_page > end_page) - goto Status_Fail; + goto status_fail; page_cnt = end_page - start_page; zone_no = (int)(log_blk / 1000); @@ -1639,7 +1663,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk, if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { xd_set_err_code(chip, XD_NO_CARD); - goto Status_Fail; + goto status_fail; } } } @@ -1653,37 +1677,38 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk, rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, - XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS); + XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS); trans_dma_enable(chip->srb->sc_data_direction, chip, - page_cnt * 512, DMA_512); + page_cnt * 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF, - XD_TRANSFER_START | XD_READ_PAGES); + XD_TRANSFER_START | XD_READ_PAGES); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END | XD_PPB_EMPTY, XD_TRANSFER_END | XD_PPB_EMPTY); + XD_TRANSFER_END | XD_PPB_EMPTY, + XD_TRANSFER_END | XD_PPB_EMPTY); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512, - scsi_sg_count(chip->srb), - index, offset, DMA_FROM_DEVICE, - chip->xd_timeout); + scsi_sg_count(chip->srb), + index, offset, DMA_FROM_DEVICE, + chip->xd_timeout); if (retval < 0) { rtsx_clear_xd_error(chip); if (retval == -ETIMEDOUT) { xd_set_err_code(chip, XD_TO_ERROR); - goto Status_Fail; + goto status_fail; } else { rtsx_trace(chip); - goto Fail; + goto fail; } } return STATUS_SUCCESS; -Fail: +fail: retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg_val); if (retval) { rtsx_trace(chip); @@ -1699,15 +1724,15 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk, return retval; } - if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) - == (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) - || ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) - == (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) { + if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) == + (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) || + ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) == + (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) { wait_timeout(100); if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { xd_set_err_code(chip, XD_NO_CARD); - goto Status_Fail; + goto status_fail; } xd_set_err_code(chip, XD_ECC_ERROR); @@ -1715,11 +1740,11 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk, new_blk = 
xd_get_unused_block(chip, zone_no); if (new_blk == NO_NEW_BLK) { XD_CLR_BAD_OLDBLK(xd_card); - goto Status_Fail; + goto status_fail; } retval = xd_copy_page(chip, phy_blk, new_blk, 0, - xd_card->page_off + 1); + xd_card->page_off + 1); if (retval != STATUS_SUCCESS) { if (!XD_CHK_BAD_NEWBLK(xd_card)) { retval = xd_erase_block(chip, new_blk); @@ -1729,7 +1754,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk, XD_CLR_BAD_NEWBLK(xd_card); } XD_CLR_BAD_OLDBLK(xd_card); - goto Status_Fail; + goto status_fail; } xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF)); xd_erase_block(chip, phy_blk); @@ -1737,15 +1762,15 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk, XD_CLR_BAD_OLDBLK(xd_card); } -Status_Fail: +status_fail: rtsx_trace(chip); return STATUS_FAIL; } static int xd_finish_write(struct rtsx_chip *chip, - u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off) + u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int retval, zone_no; u16 log_off; @@ -1762,7 +1787,7 @@ static int xd_finish_write(struct rtsx_chip *chip, if (old_blk == BLK_NOT_FOUND) { retval = xd_init_page(chip, new_blk, log_off, - page_off, xd_card->page_off + 1); + page_off, xd_card->page_off + 1); if (retval != STATUS_SUCCESS) { retval = xd_erase_block(chip, new_blk); if (retval == STATUS_SUCCESS) @@ -1772,7 +1797,7 @@ static int xd_finish_write(struct rtsx_chip *chip, } } else { retval = xd_copy_page(chip, old_blk, new_blk, - page_off, xd_card->page_off + 1); + page_off, xd_card->page_off + 1); if (retval != STATUS_SUCCESS) { if (!XD_CHK_BAD_NEWBLK(xd_card)) { retval = xd_erase_block(chip, new_blk); @@ -1804,7 +1829,7 @@ static int xd_finish_write(struct rtsx_chip *chip, } static int xd_prepare_write(struct rtsx_chip *chip, - u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off) + u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off) { int retval; @@ -1823,11 +1848,11 @@ static int xd_prepare_write(struct rtsx_chip *chip, } static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk, - u32 new_blk, u32 log_blk, u8 start_page, - u8 end_page, u8 *buf, unsigned int *index, - unsigned int *offset) + u32 new_blk, u32 log_blk, u8 start_page, + u8 end_page, u8 *buf, unsigned int *index, + unsigned int *offset) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; u32 page_addr; int zone_no, retval; u16 log_off; @@ -1837,7 +1862,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk, __func__, old_blk, new_blk, log_blk); if (start_page > end_page) - goto Status_Fail; + goto status_fail; page_cnt = end_page - start_page; zone_no = (int)(log_blk / 1000); @@ -1847,12 +1872,12 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk, retval = xd_send_cmd(chip, READ1_1); if (retval != STATUS_SUCCESS) - goto Status_Fail; + goto status_fail; rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, - 0xFF, (u8)(log_off >> 8)); + 0xFF, (u8)(log_off >> 8)); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG); @@ -1860,32 +1885,32 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk, xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM, - XD_BA_TRANSFORM); + XD_BA_TRANSFORM); 
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); trans_dma_enable(chip->srb->sc_data_direction, chip, - page_cnt * 512, DMA_512); + page_cnt * 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, - 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES); + 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES); rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, - XD_TRANSFER_END, XD_TRANSFER_END); + XD_TRANSFER_END, XD_TRANSFER_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512, - scsi_sg_count(chip->srb), - index, offset, DMA_TO_DEVICE, chip->xd_timeout); + scsi_sg_count(chip->srb), + index, offset, DMA_TO_DEVICE, chip->xd_timeout); if (retval < 0) { rtsx_clear_xd_error(chip); if (retval == -ETIMEDOUT) { xd_set_err_code(chip, XD_TO_ERROR); - goto Status_Fail; + goto status_fail; } else { rtsx_trace(chip); - goto Fail; + goto fail; } } @@ -1911,7 +1936,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk, return STATUS_SUCCESS; -Fail: +fail: retval = rtsx_read_register(chip, XD_DAT, &reg_val); if (retval) { rtsx_trace(chip); @@ -1922,7 +1947,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk, xd_mark_bad_block(chip, new_blk); } -Status_Fail: +status_fail: rtsx_trace(chip); return STATUS_FAIL; } @@ -1930,8 +1955,8 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk, #ifdef XD_DELAY_WRITE int xd_delay_write(struct rtsx_chip *chip) { - struct xd_info *xd_card = &(chip->xd_card); - struct xd_delay_write_tag *delay_write = &(xd_card->delay_write); + struct xd_info *xd_card = &chip->xd_card; + struct xd_delay_write_tag *delay_write = &xd_card->delay_write; int retval; if (delay_write->delay_write_flag) { @@ -1944,9 +1969,10 @@ int xd_delay_write(struct rtsx_chip *chip) delay_write->delay_write_flag = 0; retval = xd_finish_write(chip, - delay_write->old_phyblock, - delay_write->new_phyblock, - delay_write->logblock, delay_write->pageoff); + delay_write->old_phyblock, + delay_write->new_phyblock, + delay_write->logblock, + delay_write->pageoff); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); return STATUS_FAIL; @@ -1958,12 +1984,12 @@ int xd_delay_write(struct rtsx_chip *chip) #endif int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, - u32 start_sector, u16 sector_cnt) + u32 start_sector, u16 sector_cnt) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; unsigned int lun = SCSI_LUN(srb); #ifdef XD_DELAY_WRITE - struct xd_delay_write_tag *delay_write = &(xd_card->delay_write); + struct xd_delay_write_tag *delay_write = &xd_card->delay_write; #endif int retval, zone_no; unsigned int index = 0, offset = 0; @@ -2012,17 +2038,18 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, if (srb->sc_data_direction == DMA_TO_DEVICE) { #ifdef XD_DELAY_WRITE if (delay_write->delay_write_flag && - (delay_write->logblock == log_blk) && - (start_page > delay_write->pageoff)) { + (delay_write->logblock == log_blk) && + (start_page > delay_write->pageoff)) { delay_write->delay_write_flag = 0; if (delay_write->old_phyblock != BLK_NOT_FOUND) { retval = xd_copy_page(chip, - delay_write->old_phyblock, - delay_write->new_phyblock, - delay_write->pageoff, start_page); + delay_write->old_phyblock, + delay_write->new_phyblock, + delay_write->pageoff, + start_page); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); 
rtsx_trace(chip); return STATUS_FAIL; } @@ -2039,7 +2066,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, retval = xd_delay_write(chip); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -2047,25 +2074,25 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, old_blk = xd_get_l2p_tbl(chip, zone_no, log_off); new_blk = xd_get_unused_block(chip, zone_no); if ((old_blk == BLK_NOT_FOUND) || - (new_blk == BLK_NOT_FOUND)) { + (new_blk == BLK_NOT_FOUND)) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } retval = xd_prepare_write(chip, old_blk, new_blk, - log_blk, start_page); + log_blk, start_page); if (retval != STATUS_SUCCESS) { if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_NOT_PRESENT); + SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; } set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -2078,12 +2105,12 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, if (retval != STATUS_SUCCESS) { if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_NOT_PRESENT); + SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; } set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -2092,7 +2119,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, old_blk = xd_get_l2p_tbl(chip, zone_no, log_off); if (old_blk == BLK_NOT_FOUND) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -2116,22 +2143,22 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, page_cnt = end_page - start_page; if (srb->sc_data_direction == DMA_FROM_DEVICE) { retval = xd_read_multiple_pages(chip, old_blk, log_blk, - start_page, end_page, ptr, - &index, &offset); + start_page, end_page, + ptr, &index, &offset); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_trace(chip); return STATUS_FAIL; } } else { retval = xd_write_multiple_pages(chip, old_blk, - new_blk, log_blk, - start_page, end_page, ptr, - &index, &offset); + new_blk, log_blk, + start_page, end_page, + ptr, &index, &offset); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -2153,7 +2180,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, if (retval != STATUS_SUCCESS) { chip->card_fail |= XD_CARD; set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_NOT_PRESENT); + SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; } @@ -2163,10 +2190,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, if (old_blk == BLK_NOT_FOUND) { if (srb->sc_data_direction == DMA_FROM_DEVICE) set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); else set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; @@ -2176,7 +2203,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, new_blk = xd_get_unused_block(chip, zone_no); 
if (new_blk == BLK_NOT_FOUND) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_WRITE_ERR); + SENSE_TYPE_MEDIA_WRITE_ERR); rtsx_trace(chip); return STATUS_FAIL; } @@ -2186,7 +2213,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, } if ((srb->sc_data_direction == DMA_TO_DEVICE) && - (end_page != (xd_card->page_off + 1))) { + (end_page != (xd_card->page_off + 1))) { #ifdef XD_DELAY_WRITE delay_write->delay_write_flag = 1; delay_write->old_phyblock = old_blk; @@ -2202,11 +2229,11 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, } retval = xd_finish_write(chip, old_blk, new_blk, - log_blk, end_page); + log_blk, end_page); if (retval != STATUS_SUCCESS) { if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, - SENSE_TYPE_MEDIA_NOT_PRESENT); + SENSE_TYPE_MEDIA_NOT_PRESENT); rtsx_trace(chip); return STATUS_FAIL; } @@ -2224,10 +2251,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, void xd_free_l2p_tbl(struct rtsx_chip *chip) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int i = 0; - if (xd_card->zone != NULL) { + if (xd_card->zone) { for (i = 0; i < xd_card->zone_cnt; i++) { vfree(xd_card->zone[i].l2p_table); xd_card->zone[i].l2p_table = NULL; @@ -2242,7 +2269,7 @@ void xd_free_l2p_tbl(struct rtsx_chip *chip) void xd_cleanup_work(struct rtsx_chip *chip) { #ifdef XD_DELAY_WRITE - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; if (xd_card->delay_write.delay_write_flag) { dev_dbg(rtsx_dev(chip), "xD: delay write\n"); @@ -2297,7 +2324,7 @@ int xd_power_off_card3v3(struct rtsx_chip *chip) int release_xd_card(struct rtsx_chip *chip) { - struct xd_info *xd_card = &(chip->xd_card); + struct xd_info *xd_card = &chip->xd_card; int retval; chip->card_ready &= ~XD_CARD; diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h index 938138c50bb55c34861b26d934d09f8d62ba9096..d5f10880efb749a1ae34ba14c87fda47bd0d2f56 100644 --- a/drivers/staging/rts5208/xd.h +++ b/drivers/staging/rts5208/xd.h @@ -179,7 +179,7 @@ int reset_xd_card(struct rtsx_chip *chip); int xd_delay_write(struct rtsx_chip *chip); #endif int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, - u32 start_sector, u16 sector_cnt); + u32 start_sector, u16 sector_cnt); void xd_free_l2p_tbl(struct rtsx_chip *chip); void xd_cleanup_work(struct rtsx_chip *chip); int xd_power_off_card3v3(struct rtsx_chip *chip); diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c index cab26e736111420230b83f7c81d554ccdb7412d4..c6526b6fbfb4ac9bf963e2a001d5175e67410a75 100644 --- a/drivers/staging/skein/skein_api.c +++ b/drivers/staging/skein/skein_api.c @@ -98,19 +98,16 @@ int skein_mac_init(struct skein_ctx *ctx, const u8 *key, size_t key_len, switch (ctx->skein_size) { case SKEIN_256: ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len, - tree_info, - (const u8 *)key, key_len); + tree_info, key, key_len); break; case SKEIN_512: ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len, - tree_info, - (const u8 *)key, key_len); + tree_info, key, key_len); break; case SKEIN_1024: ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len, - tree_info, - (const u8 *)key, key_len); + tree_info, key, key_len); break; } @@ -152,16 +149,13 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg, switch (ctx->skein_size) { case SKEIN_256: - ret = skein_256_update(&ctx->m.s256, (const u8 *)msg, - msg_byte_cnt); + ret = skein_256_update(&ctx->m.s256, msg, msg_byte_cnt); break; case SKEIN_512: - ret 
= skein_512_update(&ctx->m.s512, (const u8 *)msg, - msg_byte_cnt); + ret = skein_512_update(&ctx->m.s512, msg, msg_byte_cnt); break; case SKEIN_1024: - ret = skein_1024_update(&ctx->m.s1024, (const u8 *)msg, - msg_byte_cnt); + ret = skein_1024_update(&ctx->m.s1024, msg, msg_byte_cnt); break; } return ret; @@ -211,7 +205,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg, /* partial byte bit mask */ mask = (u8)(1u << (7 - (msg_bit_cnt & 7))); /* apply bit padding on final byte (in the buffer) */ - up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask); + up[length - 1] = (up[length - 1] & (0 - mask)) | mask; return SKEIN_SUCCESS; } @@ -224,13 +218,13 @@ int skein_final(struct skein_ctx *ctx, u8 *hash) switch (ctx->skein_size) { case SKEIN_256: - ret = skein_256_final(&ctx->m.s256, (u8 *)hash); + ret = skein_256_final(&ctx->m.s256, hash); break; case SKEIN_512: - ret = skein_512_final(&ctx->m.s512, (u8 *)hash); + ret = skein_512_final(&ctx->m.s512, hash); break; case SKEIN_1024: - ret = skein_1024_final(&ctx->m.s1024, (u8 *)hash); + ret = skein_1024_final(&ctx->m.s1024, hash); break; } return ret; diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c index a95563fad071c68fd21a28038c0d6dc1c05b4a54..50640656c10dfe615eb292c3682db28e9ef49153 100644 --- a/drivers/staging/skein/threefish_block.c +++ b/drivers/staging/skein/threefish_block.c @@ -64,7 +64,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b2 += b1; b1 = rol64(b1, 32) ^ b2; - b1 += k3 + t2; b0 += b1 + k2; b1 = rol64(b1, 14) ^ b0; @@ -117,7 +116,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b2 += b1; b1 = rol64(b1, 32) ^ b2; - b1 += k0 + t1; b0 += b1 + k4; b1 = rol64(b1, 14) ^ b0; @@ -170,7 +168,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b2 += b1; b1 = rol64(b1, 32) ^ b2; - b1 += k2 + t0; b0 += b1 + k1; b1 = rol64(b1, 14) ^ b0; @@ -223,7 +220,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b2 += b1; b1 = rol64(b1, 32) ^ b2; - b1 += k4 + t2; b0 += b1 + k3; b1 = rol64(b1, 14) ^ b0; @@ -276,7 +272,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b2 += b1; b1 = rol64(b1, 32) ^ b2; - b1 += k1 + t1; b0 += b1 + k0; b1 = rol64(b1, 14) ^ b0; @@ -329,7 +324,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b2 += b1; b1 = rol64(b1, 32) ^ b2; - b1 += k3 + t0; b0 += b1 + k2; b1 = rol64(b1, 14) ^ b0; @@ -382,7 +376,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b2 += b1; b1 = rol64(b1, 32) ^ b2; - b1 += k0 + t2; b0 += b1 + k4; b1 = rol64(b1, 14) ^ b0; @@ -435,7 +428,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b2 += b1; b1 = rol64(b1, 32) ^ b2; - b1 += k2 + t1; b0 += b1 + k1; b1 = rol64(b1, 14) ^ b0; @@ -579,7 +571,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input, b2 -= b3 + k3 + t2; b3 -= k4 + 16; - tmp = b3 ^ b0; b3 = ror64(tmp, 32); b0 -= b3; @@ -648,7 +639,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input, b2 -= b3 + k1 + t0; b3 -= k2 + 14; - tmp = b3 ^ b0; b3 = ror64(tmp, 32); b0 -= b3; @@ -717,7 +707,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input, b2 -= b3 + k4 + t1; b3 -= k0 + 12; - tmp = b3 ^ b0; b3 = ror64(tmp, 32); b0 -= b3; @@ -786,7 +775,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input, b2 -= b3 + k2 + t2; b3 -= k3 + 10; - tmp = b3 ^ b0; b3 = ror64(tmp, 32); b0 
-= b3; @@ -855,7 +843,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input, b2 -= b3 + k0 + t0; b3 -= k1 + 8; - tmp = b3 ^ b0; b3 = ror64(tmp, 32); b0 -= b3; @@ -924,7 +911,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input, b2 -= b3 + k3 + t1; b3 -= k4 + 6; - tmp = b3 ^ b0; b3 = ror64(tmp, 32); b0 -= b3; @@ -993,7 +979,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input, b2 -= b3 + k1 + t2; b3 -= k2 + 4; - tmp = b3 ^ b0; b3 = ror64(tmp, 32); b0 -= b3; @@ -1062,7 +1047,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input, b2 -= b3 + k4 + t0; b3 -= k0 + 2; - tmp = b3 ^ b0; b3 = ror64(tmp, 32); b0 -= b3; diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig deleted file mode 100644 index 5c2a15b42dfede39997352cc7289871bd4dfd161..0000000000000000000000000000000000000000 --- a/drivers/staging/slicoss/Kconfig +++ /dev/null @@ -1,14 +0,0 @@ -config SLICOSS - tristate "Alacritech Gigabit IS-NIC support" - depends on PCI && X86 && NET - default n - help - This driver supports Alacritech's IS-NIC gigabit ethernet cards. - - This includes the following devices: - Mojave cards (single port PCI Gigabit) both copper and fiber - Oasis cards (single and dual port PCI-x Gigabit) copper and fiber - Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber - - To compile this driver as a module, choose M here: the module - will be called slicoss. diff --git a/drivers/staging/slicoss/Makefile b/drivers/staging/slicoss/Makefile deleted file mode 100644 index 7bc9e9b9d3ab8de5551a73b57719a6722970752c..0000000000000000000000000000000000000000 --- a/drivers/staging/slicoss/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_SLICOSS) += slicoss.o diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README deleted file mode 100644 index 4fa50e73ce86a2a42884ca95d9105ff4ffed694d..0000000000000000000000000000000000000000 --- a/drivers/staging/slicoss/README +++ /dev/null @@ -1,7 +0,0 @@ -This driver is supposed to support: - - Mojave cards (single port PCI Gigabit) both copper and fiber - Oasis cards (single and dual port PCI-x Gigabit) copper and fiber - Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber - -The driver was actually tested on Oasis and Kalahari cards. diff --git a/drivers/staging/slicoss/TODO b/drivers/staging/slicoss/TODO deleted file mode 100644 index 9019729b7be690d55a6c775eb49fc6dca5c6cbc4..0000000000000000000000000000000000000000 --- a/drivers/staging/slicoss/TODO +++ /dev/null @@ -1,36 +0,0 @@ -TODO: - - move firmware loading to request_firmware() - - remove direct memory access of structures - - any remaining sparse and checkpatch.pl warnings - - - use net_device_ops - - use dev->stats rather than adapter->stats - - don't cast netdev_priv it is already void - - GET RID OF MACROS - - work on all architectures - - without CONFIG_X86_64 confusion - - do 64 bit correctly - - don't depend on order of union - - get rid of ASSERT(), use BUG() instead but only where necessary - looks like most aren't really useful - - no new SIOCDEVPRIVATE ioctl allowed - - don't use module_param for configuring interrupt mitigation - use ethtool instead - - reorder code to elminate use of forward declarations - - don't keep private linked list of drivers. - - use PCI_DEVICE() - - do ethtool correctly using ethtool_ops - - NAPI? 
- - wasted overhead of extra stats - - state variables for things that are - easily available and shouldn't be kept in card structure, cardnum, ... - slotnumber, events, ... - - volatile == bad design => bad code - - locking too fine grained, not designed just throw more locks - at problem - -Please send patches to: - Greg Kroah-Hartman -and Cc: Lior Dotan and Christopher Harrer - as well as they are also able to test out any -changes. diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h deleted file mode 100644 index 420546d4300282b3037841d87c2caf611ea13d62..0000000000000000000000000000000000000000 --- a/drivers/staging/slicoss/slic.h +++ /dev/null @@ -1,573 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2000-2002 Alacritech, Inc. All rights reserved. - * - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation - * are those of the authors and should not be interpreted as representing - * official policies, either expressed or implied, of Alacritech, Inc. - * - **************************************************************************/ - -/* - * FILENAME: slic.h - * - * This is the base set of header definitions for the SLICOSS driver. 
- */ -#ifndef __SLIC_DRIVER_H__ -#define __SLIC_DRIVER_H__ - -/* firmware stuff */ -#define OASIS_UCODE_VERS_STRING "1.2" -#define OASIS_UCODE_VERS_DATE "2006/03/27 15:10:37" -#define OASIS_UCODE_HOSTIF_ID 3 - -#define MOJAVE_UCODE_VERS_STRING "1.2" -#define MOJAVE_UCODE_VERS_DATE "2006/03/27 15:12:22" -#define MOJAVE_UCODE_HOSTIF_ID 3 - -#define GB_RCVUCODE_VERS_STRING "1.2" -#define GB_RCVUCODE_VERS_DATE "2006/03/27 15:12:15" -static u32 OasisRcvUCodeLen = 512; -static u32 GBRcvUCodeLen = 512; -#define SECTION_SIZE 65536 - -#define SLIC_RSPQ_PAGES_GB 10 -#define SLIC_RSPQ_BUFSINPAGE (PAGE_SIZE / SLIC_RSPBUF_SIZE) - -struct slic_rspqueue { - u32 offset; - u32 pageindex; - u32 num_pages; - struct slic_rspbuf *rspbuf; - u32 *vaddr[SLIC_RSPQ_PAGES_GB]; - dma_addr_t paddr[SLIC_RSPQ_PAGES_GB]; -}; - -#define SLIC_RCVQ_EXPANSION 1 -#define SLIC_RCVQ_ENTRIES (256 * SLIC_RCVQ_EXPANSION) -#define SLIC_RCVQ_MINENTRIES (SLIC_RCVQ_ENTRIES / 2) -#define SLIC_RCVQ_MAX_PROCESS_ISR ((SLIC_RCVQ_ENTRIES * 4)) -#define SLIC_RCVQ_RCVBUFSIZE 2048 -#define SLIC_RCVQ_FILLENTRIES (16 * SLIC_RCVQ_EXPANSION) -#define SLIC_RCVQ_FILLTHRESH (SLIC_RCVQ_ENTRIES - SLIC_RCVQ_FILLENTRIES) - -struct slic_rcvqueue { - struct sk_buff *head; - struct sk_buff *tail; - u32 count; - u32 size; - u32 errors; -}; - -struct slic_rcvbuf_info { - u32 id; - u32 starttime; - u32 stoptime; - u32 slicworld; - u32 lasttime; - u32 lastid; -}; - -/* - * SLIC Handle structure. Used to restrict handle values to - * 32 bits by using an index rather than an address. - * Simplifies ucode in 64-bit systems - */ -struct slic_handle_word { - union { - struct { - ushort index; - ushort bottombits; /* to denote num bufs to card */ - } parts; - u32 whole; - } handle; -}; - -struct slic_handle { - struct slic_handle_word token; /* token passed between host and card*/ - ushort type; - void *address; /* actual address of the object*/ - ushort offset; - struct slic_handle *other_handle; - struct slic_handle *next; -}; - -#define SLIC_HANDLE_FREE 0x0000 -#define SLIC_HANDLE_DATA 0x0001 -#define SLIC_HANDLE_CMD 0x0002 -#define SLIC_HANDLE_CONTEXT 0x0003 -#define SLIC_HANDLE_TEAM 0x0004 - -#define handle_index handle.parts.index -#define handle_bottom handle.parts.bottombits -#define handle_token handle.whole - -#define SLIC_HOSTCMD_SIZE 512 - -struct slic_hostcmd { - struct slic_host64_cmd cmd64; - u32 type; - struct sk_buff *skb; - u32 paddrl; - u32 paddrh; - u32 busy; - u32 cmdsize; - ushort numbufs; - struct slic_handle *pslic_handle;/* handle associated with command */ - struct slic_hostcmd *next; - struct slic_hostcmd *next_all; -}; - -#define SLIC_CMDQ_CMDSINPAGE (PAGE_SIZE / SLIC_HOSTCMD_SIZE) -#define SLIC_CMD_DUMB 3 -#define SLIC_CMDQ_INITCMDS 256 -#define SLIC_CMDQ_MAXCMDS 256 -#define SLIC_CMDQ_MAXOUTSTAND SLIC_CMDQ_MAXCMDS -#define SLIC_CMDQ_MAXPAGES (SLIC_CMDQ_MAXCMDS / SLIC_CMDQ_CMDSINPAGE) -#define SLIC_CMDQ_INITPAGES (SLIC_CMDQ_INITCMDS / SLIC_CMDQ_CMDSINPAGE) - -struct slic_cmdqmem { - int pagecnt; - u32 *pages[SLIC_CMDQ_MAXPAGES]; - dma_addr_t dma_pages[SLIC_CMDQ_MAXPAGES]; -}; - -struct slic_cmdqueue { - struct slic_hostcmd *head; - struct slic_hostcmd *tail; - int count; - spinlock_t lock; -}; - -#define SLIC_MAX_CARDS 32 -#define SLIC_MAX_PORTS 4 /* Max # of ports per card */ - -struct mcast_address { - unsigned char address[6]; - struct mcast_address *next; -}; - -#define CARD_DOWN 0x00000000 -#define CARD_UP 0x00000001 -#define CARD_FAIL 0x00000002 -#define CARD_DIAG 0x00000003 -#define CARD_SLEEP 0x00000004 - -#define ADAPT_DOWN 0x00 
-#define ADAPT_UP 0x01 -#define ADAPT_FAIL 0x02 -#define ADAPT_RESET 0x03 -#define ADAPT_SLEEP 0x04 - -#define ADAPT_FLAGS_BOOTTIME 0x0001 -#define ADAPT_FLAGS_IS64BIT 0x0002 -#define ADAPT_FLAGS_PENDINGLINKDOWN 0x0004 -#define ADAPT_FLAGS_FIBERMEDIA 0x0008 -#define ADAPT_FLAGS_LOCKS_ALLOCED 0x0010 -#define ADAPT_FLAGS_INT_REGISTERED 0x0020 -#define ADAPT_FLAGS_LOAD_TIMER_SET 0x0040 -#define ADAPT_FLAGS_STATS_TIMER_SET 0x0080 -#define ADAPT_FLAGS_RESET_TIMER_SET 0x0100 - -#define LINK_DOWN 0x00 -#define LINK_CONFIG 0x01 -#define LINK_UP 0x02 - -#define LINK_10MB 0x00 -#define LINK_100MB 0x01 -#define LINK_AUTOSPEED 0x02 -#define LINK_1000MB 0x03 -#define LINK_10000MB 0x04 - -#define LINK_HALFD 0x00 -#define LINK_FULLD 0x01 -#define LINK_AUTOD 0x02 - -#define MAC_DIRECTED 0x00000001 -#define MAC_BCAST 0x00000002 -#define MAC_MCAST 0x00000004 -#define MAC_PROMISC 0x00000008 -#define MAC_LOOPBACK 0x00000010 -#define MAC_ALLMCAST 0x00000020 - -#define SLIC_DUPLEX(x) ((x == LINK_FULLD) ? "FDX" : "HDX") -#define SLIC_SPEED(x) ((x == LINK_100MB) ? "100Mb" : ((x == LINK_1000MB) ?\ - "1000Mb" : " 10Mb")) -#define SLIC_LINKSTATE(x) ((x == LINK_DOWN) ? "Down" : "Up ") -#define SLIC_ADAPTER_STATE(x) ((x == ADAPT_UP) ? "UP" : "Down") -#define SLIC_CARD_STATE(x) ((x == CARD_UP) ? "UP" : "Down") - -struct slic_iface_stats { - /* - * Stats - */ - u64 xmt_bytes; - u64 xmt_ucast; - u64 xmt_mcast; - u64 xmt_bcast; - u64 xmt_errors; - u64 xmt_discards; - u64 xmit_collisions; - u64 xmit_excess_xmit_collisions; - u64 rcv_bytes; - u64 rcv_ucast; - u64 rcv_mcast; - u64 rcv_bcast; - u64 rcv_errors; - u64 rcv_discards; -}; - -struct sliccp_stats { - u64 xmit_tcp_segs; - u64 xmit_tcp_bytes; - u64 rcv_tcp_segs; - u64 rcv_tcp_bytes; -}; - -struct slicnet_stats { - struct sliccp_stats tcp; - struct slic_iface_stats iface; -}; - -#define SLIC_LOADTIMER_PERIOD 1 -#define SLIC_INTAGG_DEFAULT 200 -#define SLIC_LOAD_0 0 -#define SLIC_INTAGG_0 0 -#define SLIC_LOAD_1 8000 -#define SLIC_LOAD_2 10000 -#define SLIC_LOAD_3 12000 -#define SLIC_LOAD_4 14000 -#define SLIC_LOAD_5 16000 -#define SLIC_INTAGG_1 50 -#define SLIC_INTAGG_2 100 -#define SLIC_INTAGG_3 150 -#define SLIC_INTAGG_4 200 -#define SLIC_INTAGG_5 250 -#define SLIC_LOAD_1GB 3000 -#define SLIC_LOAD_2GB 6000 -#define SLIC_LOAD_3GB 12000 -#define SLIC_LOAD_4GB 24000 -#define SLIC_LOAD_5GB 48000 -#define SLIC_INTAGG_1GB 50 -#define SLIC_INTAGG_2GB 75 -#define SLIC_INTAGG_3GB 100 -#define SLIC_INTAGG_4GB 100 -#define SLIC_INTAGG_5GB 100 - -struct ether_header { - unsigned char ether_dhost[6]; - unsigned char ether_shost[6]; - ushort ether_type; -}; - -struct sliccard { - uint busnumber; - uint slotnumber; - uint state; - uint cardnum; - uint card_size; - uint adapters_activated; - uint adapters_allocated; - uint adapters_sleeping; - uint gennumber; - u32 events; - u32 loadlevel_current; - u32 load; - uint reset_in_progress; - u32 pingstatus; - u32 bad_pingstatus; - struct timer_list loadtimer; - u32 loadtimerset; - uint config_set; - struct slic_config config; - struct adapter *master; - struct adapter *adapter[SLIC_MAX_PORTS]; - struct sliccard *next; - u32 error_interrupts; - u32 error_rmiss_interrupts; - u32 rcv_interrupts; - u32 xmit_interrupts; - u32 num_isrs; - u32 false_interrupts; - u32 max_isr_rcvs; - u32 max_isr_xmits; - u32 rcv_interrupt_yields; - u32 tx_packets; - u32 debug_ix; - ushort reg_type[32]; - ushort reg_offset[32]; - u32 reg_value[32]; - u32 reg_valueh[32]; -}; - -#define NUM_CFG_SPACES 2 -#define NUM_CFG_REGS 64 -#define NUM_CFG_REG_ULONGS 
(NUM_CFG_REGS / sizeof(u32)) - -struct physcard { - struct adapter *adapter[SLIC_MAX_PORTS]; - struct physcard *next; - uint adapters_allocd; - -/* - * the following is not currently needed - * u32 bridge_busnum; - * u32 bridge_cfg[NUM_CFG_SPACES][NUM_CFG_REG_ULONGS]; - */ -}; - -struct base_driver { - spinlock_t driver_lock; - u32 num_slic_cards; - u32 num_slic_ports; - u32 num_slic_ports_active; - u32 dynamic_intagg; - struct sliccard *slic_card; - struct physcard *phys_card; - uint cardnuminuse[SLIC_MAX_CARDS]; -}; - -struct slic_stats { - /* xmit stats */ - u64 xmit_tcp_bytes; - u64 xmit_tcp_segs; - u64 xmit_bytes; - u64 xmit_collisions; - u64 xmit_unicasts; - u64 xmit_other_error; - u64 xmit_excess_collisions; - /* rcv stats */ - u64 rcv_tcp_bytes; - u64 rcv_tcp_segs; - u64 rcv_bytes; - u64 rcv_unicasts; - u64 rcv_other_error; - u64 rcv_drops; -}; - -struct slic_shmem_data { - u32 isr; - u32 lnkstatus; - struct slic_stats stats; -}; - -struct slic_shmemory { - dma_addr_t isr_phaddr; - dma_addr_t lnkstatus_phaddr; - dma_addr_t stats_phaddr; - struct slic_shmem_data __iomem *shmem_data; -}; - -struct slic_upr { - uint adapter; - u32 upr_request; - u32 upr_data; - u32 upr_data_h; - u32 upr_buffer; - u32 upr_buffer_h; - struct slic_upr *next; -}; - -struct slic_ifevents { - uint oflow802; - uint uflow802; - uint Tprtoflow; - uint rcvearly; - uint Bufov; - uint Carre; - uint Longe; - uint Invp; - uint Crc; - uint Drbl; - uint Code; - uint IpHlen; - uint IpLen; - uint IpCsum; - uint TpCsum; - uint TpHlen; -}; - -struct adapter { - void *ifp; - struct sliccard *card; - uint port; - struct physcard *physcard; - uint physport; - uint cardindex; - uint card_size; - uint chipid; - struct net_device *netdev; - spinlock_t adapter_lock; - spinlock_t reset_lock; - struct pci_dev *pcidev; - uint busnumber; - uint slotnumber; - uint functionnumber; - ushort vendid; - ushort devid; - ushort subsysid; - u32 irq; - u32 drambase; - u32 dramlength; - uint queues_initialized; - uint allocated; - uint activated; - u32 intrregistered; - uint isp_initialized; - uint gennumber; - struct slic_shmemory shmem; - dma_addr_t phys_shmem; - void __iomem *regs; - unsigned char state; - unsigned char linkstate; - unsigned char linkspeed; - unsigned char linkduplex; - uint flags; - unsigned char macaddr[6]; - unsigned char currmacaddr[6]; - u32 macopts; - ushort devflags_prev; - u64 mcastmask; - struct mcast_address *mcastaddrs; - struct slic_upr *upr_list; - uint upr_busy; - struct timer_list pingtimer; - u32 pingtimerset; - struct timer_list loadtimer; - u32 loadtimerset; - spinlock_t upr_lock; - spinlock_t bit64reglock; - struct slic_rspqueue rspqueue; - struct slic_rcvqueue rcvqueue; - struct slic_cmdqueue cmdq_free; - struct slic_cmdqueue cmdq_done; - struct slic_cmdqueue cmdq_all; - struct slic_cmdqmem cmdqmem; - /* - * SLIC Handles - */ - /* Object handles*/ - struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS + 1]; - /* Free object handles*/ - struct slic_handle *pfree_slic_handles; - /* Object handle list lock*/ - spinlock_t handle_lock; - ushort slic_handle_ix; - - u32 xmitq_full; - u32 all_reg_writes; - u32 icr_reg_writes; - u32 isr_reg_writes; - u32 error_interrupts; - u32 error_rmiss_interrupts; - u32 rx_errors; - u32 rcv_drops; - u32 rcv_interrupts; - u32 xmit_interrupts; - u32 linkevent_interrupts; - u32 upr_interrupts; - u32 num_isrs; - u32 false_interrupts; - u32 tx_packets; - u32 xmit_completes; - u32 tx_drops; - u32 rcv_broadcasts; - u32 rcv_multicasts; - u32 rcv_unicasts; - u32 max_isr_rcvs; - u32 
max_isr_xmits; - u32 rcv_interrupt_yields; - u32 intagg_period; - u32 intagg_delay; - u32 dynamic_intagg; - struct inicpm_state *inicpm_info; - void *pinicpm_info; - struct slic_ifevents if_events; - struct slic_stats inicstats_prev; - struct slicnet_stats slic_stats; -}; - -static inline u32 slic_read32(struct adapter *adapter, unsigned int reg) -{ - return ioread32(adapter->regs + reg); -} - -static inline void slic_write32(struct adapter *adapter, unsigned int reg, - u32 val) -{ - iowrite32(val, adapter->regs + reg); -} - -static inline void slic_write64(struct adapter *adapter, unsigned int reg, - u32 val, u32 hiaddr) -{ - unsigned long flags; - - spin_lock_irqsave(&adapter->bit64reglock, flags); - slic_write32(adapter, SLIC_REG_ADDR_UPPER, hiaddr); - slic_write32(adapter, reg, val); - mmiowb(); - spin_unlock_irqrestore(&adapter->bit64reglock, flags); -} - -static inline void slic_flush_write(struct adapter *adapter) -{ - ioread32(adapter->regs + SLIC_REG_HOSTID); -} - -#define UPDATE_STATS(largestat, newstat, oldstat) \ -{ \ - if ((newstat) < (oldstat)) \ - (largestat) += ((newstat) + (0xFFFFFFFF - oldstat + 1)); \ - else \ - (largestat) += ((newstat) - (oldstat)); \ -} - -#define UPDATE_STATS_GB(largestat, newstat, oldstat) \ -{ \ - (largestat) += ((newstat) - (oldstat)); \ -} - -#if BITS_PER_LONG == 64 -#define SLIC_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & \ - 0x00000000FFFFFFFF) -#define SLIC_GET_ADDR_HIGH(_addr) (u32)(((u64)(_addr) >> 32) & \ - 0x00000000FFFFFFFF) -#elif BITS_PER_LONG == 32 -#define SLIC_GET_ADDR_LOW(_addr) (u32)(_addr) -#define SLIC_GET_ADDR_HIGH(_addr) (u32)0 -#else -#error BITS_PER_LONG must be 32 or 64 -#endif - -#define FLUSH true -#define DONT_FLUSH false - -#define SIOCSLICSETINTAGG (SIOCDEVPRIVATE + 10) - -#endif /* __SLIC_DRIVER_H__ */ diff --git a/drivers/staging/slicoss/slichw.h b/drivers/staging/slicoss/slichw.h deleted file mode 100644 index 49cb91aa02bb597dd0d010a235ab349dd2e02fa4..0000000000000000000000000000000000000000 --- a/drivers/staging/slicoss/slichw.h +++ /dev/null @@ -1,652 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2000-2002 Alacritech, Inc. All rights reserved. - * - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * The views and conclusions contained in the software and documentation - * are those of the authors and should not be interpreted as representing - * official policies, either expressed or implied, of Alacritech, Inc. - * - **************************************************************************/ - -/* - * FILENAME: slichw.h - * - * This header file contains definitions that are common to our hardware. - */ -#ifndef __SLICHW_H__ -#define __SLICHW_H__ - -#define PCI_VENDOR_ID_ALACRITECH 0x139A -#define SLIC_1GB_DEVICE_ID 0x0005 -#define SLIC_2GB_DEVICE_ID 0x0007 /* Oasis Device ID */ - -#define SLIC_1GB_CICADA_SUBSYS_ID 0x0008 - -#define SLIC_NBR_MACS 4 - -#define SLIC_RCVBUF_SIZE 2048 -#define SLIC_RCVBUF_HEADSIZE 34 -#define SLIC_RCVBUF_TAILSIZE 0 -#define SLIC_RCVBUF_DATASIZE (SLIC_RCVBUF_SIZE - \ - (SLIC_RCVBUF_HEADSIZE + \ - SLIC_RCVBUF_TAILSIZE)) - -#define VGBSTAT_XPERR 0x40000000 -#define VGBSTAT_XERRSHFT 25 -#define VGBSTAT_XCSERR 0x23 -#define VGBSTAT_XUFLOW 0x22 -#define VGBSTAT_XHLEN 0x20 -#define VGBSTAT_NETERR 0x01000000 -#define VGBSTAT_NERRSHFT 16 -#define VGBSTAT_NERRMSK 0x1ff -#define VGBSTAT_NCSERR 0x103 -#define VGBSTAT_NUFLOW 0x102 -#define VGBSTAT_NHLEN 0x100 -#define VGBSTAT_LNKERR 0x00000080 -#define VGBSTAT_LERRMSK 0xff -#define VGBSTAT_LDEARLY 0x86 -#define VGBSTAT_LBOFLO 0x85 -#define VGBSTAT_LCODERR 0x84 -#define VGBSTAT_LDBLNBL 0x83 -#define VGBSTAT_LCRCERR 0x82 -#define VGBSTAT_LOFLO 0x81 -#define VGBSTAT_LUFLO 0x80 -#define IRHDDR_FLEN_MSK 0x0000ffff -#define IRHDDR_SVALID 0x80000000 -#define IRHDDR_ERR 0x10000000 -#define VRHSTAT_802OE 0x80000000 -#define VRHSTAT_TPOFLO 0x10000000 -#define VRHSTATB_802UE 0x80000000 -#define VRHSTATB_RCVE 0x40000000 -#define VRHSTATB_BUFF 0x20000000 -#define VRHSTATB_CARRE 0x08000000 -#define VRHSTATB_LONGE 0x02000000 -#define VRHSTATB_PREA 0x01000000 -#define VRHSTATB_CRC 0x00800000 -#define VRHSTATB_DRBL 0x00400000 -#define VRHSTATB_CODE 0x00200000 -#define VRHSTATB_TPCSUM 0x00100000 -#define VRHSTATB_TPHLEN 0x00080000 -#define VRHSTATB_IPCSUM 0x00040000 -#define VRHSTATB_IPLERR 0x00020000 -#define VRHSTATB_IPHERR 0x00010000 -#define SLIC_MAX64_BCNT 23 -#define SLIC_MAX32_BCNT 26 -#define IHCMD_XMT_REQ 0x01 -#define IHFLG_IFSHFT 2 -#define SLIC_RSPBUF_SIZE 32 - -#define SLIC_RESET_MAGIC 0xDEAD -#define ICR_INT_OFF 0 -#define ICR_INT_ON 1 -#define ICR_INT_MASK 2 - -#define ISR_ERR 0x80000000 -#define ISR_RCV 0x40000000 -#define ISR_CMD 0x20000000 -#define ISR_IO 0x60000000 -#define ISR_UPC 0x10000000 -#define ISR_LEVENT 0x08000000 -#define ISR_RMISS 0x02000000 -#define ISR_UPCERR 0x01000000 -#define ISR_XDROP 0x00800000 -#define ISR_UPCBSY 0x00020000 -#define ISR_EVMSK 0xffff0000 -#define ISR_PINGMASK 0x00700000 -#define ISR_PINGDSMASK 0x00710000 -#define ISR_UPCMASK 0x11000000 -#define SLIC_WCS_START 0x80000000 -#define SLIC_WCS_COMPARE 0x40000000 -#define SLIC_RCVWCS_BEGIN 0x40000000 -#define SLIC_RCVWCS_FINISH 0x80000000 -#define SLIC_PM_MAXPATTERNS 6 -#define SLIC_PM_PATTERNSIZE 128 -#define SLIC_PMCAPS_WAKEONLAN 0x00000001 -#define MIICR_REG_PCR 0x00000000 -#define MIICR_REG_4 0x00040000 -#define MIICR_REG_9 0x00090000 -#define MIICR_REG_16 0x00100000 -#define PCR_RESET 0x8000 -#define PCR_POWERDOWN 0x0800 -#define PCR_SPEED_100 0x2000 -#define PCR_SPEED_1000 0x0040 -#define PCR_AUTONEG 0x1000 -#define PCR_AUTONEG_RST 0x0200 -#define PCR_DUPLEX_FULL 0x0100 -#define PSR_LINKUP 0x0004 - -#define PAR_ADV100FD 0x0100 -#define PAR_ADV100HD 0x0080 -#define PAR_ADV10FD 0x0040 -#define PAR_ADV10HD 0x0020 -#define 
PAR_ASYMPAUSE 0x0C00 -#define PAR_802_3 0x0001 - -#define PAR_ADV1000XFD 0x0020 -#define PAR_ADV1000XHD 0x0040 -#define PAR_ASYMPAUSE_FIBER 0x0180 - -#define PGC_ADV1000FD 0x0200 -#define PGC_ADV1000HD 0x0100 -#define SEEQ_LINKFAIL 0x4000 -#define SEEQ_SPEED 0x0080 -#define SEEQ_DUPLEX 0x0040 -#define TDK_DUPLEX 0x0800 -#define TDK_SPEED 0x0400 -#define MRV_REG16_XOVERON 0x0068 -#define MRV_REG16_XOVEROFF 0x0008 -#define MRV_SPEED_1000 0x8000 -#define MRV_SPEED_100 0x4000 -#define MRV_SPEED_10 0x0000 -#define MRV_FULLDUPLEX 0x2000 -#define MRV_LINKUP 0x0400 - -#define GIG_LINKUP 0x0001 -#define GIG_FULLDUPLEX 0x0002 -#define GIG_SPEED_MASK 0x000C -#define GIG_SPEED_1000 0x0008 -#define GIG_SPEED_100 0x0004 -#define GIG_SPEED_10 0x0000 - -#define MCR_RESET 0x80000000 -#define MCR_CRCEN 0x40000000 -#define MCR_FULLD 0x10000000 -#define MCR_PAD 0x02000000 -#define MCR_RETRYLATE 0x01000000 -#define MCR_BOL_SHIFT 21 -#define MCR_IPG1_SHIFT 14 -#define MCR_IPG2_SHIFT 7 -#define MCR_IPG3_SHIFT 0 -#define GMCR_RESET 0x80000000 -#define GMCR_GBIT 0x20000000 -#define GMCR_FULLD 0x10000000 -#define GMCR_GAPBB_SHIFT 14 -#define GMCR_GAPR1_SHIFT 7 -#define GMCR_GAPR2_SHIFT 0 -#define GMCR_GAPBB_1000 0x60 -#define GMCR_GAPR1_1000 0x2C -#define GMCR_GAPR2_1000 0x40 -#define GMCR_GAPBB_100 0x70 -#define GMCR_GAPR1_100 0x2C -#define GMCR_GAPR2_100 0x40 -#define XCR_RESET 0x80000000 -#define XCR_XMTEN 0x40000000 -#define XCR_PAUSEEN 0x20000000 -#define XCR_LOADRNG 0x10000000 -#define RCR_RESET 0x80000000 -#define RCR_RCVEN 0x40000000 -#define RCR_RCVALL 0x20000000 -#define RCR_RCVBAD 0x10000000 -#define RCR_CTLEN 0x08000000 -#define RCR_ADDRAEN 0x02000000 -#define GXCR_RESET 0x80000000 -#define GXCR_XMTEN 0x40000000 -#define GXCR_PAUSEEN 0x20000000 -#define GRCR_RESET 0x80000000 -#define GRCR_RCVEN 0x40000000 -#define GRCR_RCVALL 0x20000000 -#define GRCR_RCVBAD 0x10000000 -#define GRCR_CTLEN 0x08000000 -#define GRCR_ADDRAEN 0x02000000 -#define GRCR_HASHSIZE_SHIFT 17 -#define GRCR_HASHSIZE 14 - -#define SLIC_EEPROM_ID 0xA5A5 -#define SLIC_SRAM_SIZE2GB (64 * 1024) -#define SLIC_SRAM_SIZE1GB (32 * 1024) -#define SLIC_HOSTID_DEFAULT 0xFFFF /* uninitialized hostid */ -#define SLIC_NBR_MACS 4 - -struct slic_rcvbuf { - u8 pad1[6]; - u16 pad2; - u32 pad3; - u32 pad4; - u32 buffer; - u32 length; - u32 status; - u32 pad5; - u16 pad6; - u8 data[SLIC_RCVBUF_DATASIZE]; -}; - -struct slic_hddr_wds { - union { - struct { - u32 frame_status; - u32 frame_status_b; - u32 time_stamp; - u32 checksum; - } hdrs_14port; - struct { - u32 frame_status; - u16 ByteCnt; - u16 TpChksum; - u16 CtxHash; - u16 MacHash; - u32 BufLnk; - } hdrs_gbit; - } u0; -}; - -#define frame_status14 u0.hdrs_14port.frame_status -#define frame_status_b14 u0.hdrs_14port.frame_status_b -#define frame_statusGB u0.hdrs_gbit.frame_status - -struct slic_host64sg { - u32 paddrl; - u32 paddrh; - u32 length; -}; - -struct slic_host64_cmd { - u32 hosthandle; - u32 RSVD; - u8 command; - u8 flags; - union { - u16 rsv1; - u16 rsv2; - } u0; - union { - struct { - u32 totlen; - struct slic_host64sg bufs[SLIC_MAX64_BCNT]; - } slic_buffers; - } u; -}; - -struct slic_rspbuf { - u32 hosthandle; - u32 pad0; - u32 pad1; - u32 status; - u32 pad2[4]; -}; - -/* Reset Register */ -#define SLIC_REG_RESET 0x0000 -/* Interrupt Control Register */ -#define SLIC_REG_ICR 0x0008 -/* Interrupt status pointer */ -#define SLIC_REG_ISP 0x0010 -/* Interrupt status */ -#define SLIC_REG_ISR 0x0018 -/* - * Header buffer address reg - * 31-8 - phy addr of set of contiguous hdr buffers - * 7-0 - 
number of buffers passed - * Buffers are 256 bytes long on 256-byte boundaries. - */ -#define SLIC_REG_HBAR 0x0020 -/* - * Data buffer handle & address reg - * 4 sets of registers; Buffers are 2K bytes long 2 per 4K page. - */ -#define SLIC_REG_DBAR 0x0028 -/* - * Xmt Cmd buf addr regs. - * 1 per XMT interface - * 31-5 - phy addr of host command buffer - * 4-0 - length of cmd in multiples of 32 bytes - * Buffers are 32 bytes up to 512 bytes long - */ -#define SLIC_REG_CBAR 0x0030 -/* Write control store */ -#define SLIC_REG_WCS 0x0034 -/* - * Response buffer address reg. - * 31-8 - phy addr of set of contiguous response buffers - * 7-0 - number of buffers passed - * Buffers are 32 bytes long on 32-byte boundaries. - */ -#define SLIC_REG_RBAR 0x0038 -/* Read statistics (UPR) */ -#define SLIC_REG_RSTAT 0x0040 -/* Read link status */ -#define SLIC_REG_LSTAT 0x0048 -/* Write Mac Config */ -#define SLIC_REG_WMCFG 0x0050 -/* Write phy register */ -#define SLIC_REG_WPHY 0x0058 -/* Rcv Cmd buf addr reg */ -#define SLIC_REG_RCBAR 0x0060 -/* Read SLIC Config*/ -#define SLIC_REG_RCONFIG 0x0068 -/* Interrupt aggregation time */ -#define SLIC_REG_INTAGG 0x0070 -/* Write XMIT config reg */ -#define SLIC_REG_WXCFG 0x0078 -/* Write RCV config reg */ -#define SLIC_REG_WRCFG 0x0080 -/* Write rcv addr a low */ -#define SLIC_REG_WRADDRAL 0x0088 -/* Write rcv addr a high */ -#define SLIC_REG_WRADDRAH 0x0090 -/* Write rcv addr b low */ -#define SLIC_REG_WRADDRBL 0x0098 -/* Write rcv addr b high */ -#define SLIC_REG_WRADDRBH 0x00a0 -/* Low bits of mcast mask */ -#define SLIC_REG_MCASTLOW 0x00a8 -/* High bits of mcast mask */ -#define SLIC_REG_MCASTHIGH 0x00b0 -/* Ping the card */ -#define SLIC_REG_PING 0x00b8 -/* Dump command */ -#define SLIC_REG_DUMP_CMD 0x00c0 -/* Dump data pointer */ -#define SLIC_REG_DUMP_DATA 0x00c8 -/* Read card's pci_status register */ -#define SLIC_REG_PCISTATUS 0x00d0 -/* Write hostid field */ -#define SLIC_REG_WRHOSTID 0x00d8 -/* Put card in a low power state */ -#define SLIC_REG_LOW_POWER 0x00e0 -/* Force slic into quiescent state before soft reset */ -#define SLIC_REG_QUIESCE 0x00e8 -/* Reset interface queues */ -#define SLIC_REG_RESET_IFACE 0x00f0 -/* - * Register is only written when it has changed. - * Bits 63-32 for host i/f addrs. - */ -#define SLIC_REG_ADDR_UPPER 0x00f8 -/* 64 bit Header buffer address reg */ -#define SLIC_REG_HBAR64 0x0100 -/* 64 bit Data buffer handle & address reg */ -#define SLIC_REG_DBAR64 0x0108 -/* 64 bit Xmt Cmd buf addr regs. 
*/ -#define SLIC_REG_CBAR64 0x0110 -/* 64 bit Response buffer address reg.*/ -#define SLIC_REG_RBAR64 0x0118 -/* 64 bit Rcv Cmd buf addr reg*/ -#define SLIC_REG_RCBAR64 0x0120 -/* Read statistics (64 bit UPR) */ -#define SLIC_REG_RSTAT64 0x0128 -/* Download Gigabit RCV sequencer ucode */ -#define SLIC_REG_RCV_WCS 0x0130 -/* Write VlanId field */ -#define SLIC_REG_WRVLANID 0x0138 -/* Read Transformer info */ -#define SLIC_REG_READ_XF_INFO 0x0140 -/* Write Transformer info */ -#define SLIC_REG_WRITE_XF_INFO 0x0148 -/* Write card ticks per second */ -#define SLIC_REG_TICKS_PER_SEC 0x0170 - -#define SLIC_REG_HOSTID 0x1554 - -enum UPR_REQUEST { - SLIC_UPR_STATS, - SLIC_UPR_RLSR, - SLIC_UPR_WCFG, - SLIC_UPR_RCONFIG, - SLIC_UPR_RPHY, - SLIC_UPR_ENLB, - SLIC_UPR_ENCT, - SLIC_UPR_PDWN, - SLIC_UPR_PING, - SLIC_UPR_DUMP, -}; - -struct inicpm_wakepattern { - u32 patternlength; - u8 pattern[SLIC_PM_PATTERNSIZE]; - u8 mask[SLIC_PM_PATTERNSIZE]; -}; - -struct inicpm_state { - u32 powercaps; - u32 powerstate; - u32 wake_linkstatus; - u32 wake_magicpacket; - u32 wake_framepattern; - struct inicpm_wakepattern wakepattern[SLIC_PM_MAXPATTERNS]; -}; - -struct slicpm_packet_pattern { - u32 priority; - u32 reserved; - u32 masksize; - u32 patternoffset; - u32 patternsize; - u32 patternflags; -}; - -enum slicpm_power_state { - slicpm_state_unspecified = 0, - slicpm_state_d0, - slicpm_state_d1, - slicpm_state_d2, - slicpm_state_d3, - slicpm_state_maximum -}; - -struct slicpm_wakeup_capabilities { - enum slicpm_power_state min_magic_packet_wakeup; - enum slicpm_power_state min_pattern_wakeup; - enum slicpm_power_state min_link_change_wakeup; -}; - -struct slic_pnp_capabilities { - u32 flags; - struct slicpm_wakeup_capabilities wakeup_capabilities; -}; - -struct slic_config_mac { - u8 macaddrA[6]; -}; - -#define ATK_FRU_FORMAT 0x00 -#define VENDOR1_FRU_FORMAT 0x01 -#define VENDOR2_FRU_FORMAT 0x02 -#define VENDOR3_FRU_FORMAT 0x03 -#define VENDOR4_FRU_FORMAT 0x04 -#define NO_FRU_FORMAT 0xFF - -struct atk_fru { - u8 assembly[6]; - u8 revision[2]; - u8 serial[14]; - u8 pad[3]; -}; - -struct vendor1_fru { - u8 commodity; - u8 assembly[4]; - u8 revision[2]; - u8 supplier[2]; - u8 date[2]; - u8 sequence[3]; - u8 pad[13]; -}; - -struct vendor2_fru { - u8 part[8]; - u8 supplier[5]; - u8 date[3]; - u8 sequence[4]; - u8 pad[7]; -}; - -struct vendor3_fru { - u8 assembly[6]; - u8 revision[2]; - u8 serial[14]; - u8 pad[3]; -}; - -struct vendor4_fru { - u8 number[8]; - u8 part[8]; - u8 version[8]; - u8 pad[3]; -}; - -union oemfru { - struct vendor1_fru vendor1_fru; - struct vendor2_fru vendor2_fru; - struct vendor3_fru vendor3_fru; - struct vendor4_fru vendor4_fru; -}; - -/* - * SLIC EEPROM structure for Mojave - */ -struct slic_eeprom { - u16 Id; /* 00 EEPROM/FLASH Magic code 'A5A5'*/ - u16 EecodeSize; /* 01 Size of EEPROM Codes (bytes * 4)*/ - u16 FlashSize; /* 02 Flash size */ - u16 EepromSize; /* 03 EEPROM Size */ - u16 VendorId; /* 04 Vendor ID */ - u16 DeviceId; /* 05 Device ID */ - u8 RevisionId; /* 06 Revision ID */ - u8 ClassCode[3]; /* 07 Class Code */ - u8 DbgIntPin; /* 08 Debug Interrupt pin */ - u8 NetIntPin0; /* Network Interrupt Pin */ - u8 MinGrant; /* 09 Minimum grant */ - u8 MaxLat; /* Maximum Latency */ - u16 PciStatus; /* 10 PCI Status */ - u16 SubSysVId; /* 11 Subsystem Vendor Id */ - u16 SubSysId; /* 12 Subsystem ID */ - u16 DbgDevId; /* 13 Debug Device Id */ - u16 DramRomFn; /* 14 Dram/Rom function */ - u16 DSize2Pci; /* 15 DRAM size to PCI (bytes * 64K) */ - u16 RSize2Pci; /* 16 ROM extension size to PCI 
(bytes * 4k) */ - u8 NetIntPin1; /* 17 Network Interface Pin 1 - * (simba/leone only) - */ - u8 NetIntPin2; /* Network Interface Pin 2 (simba/leone only)*/ - union { - u8 NetIntPin3; /* 18 Network Interface Pin 3 (simba only) */ - u8 FreeTime; /* FreeTime setting (leone/mojave only) */ - } u1; - u8 TBIctl; /* 10-bit interface control (Mojave only) */ - u16 DramSize; /* 19 DRAM size (bytes * 64k) */ - union { - struct { - /* Mac Interface Specific portions */ - struct slic_config_mac MacInfo[SLIC_NBR_MACS]; - } mac; /* MAC access for all boards */ - struct { - /* use above struct for MAC access */ - struct slic_config_mac pad[SLIC_NBR_MACS - 1]; - u16 DeviceId2; /* Device ID for 2nd PCI function */ - u8 IntPin2; /* Interrupt pin for 2nd PCI function */ - u8 ClassCode2[3]; /* Class Code for 2nd PCI function */ - } mojave; /* 2nd function access for gigabit board */ - } u2; - u16 CfgByte6; /* Config Byte 6 */ - u16 PMECapab; /* Power Mgment capabilities */ - u16 NwClkCtrls; /* NetworkClockControls */ - u8 FruFormat; /* Alacritech FRU format type */ - struct atk_fru AtkFru; /* Alacritech FRU information */ - u8 OemFruFormat; /* optional OEM FRU format type */ - union oemfru OemFru; /* optional OEM FRU information */ - u8 Pad[4]; /* Pad to 128 bytes - includes 2 cksum bytes - * (if OEM FRU info exists) and two unusable - * bytes at the end - */ -}; - -/* SLIC EEPROM structure for Oasis */ -struct oslic_eeprom { - u16 Id; /* 00 EEPROM/FLASH Magic code 'A5A5' */ - u16 EecodeSize; /* 01 Size of EEPROM Codes (bytes * 4)*/ - u16 FlashConfig0; /* 02 Flash Config for SPI device 0 */ - u16 FlashConfig1; /* 03 Flash Config for SPI device 1 */ - u16 VendorId; /* 04 Vendor ID */ - u16 DeviceId; /* 05 Device ID (function 0) */ - u8 RevisionId; /* 06 Revision ID */ - u8 ClassCode[3]; /* 07 Class Code for PCI function 0 */ - u8 IntPin1; /* 08 Interrupt pin for PCI function 1*/ - u8 ClassCode2[3]; /* 09 Class Code for PCI function 1 */ - u8 IntPin2; /* 10 Interrupt pin for PCI function 2*/ - u8 IntPin0; /* Interrupt pin for PCI function 0*/ - u8 MinGrant; /* 11 Minimum grant */ - u8 MaxLat; /* Maximum Latency */ - u16 SubSysVId; /* 12 Subsystem Vendor Id */ - u16 SubSysId; /* 13 Subsystem ID */ - u16 FlashSize; /* 14 Flash size (bytes / 4K) */ - u16 DSize2Pci; /* 15 DRAM size to PCI (bytes / 64K) */ - u16 RSize2Pci; /* 16 Flash (ROM extension) size to PCI - * (bytes / 4K) - */ - u16 DeviceId1; /* 17 Device Id (function 1) */ - u16 DeviceId2; /* 18 Device Id (function 2) */ - u16 CfgByte6; /* 19 Device Status Config Bytes 6-7 */ - u16 PMECapab; /* 20 Power Mgment capabilities */ - u8 MSICapab; /* 21 MSI capabilities */ - u8 ClockDivider; /* Clock divider */ - u16 PciStatusLow; /* 22 PCI Status bits 15:0 */ - u16 PciStatusHigh; /* 23 PCI Status bits 31:16 */ - u16 DramConfigLow; /* 24 DRAM Configuration bits 15:0 */ - u16 DramConfigHigh; /* 25 DRAM Configuration bits 31:16 */ - u16 DramSize; /* 26 DRAM size (bytes / 64K) */ - u16 GpioTbiCtl; /* 27 GPIO/TBI controls for functions 1/0 */ - u16 EepromSize; /* 28 EEPROM Size */ - struct slic_config_mac MacInfo[2]; /* 29 MAC addresses (2 ports) */ - u8 FruFormat; /* 35 Alacritech FRU format type */ - struct atk_fru AtkFru; /* Alacritech FRU information */ - u8 OemFruFormat; /* optional OEM FRU format type */ - union oemfru OemFru; /* optional OEM FRU information */ - u8 Pad[4]; /* Pad to 128 bytes - includes 2 checksum bytes - * (if OEM FRU info exists) and two unusable - * bytes at the end - */ -}; - -#define MAX_EECODE_SIZE sizeof(struct slic_eeprom) -#define 
MIN_EECODE_SIZE 0x62 /* code size without optional OEM FRU stuff */ - -/* - * SLIC CONFIG structure - * - * This structure lives in the CARD structure and is valid for all board types. - * It is filled in from the appropriate EEPROM structure by - * SlicGetConfigData() - */ -struct slic_config { - bool EepromValid; /* Valid EEPROM flag (checksum good?) */ - u16 DramSize; /* DRAM size (bytes / 64K) */ - struct slic_config_mac MacInfo[SLIC_NBR_MACS]; /* MAC addresses */ - u8 FruFormat; /* Alacritech FRU format type */ - struct atk_fru AtkFru; /* Alacritech FRU information */ - u8 OemFruFormat; /* optional OEM FRU format type */ - union { - struct vendor1_fru vendor1_fru; - struct vendor2_fru vendor2_fru; - struct vendor3_fru vendor3_fru; - struct vendor4_fru vendor4_fru; - } OemFru; -}; - -#pragma pack() - -#endif diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c deleted file mode 100644 index 062307ad7fed679d40bc3aee4a45b7af4f8f1904..0000000000000000000000000000000000000000 --- a/drivers/staging/slicoss/slicoss.c +++ /dev/null @@ -1,3132 +0,0 @@ -/************************************************************************** - * - * Copyright 2000-2006 Alacritech, Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation - * are those of the authors and should not be interpreted as representing - * official policies, either expressed or implied, of Alacritech, Inc. - * - **************************************************************************/ - -/* - * FILENAME: slicoss.c - * - * The SLICOSS driver for Alacritech's IS-NIC products. - * - * This driver is supposed to support: - * - * Mojave cards (single port PCI Gigabit) both copper and fiber - * Oasis cards (single and dual port PCI-x Gigabit) copper and fiber - * Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber - * - * The driver was actually tested on Oasis and Kalahari cards. - * - * - * NOTE: This is the standard, non-accelerated version of Alacritech's - * IS-NIC driver. 
- */ - -#define KLUDGE_FOR_4GB_BOUNDARY 1 -#define DEBUG_MICROCODE 1 -#define DBG 1 -#define SLIC_INTERRUPT_PROCESS_LIMIT 1 -#define SLIC_OFFLOAD_IP_CHECKSUM 1 -#define STATS_TIMER_INTERVAL 2 -#define PING_TIMER_INTERVAL 1 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include "slichw.h" -#include "slic.h" - -static uint slic_first_init = 1; -static char *slic_banner = "Alacritech SLIC Technology(tm) Server and Storage Accelerator (Non-Accelerated)"; - -static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00"; - -static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL }; -#define DEFAULT_INTAGG_DELAY 100 -static unsigned int rcv_count; - -#define DRV_NAME "slicoss" -#define DRV_VERSION "2.0.1" -#define DRV_AUTHOR "Alacritech, Inc. Engineering" -#define DRV_DESCRIPTION "Alacritech SLIC Techonology(tm) "\ - "Non-Accelerated Driver" -#define DRV_COPYRIGHT "Copyright 2000-2006 Alacritech, Inc. "\ - "All rights reserved." -#define PFX DRV_NAME " " - -MODULE_AUTHOR(DRV_AUTHOR); -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_LICENSE("Dual BSD/GPL"); - -static const struct pci_device_id slic_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) }, - { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) }, - { 0 } -}; - -static const struct ethtool_ops slic_ethtool_ops; - -MODULE_DEVICE_TABLE(pci, slic_pci_tbl); - -static void slic_mcast_set_bit(struct adapter *adapter, char *address) -{ - unsigned char crcpoly; - - /* Get the CRC polynomial for the mac address */ - /* - * we use bits 1-8 (lsb), bitwise reversed, - * msb (= lsb bit 0 before bitrev) is automatically discarded - */ - crcpoly = ether_crc(ETH_ALEN, address) >> 23; - - /* - * We only have space on the SLIC for 64 entries. Lop - * off the top two bits. (2^6 = 64) - */ - crcpoly &= 0x3F; - - /* OR in the new bit into our 64 bit mask. */ - adapter->mcastmask |= (u64)1 << crcpoly; -} - -static void slic_mcast_set_mask(struct adapter *adapter) -{ - if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) { - /* - * Turn on all multicast addresses. We have to do this for - * promiscuous mode as well as ALLMCAST mode. It saves the - * Microcode from having to keep state about the MAC - * configuration. 
- */ - slic_write32(adapter, SLIC_REG_MCASTLOW, 0xFFFFFFFF); - slic_write32(adapter, SLIC_REG_MCASTHIGH, 0xFFFFFFFF); - } else { - /* - * Commit our multicast mast to the SLIC by writing to the - * multicast address mask registers - */ - slic_write32(adapter, SLIC_REG_MCASTLOW, - (u32)(adapter->mcastmask & 0xFFFFFFFF)); - slic_write32(adapter, SLIC_REG_MCASTHIGH, - (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF)); - } -} - -static void slic_timer_ping(ulong dev) -{ - struct adapter *adapter; - struct sliccard *card; - - adapter = netdev_priv((struct net_device *)dev); - card = adapter->card; - - adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ); - add_timer(&adapter->pingtimer); -} - -/* - * slic_link_config - * - * Write phy control to configure link duplex/speed - * - */ -static void slic_link_config(struct adapter *adapter, - u32 linkspeed, u32 linkduplex) -{ - u32 speed; - u32 duplex; - u32 phy_config; - u32 phy_advreg; - u32 phy_gctlreg; - - if (adapter->state != ADAPT_UP) - return; - - if (linkspeed > LINK_1000MB) - linkspeed = LINK_AUTOSPEED; - if (linkduplex > LINK_AUTOD) - linkduplex = LINK_AUTOD; - - if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) { - if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) { - /* - * We've got a fiber gigabit interface, and register - * 4 is different in fiber mode than in copper mode - */ - - /* advertise FD only @1000 Mb */ - phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD)); - /* enable PAUSE frames */ - phy_advreg |= PAR_ASYMPAUSE_FIBER; - slic_write32(adapter, SLIC_REG_WPHY, phy_advreg); - - if (linkspeed == LINK_AUTOSPEED) { - /* reset phy, enable auto-neg */ - phy_config = - (MIICR_REG_PCR | - (PCR_RESET | PCR_AUTONEG | - PCR_AUTONEG_RST)); - slic_write32(adapter, SLIC_REG_WPHY, - phy_config); - } else { /* forced 1000 Mb FD*/ - /* - * power down phy to break link - * this may not work) - */ - phy_config = (MIICR_REG_PCR | PCR_POWERDOWN); - slic_write32(adapter, SLIC_REG_WPHY, - phy_config); - slic_flush_write(adapter); - /* - * wait, Marvell says 1 sec, - * try to get away with 10 ms - */ - mdelay(10); - - /* - * disable auto-neg, set speed/duplex, - * soft reset phy, powerup - */ - phy_config = - (MIICR_REG_PCR | - (PCR_RESET | PCR_SPEED_1000 | - PCR_DUPLEX_FULL)); - slic_write32(adapter, SLIC_REG_WPHY, - phy_config); - } - } else { /* copper gigabit */ - - /* - * Auto-Negotiate or 1000 Mb must be auto negotiated - * We've got a copper gigabit interface, and - * register 4 is different in copper mode than - * in fiber mode - */ - if (linkspeed == LINK_AUTOSPEED) { - /* advertise 10/100 Mb modes */ - phy_advreg = - (MIICR_REG_4 | - (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD - | PAR_ADV10HD)); - } else { - /* - * linkspeed == LINK_1000MB - - * don't advertise 10/100 Mb modes - */ - phy_advreg = MIICR_REG_4; - } - /* enable PAUSE frames */ - phy_advreg |= PAR_ASYMPAUSE; - /* required by the Cicada PHY */ - phy_advreg |= PAR_802_3; - slic_write32(adapter, SLIC_REG_WPHY, phy_advreg); - /* advertise FD only @1000 Mb */ - phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD)); - slic_write32(adapter, SLIC_REG_WPHY, phy_gctlreg); - - if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { - /* - * if a Marvell PHY - * enable auto crossover - */ - phy_config = - (MIICR_REG_16 | (MRV_REG16_XOVERON)); - slic_write32(adapter, SLIC_REG_WPHY, - phy_config); - - /* reset phy, enable auto-neg */ - phy_config = - (MIICR_REG_PCR | - (PCR_RESET | PCR_AUTONEG | - PCR_AUTONEG_RST)); - slic_write32(adapter, SLIC_REG_WPHY, - phy_config); - } else { /* 
it's a Cicada PHY */ - /* enable and restart auto-neg (don't reset) */ - phy_config = - (MIICR_REG_PCR | - (PCR_AUTONEG | PCR_AUTONEG_RST)); - slic_write32(adapter, SLIC_REG_WPHY, - phy_config); - } - } - } else { - /* Forced 10/100 */ - if (linkspeed == LINK_10MB) - speed = 0; - else - speed = PCR_SPEED_100; - if (linkduplex == LINK_HALFD) - duplex = 0; - else - duplex = PCR_DUPLEX_FULL; - - if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { - /* - * if a Marvell PHY - * disable auto crossover - */ - phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF)); - slic_write32(adapter, SLIC_REG_WPHY, phy_config); - } - - /* power down phy to break link (this may not work) */ - phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex)); - slic_write32(adapter, SLIC_REG_WPHY, phy_config); - slic_flush_write(adapter); - /* wait, Marvell says 1 sec, try to get away with 10 ms */ - mdelay(10); - - if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { - /* - * if a Marvell PHY - * disable auto-neg, set speed, - * soft reset phy, powerup - */ - phy_config = - (MIICR_REG_PCR | (PCR_RESET | speed | duplex)); - slic_write32(adapter, SLIC_REG_WPHY, phy_config); - } else { /* it's a Cicada PHY */ - /* disable auto-neg, set speed, powerup */ - phy_config = (MIICR_REG_PCR | (speed | duplex)); - slic_write32(adapter, SLIC_REG_WPHY, phy_config); - } - } -} - -static int slic_card_download_gbrcv(struct adapter *adapter) -{ - const struct firmware *fw; - const char *file = ""; - int ret; - u32 codeaddr; - u32 instruction; - int index = 0; - u32 rcvucodelen = 0; - - switch (adapter->devid) { - case SLIC_2GB_DEVICE_ID: - file = "slicoss/oasisrcvucode.sys"; - break; - case SLIC_1GB_DEVICE_ID: - file = "slicoss/gbrcvucode.sys"; - break; - default: - return -ENOENT; - } - - ret = request_firmware(&fw, file, &adapter->pcidev->dev); - if (ret) { - dev_err(&adapter->pcidev->dev, - "Failed to load firmware %s\n", file); - return ret; - } - - rcvucodelen = *(u32 *)(fw->data + index); - index += 4; - switch (adapter->devid) { - case SLIC_2GB_DEVICE_ID: - if (rcvucodelen != OasisRcvUCodeLen) { - release_firmware(fw); - return -EINVAL; - } - break; - case SLIC_1GB_DEVICE_ID: - if (rcvucodelen != GBRcvUCodeLen) { - release_firmware(fw); - return -EINVAL; - } - break; - } - /* start download */ - slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN); - /* download the rcv sequencer ucode */ - for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) { - /* write out instruction address */ - slic_write32(adapter, SLIC_REG_RCV_WCS, codeaddr); - - instruction = *(u32 *)(fw->data + index); - index += 4; - /* write out the instruction data low addr */ - slic_write32(adapter, SLIC_REG_RCV_WCS, instruction); - - instruction = *(u8 *)(fw->data + index); - index++; - /* write out the instruction data high addr */ - slic_write32(adapter, SLIC_REG_RCV_WCS, instruction); - } - - /* download finished */ - release_firmware(fw); - slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH); - slic_flush_write(adapter); - - return 0; -} - -MODULE_FIRMWARE("slicoss/oasisrcvucode.sys"); -MODULE_FIRMWARE("slicoss/gbrcvucode.sys"); - -static int slic_card_download(struct adapter *adapter) -{ - const struct firmware *fw; - const char *file = ""; - int ret; - u32 section; - int thissectionsize; - int codeaddr; - u32 instruction; - u32 baseaddress; - u32 i; - u32 numsects = 0; - u32 sectsize[3]; - u32 sectstart[3]; - int ucode_start, index = 0; - - switch (adapter->devid) { - case SLIC_2GB_DEVICE_ID: - file = "slicoss/oasisdownload.sys"; - 
break; - case SLIC_1GB_DEVICE_ID: - file = "slicoss/gbdownload.sys"; - break; - default: - return -ENOENT; - } - ret = request_firmware(&fw, file, &adapter->pcidev->dev); - if (ret) { - dev_err(&adapter->pcidev->dev, - "Failed to load firmware %s\n", file); - return ret; - } - numsects = *(u32 *)(fw->data + index); - index += 4; - for (i = 0; i < numsects; i++) { - sectsize[i] = *(u32 *)(fw->data + index); - index += 4; - } - for (i = 0; i < numsects; i++) { - sectstart[i] = *(u32 *)(fw->data + index); - index += 4; - } - ucode_start = index; - instruction = *(u32 *)(fw->data + index); - index += 4; - for (section = 0; section < numsects; section++) { - baseaddress = sectstart[section]; - thissectionsize = sectsize[section] >> 3; - - for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) { - /* Write out instruction address */ - slic_write32(adapter, SLIC_REG_WCS, - baseaddress + codeaddr); - /* Write out instruction to low addr */ - slic_write32(adapter, SLIC_REG_WCS, - instruction); - instruction = *(u32 *)(fw->data + index); - index += 4; - - /* Write out instruction to high addr */ - slic_write32(adapter, SLIC_REG_WCS, - instruction); - instruction = *(u32 *)(fw->data + index); - index += 4; - } - } - index = ucode_start; - for (section = 0; section < numsects; section++) { - instruction = *(u32 *)(fw->data + index); - baseaddress = sectstart[section]; - if (baseaddress < 0x8000) - continue; - thissectionsize = sectsize[section] >> 3; - - for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) { - /* Write out instruction address */ - slic_write32(adapter, SLIC_REG_WCS, - SLIC_WCS_COMPARE | (baseaddress + - codeaddr)); - /* Write out instruction to low addr */ - slic_write32(adapter, SLIC_REG_WCS, instruction); - instruction = *(u32 *)(fw->data + index); - index += 4; - /* Write out instruction to high addr */ - slic_write32(adapter, SLIC_REG_WCS, instruction); - instruction = *(u32 *)(fw->data + index); - index += 4; - } - } - release_firmware(fw); - /* Everything OK, kick off the card */ - mdelay(10); - - slic_write32(adapter, SLIC_REG_WCS, SLIC_WCS_START); - slic_flush_write(adapter); - /* - * stall for 20 ms, long enough for ucode to init card - * and reach mainloop - */ - mdelay(20); - - return 0; -} - -MODULE_FIRMWARE("slicoss/oasisdownload.sys"); -MODULE_FIRMWARE("slicoss/gbdownload.sys"); - -static void slic_adapter_set_hwaddr(struct adapter *adapter) -{ - struct sliccard *card = adapter->card; - - if ((adapter->card) && (card->config_set)) { - memcpy(adapter->macaddr, - card->config.MacInfo[adapter->functionnumber].macaddrA, - sizeof(struct slic_config_mac)); - if (is_zero_ether_addr(adapter->currmacaddr)) - memcpy(adapter->currmacaddr, adapter->macaddr, - ETH_ALEN); - if (adapter->netdev) - memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, - ETH_ALEN); - } -} - -static void slic_intagg_set(struct adapter *adapter, u32 value) -{ - slic_write32(adapter, SLIC_REG_INTAGG, value); - adapter->card->loadlevel_current = value; -} - -static void slic_soft_reset(struct adapter *adapter) -{ - if (adapter->card->state == CARD_UP) { - slic_write32(adapter, SLIC_REG_QUIESCE, 0); - slic_flush_write(adapter); - mdelay(1); - } - - slic_write32(adapter, SLIC_REG_RESET, SLIC_RESET_MAGIC); - slic_flush_write(adapter); - - mdelay(1); -} - -static void slic_mac_address_config(struct adapter *adapter) -{ - u32 value; - u32 value2; - - value = ntohl(*(__be32 *)&adapter->currmacaddr[2]); - slic_write32(adapter, SLIC_REG_WRADDRAL, value); - slic_write32(adapter, SLIC_REG_WRADDRBL, value); 
- - value2 = (u32)((adapter->currmacaddr[0] << 8 | - adapter->currmacaddr[1]) & 0xFFFF); - - slic_write32(adapter, SLIC_REG_WRADDRAH, value2); - slic_write32(adapter, SLIC_REG_WRADDRBH, value2); - - /* - * Write our multicast mask out to the card. This is done - * here in addition to the slic_mcast_addr_set routine - * because ALL_MCAST may have been enabled or disabled - */ - slic_mcast_set_mask(adapter); -} - -static void slic_mac_config(struct adapter *adapter) -{ - u32 value; - - /* Setup GMAC gaps */ - if (adapter->linkspeed == LINK_1000MB) { - value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) | - (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) | - (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT)); - } else { - value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) | - (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) | - (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT)); - } - - /* enable GMII */ - if (adapter->linkspeed == LINK_1000MB) - value |= GMCR_GBIT; - - /* enable fullduplex */ - if ((adapter->linkduplex == LINK_FULLD) - || (adapter->macopts & MAC_LOOPBACK)) { - value |= GMCR_FULLD; - } - - /* write mac config */ - slic_write32(adapter, SLIC_REG_WMCFG, value); - - /* setup mac addresses */ - slic_mac_address_config(adapter); -} - -static void slic_config_set(struct adapter *adapter, bool linkchange) -{ - u32 value; - u32 RcrReset; - - if (linkchange) { - /* Setup MAC */ - slic_mac_config(adapter); - RcrReset = GRCR_RESET; - } else { - slic_mac_address_config(adapter); - RcrReset = 0; - } - - if (adapter->linkduplex == LINK_FULLD) { - /* setup xmtcfg */ - value = (GXCR_RESET | /* Always reset */ - GXCR_XMTEN | /* Enable transmit */ - GXCR_PAUSEEN); /* Enable pause */ - - slic_write32(adapter, SLIC_REG_WXCFG, value); - - /* Setup rcvcfg last */ - value = (RcrReset | /* Reset, if linkchange */ - GRCR_CTLEN | /* Enable CTL frames */ - GRCR_ADDRAEN | /* Address A enable */ - GRCR_RCVBAD | /* Rcv bad frames */ - (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); - } else { - /* setup xmtcfg */ - value = (GXCR_RESET | /* Always reset */ - GXCR_XMTEN); /* Enable transmit */ - - slic_write32(adapter, SLIC_REG_WXCFG, value); - - /* Setup rcvcfg last */ - value = (RcrReset | /* Reset, if linkchange */ - GRCR_ADDRAEN | /* Address A enable */ - GRCR_RCVBAD | /* Rcv bad frames */ - (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); - } - - if (adapter->state != ADAPT_DOWN) { - /* Only enable receive if we are restarting or running */ - value |= GRCR_RCVEN; - } - - if (adapter->macopts & MAC_PROMISC) - value |= GRCR_RCVALL; - - slic_write32(adapter, SLIC_REG_WRCFG, value); -} - -/* - * Turn off RCV and XMT, power down PHY - */ -static void slic_config_clear(struct adapter *adapter) -{ - u32 value; - u32 phy_config; - - /* Setup xmtcfg */ - value = (GXCR_RESET | /* Always reset */ - GXCR_PAUSEEN); /* Enable pause */ - - slic_write32(adapter, SLIC_REG_WXCFG, value); - - value = (GRCR_RESET | /* Always reset */ - GRCR_CTLEN | /* Enable CTL frames */ - GRCR_ADDRAEN | /* Address A enable */ - (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); - - slic_write32(adapter, SLIC_REG_WRCFG, value); - - /* power down phy */ - phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN)); - slic_write32(adapter, SLIC_REG_WPHY, phy_config); -} - -static bool slic_mac_filter(struct adapter *adapter, - struct ether_header *ether_frame) -{ - struct net_device *netdev = adapter->netdev; - u32 opts = adapter->macopts; - - if (opts & MAC_PROMISC) - return true; - - if (is_broadcast_ether_addr(ether_frame->ether_dhost)) { - if (opts & MAC_BCAST) { - adapter->rcv_broadcasts++; - return true; - } - - return false; 
- } - - if (is_multicast_ether_addr(ether_frame->ether_dhost)) { - if (opts & MAC_ALLMCAST) { - adapter->rcv_multicasts++; - netdev->stats.multicast++; - return true; - } - if (opts & MAC_MCAST) { - struct mcast_address *mcaddr = adapter->mcastaddrs; - - while (mcaddr) { - if (ether_addr_equal(mcaddr->address, - ether_frame->ether_dhost)) { - adapter->rcv_multicasts++; - netdev->stats.multicast++; - return true; - } - mcaddr = mcaddr->next; - } - - return false; - } - - return false; - } - if (opts & MAC_DIRECTED) { - adapter->rcv_unicasts++; - return true; - } - return false; -} - -static int slic_mac_set_address(struct net_device *dev, void *ptr) -{ - struct adapter *adapter = netdev_priv(dev); - struct sockaddr *addr = ptr; - - if (netif_running(dev)) - return -EBUSY; - if (!adapter) - return -EBUSY; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len); - - slic_config_set(adapter, true); - return 0; -} - -static void slic_timer_load_check(ulong cardaddr) -{ - struct sliccard *card = (struct sliccard *)cardaddr; - struct adapter *adapter = card->master; - u32 load = card->events; - u32 level = 0; - - if ((adapter) && (adapter->state == ADAPT_UP) && - (card->state == CARD_UP) && (slic_global.dynamic_intagg)) { - if (adapter->devid == SLIC_1GB_DEVICE_ID) { - if (adapter->linkspeed == LINK_1000MB) - level = 100; - else { - if (load > SLIC_LOAD_5) - level = SLIC_INTAGG_5; - else if (load > SLIC_LOAD_4) - level = SLIC_INTAGG_4; - else if (load > SLIC_LOAD_3) - level = SLIC_INTAGG_3; - else if (load > SLIC_LOAD_2) - level = SLIC_INTAGG_2; - else if (load > SLIC_LOAD_1) - level = SLIC_INTAGG_1; - else - level = SLIC_INTAGG_0; - } - if (card->loadlevel_current != level) { - card->loadlevel_current = level; - slic_write32(adapter, SLIC_REG_INTAGG, level); - } - } else { - if (load > SLIC_LOAD_5) - level = SLIC_INTAGG_5; - else if (load > SLIC_LOAD_4) - level = SLIC_INTAGG_4; - else if (load > SLIC_LOAD_3) - level = SLIC_INTAGG_3; - else if (load > SLIC_LOAD_2) - level = SLIC_INTAGG_2; - else if (load > SLIC_LOAD_1) - level = SLIC_INTAGG_1; - else - level = SLIC_INTAGG_0; - if (card->loadlevel_current != level) { - card->loadlevel_current = level; - slic_write32(adapter, SLIC_REG_INTAGG, level); - } - } - } - card->events = 0; - card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ); - add_timer(&card->loadtimer); -} - -static int slic_upr_queue_request(struct adapter *adapter, - u32 upr_request, - u32 upr_data, - u32 upr_data_h, - u32 upr_buffer, u32 upr_buffer_h) -{ - struct slic_upr *upr; - struct slic_upr *uprqueue; - - upr = kmalloc(sizeof(*upr), GFP_ATOMIC); - if (!upr) - return -ENOMEM; - - upr->adapter = adapter->port; - upr->upr_request = upr_request; - upr->upr_data = upr_data; - upr->upr_buffer = upr_buffer; - upr->upr_data_h = upr_data_h; - upr->upr_buffer_h = upr_buffer_h; - upr->next = NULL; - if (adapter->upr_list) { - uprqueue = adapter->upr_list; - - while (uprqueue->next) - uprqueue = uprqueue->next; - uprqueue->next = upr; - } else { - adapter->upr_list = upr; - } - return 0; -} - -static void slic_upr_start(struct adapter *adapter) -{ - struct slic_upr *upr; - - upr = adapter->upr_list; - if (!upr) - return; - if (adapter->upr_busy) - return; - adapter->upr_busy = 1; - - switch (upr->upr_request) { - case SLIC_UPR_STATS: - if (upr->upr_data_h == 0) { - slic_write32(adapter, SLIC_REG_RSTAT, upr->upr_data); - } else { - slic_write64(adapter, 
SLIC_REG_RSTAT64, upr->upr_data, - upr->upr_data_h); - } - break; - - case SLIC_UPR_RLSR: - slic_write64(adapter, SLIC_REG_LSTAT, upr->upr_data, - upr->upr_data_h); - break; - - case SLIC_UPR_RCONFIG: - slic_write64(adapter, SLIC_REG_RCONFIG, upr->upr_data, - upr->upr_data_h); - break; - case SLIC_UPR_PING: - slic_write32(adapter, SLIC_REG_PING, 1); - break; - } - slic_flush_write(adapter); -} - -static int slic_upr_request(struct adapter *adapter, - u32 upr_request, - u32 upr_data, - u32 upr_data_h, - u32 upr_buffer, u32 upr_buffer_h) -{ - unsigned long flags; - int rc; - - spin_lock_irqsave(&adapter->upr_lock, flags); - rc = slic_upr_queue_request(adapter, - upr_request, - upr_data, - upr_data_h, upr_buffer, upr_buffer_h); - if (rc) - goto err_unlock_irq; - - slic_upr_start(adapter); -err_unlock_irq: - spin_unlock_irqrestore(&adapter->upr_lock, flags); - return rc; -} - -static void slic_link_upr_complete(struct adapter *adapter, u32 isr) -{ - struct slic_shmemory *sm = &adapter->shmem; - struct slic_shmem_data *sm_data = sm->shmem_data; - u32 lst = sm_data->lnkstatus; - uint linkup; - unsigned char linkspeed; - unsigned char linkduplex; - - if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) { - dma_addr_t phaddr = sm->lnkstatus_phaddr; - - slic_upr_queue_request(adapter, SLIC_UPR_RLSR, - cpu_to_le32(lower_32_bits(phaddr)), - cpu_to_le32(upper_32_bits(phaddr)), - 0, 0); - return; - } - if (adapter->state != ADAPT_UP) - return; - - linkup = lst & GIG_LINKUP ? LINK_UP : LINK_DOWN; - if (lst & GIG_SPEED_1000) - linkspeed = LINK_1000MB; - else if (lst & GIG_SPEED_100) - linkspeed = LINK_100MB; - else - linkspeed = LINK_10MB; - - if (lst & GIG_FULLDUPLEX) - linkduplex = LINK_FULLD; - else - linkduplex = LINK_HALFD; - - if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN)) - return; - - /* link up event, but nothing has changed */ - if ((adapter->linkstate == LINK_UP) && - (linkup == LINK_UP) && - (adapter->linkspeed == linkspeed) && - (adapter->linkduplex == linkduplex)) - return; - - /* link has changed at this point */ - - /* link has gone from up to down */ - if (linkup == LINK_DOWN) { - adapter->linkstate = LINK_DOWN; - netif_carrier_off(adapter->netdev); - return; - } - - /* link has gone from down to up */ - adapter->linkspeed = linkspeed; - adapter->linkduplex = linkduplex; - - if (adapter->linkstate != LINK_UP) { - /* setup the mac */ - slic_config_set(adapter, true); - adapter->linkstate = LINK_UP; - netif_carrier_on(adapter->netdev); - } -} - -static void slic_upr_request_complete(struct adapter *adapter, u32 isr) -{ - struct sliccard *card = adapter->card; - struct slic_upr *upr; - unsigned long flags; - - spin_lock_irqsave(&adapter->upr_lock, flags); - upr = adapter->upr_list; - if (!upr) { - spin_unlock_irqrestore(&adapter->upr_lock, flags); - return; - } - adapter->upr_list = upr->next; - upr->next = NULL; - adapter->upr_busy = 0; - switch (upr->upr_request) { - case SLIC_UPR_STATS: { - struct slic_shmemory *sm = &adapter->shmem; - struct slic_shmem_data *sm_data = sm->shmem_data; - struct slic_stats *stats = &sm_data->stats; - struct slic_stats *old = &adapter->inicstats_prev; - struct slicnet_stats *stst = &adapter->slic_stats; - - if (isr & ISR_UPCERR) { - dev_err(&adapter->netdev->dev, - "SLIC_UPR_STATS command failed isr[%x]\n", isr); - break; - } - - UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs, stats->xmit_tcp_segs, - old->xmit_tcp_segs); - - UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes, stats->xmit_tcp_bytes, - old->xmit_tcp_bytes); - - 
UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs, stats->rcv_tcp_segs, - old->rcv_tcp_segs); - - UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes, stats->rcv_tcp_bytes, - old->rcv_tcp_bytes); - - UPDATE_STATS_GB(stst->iface.xmt_bytes, stats->xmit_bytes, - old->xmit_bytes); - - UPDATE_STATS_GB(stst->iface.xmt_ucast, stats->xmit_unicasts, - old->xmit_unicasts); - - UPDATE_STATS_GB(stst->iface.rcv_bytes, stats->rcv_bytes, - old->rcv_bytes); - - UPDATE_STATS_GB(stst->iface.rcv_ucast, stats->rcv_unicasts, - old->rcv_unicasts); - - UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_collisions, - old->xmit_collisions); - - UPDATE_STATS_GB(stst->iface.xmt_errors, - stats->xmit_excess_collisions, - old->xmit_excess_collisions); - - UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_other_error, - old->xmit_other_error); - - UPDATE_STATS_GB(stst->iface.rcv_errors, stats->rcv_other_error, - old->rcv_other_error); - - UPDATE_STATS_GB(stst->iface.rcv_discards, stats->rcv_drops, - old->rcv_drops); - - if (stats->rcv_drops > old->rcv_drops) - adapter->rcv_drops += (stats->rcv_drops - - old->rcv_drops); - memcpy_fromio(old, stats, sizeof(*stats)); - break; - } - case SLIC_UPR_RLSR: - slic_link_upr_complete(adapter, isr); - break; - case SLIC_UPR_RCONFIG: - break; - case SLIC_UPR_PING: - card->pingstatus |= (isr & ISR_PINGDSMASK); - break; - } - kfree(upr); - slic_upr_start(adapter); - spin_unlock_irqrestore(&adapter->upr_lock, flags); -} - -static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h) -{ - return slic_upr_request(adapter, SLIC_UPR_RCONFIG, config, config_h, - 0, 0); -} - -/* - * Compute a checksum of the EEPROM according to RFC 1071. - */ -static u16 slic_eeprom_cksum(void *eeprom, unsigned int len) -{ - u16 *wp = eeprom; - u32 checksum = 0; - - while (len > 1) { - checksum += *(wp++); - len -= 2; - } - - if (len > 0) - checksum += *(u8 *)wp; - - while (checksum >> 16) - checksum = (checksum & 0xFFFF) + ((checksum >> 16) & 0xFFFF); - - return ~checksum; -} - -static void slic_rspqueue_free(struct adapter *adapter) -{ - int i; - struct slic_rspqueue *rspq = &adapter->rspqueue; - - for (i = 0; i < rspq->num_pages; i++) { - if (rspq->vaddr[i]) { - pci_free_consistent(adapter->pcidev, PAGE_SIZE, - rspq->vaddr[i], rspq->paddr[i]); - } - rspq->vaddr[i] = NULL; - rspq->paddr[i] = 0; - } - rspq->offset = 0; - rspq->pageindex = 0; - rspq->rspbuf = NULL; -} - -static int slic_rspqueue_init(struct adapter *adapter) -{ - int i; - struct slic_rspqueue *rspq = &adapter->rspqueue; - u32 paddrh = 0; - - memset(rspq, 0, sizeof(struct slic_rspqueue)); - - rspq->num_pages = SLIC_RSPQ_PAGES_GB; - - for (i = 0; i < rspq->num_pages; i++) { - rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev, - PAGE_SIZE, - &rspq->paddr[i]); - if (!rspq->vaddr[i]) { - dev_err(&adapter->pcidev->dev, - "pci_alloc_consistent failed\n"); - slic_rspqueue_free(adapter); - return -ENOMEM; - } - - if (paddrh == 0) { - slic_write32(adapter, SLIC_REG_RBAR, - rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE); - } else { - slic_write64(adapter, SLIC_REG_RBAR64, - rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE, - paddrh); - } - } - rspq->offset = 0; - rspq->pageindex = 0; - rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0]; - return 0; -} - -static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter) -{ - struct slic_rspqueue *rspq = &adapter->rspqueue; - struct slic_rspbuf *buf; - - if (!(rspq->rspbuf->status)) - return NULL; - - buf = rspq->rspbuf; - if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) { - rspq->rspbuf++; - } else { - 
slic_write64(adapter, SLIC_REG_RBAR64, - rspq->paddr[rspq->pageindex] | - SLIC_RSPQ_BUFSINPAGE, 0); - rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages; - rspq->offset = 0; - rspq->rspbuf = (struct slic_rspbuf *) - rspq->vaddr[rspq->pageindex]; - } - - return buf; -} - -static void slic_cmdqmem_free(struct adapter *adapter) -{ - struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem; - int i; - - for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) { - if (cmdqmem->pages[i]) { - pci_free_consistent(adapter->pcidev, - PAGE_SIZE, - (void *)cmdqmem->pages[i], - cmdqmem->dma_pages[i]); - } - } - memset(cmdqmem, 0, sizeof(struct slic_cmdqmem)); -} - -static u32 *slic_cmdqmem_addpage(struct adapter *adapter) -{ - struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem; - u32 *pageaddr; - - if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES) - return NULL; - pageaddr = pci_alloc_consistent(adapter->pcidev, - PAGE_SIZE, - &cmdqmem->dma_pages[cmdqmem->pagecnt]); - if (!pageaddr) - return NULL; - - cmdqmem->pages[cmdqmem->pagecnt] = pageaddr; - cmdqmem->pagecnt++; - return pageaddr; -} - -static void slic_cmdq_free(struct adapter *adapter) -{ - struct slic_hostcmd *cmd; - - cmd = adapter->cmdq_all.head; - while (cmd) { - if (cmd->busy) { - struct sk_buff *tempskb; - - tempskb = cmd->skb; - if (tempskb) { - cmd->skb = NULL; - dev_kfree_skb_irq(tempskb); - } - } - cmd = cmd->next_all; - } - memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue)); - memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue)); - memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue)); - slic_cmdqmem_free(adapter); -} - -static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page) -{ - struct slic_hostcmd *cmd; - struct slic_hostcmd *prev; - struct slic_hostcmd *tail; - struct slic_cmdqueue *cmdq; - int cmdcnt; - void *cmdaddr; - ulong phys_addr; - u32 phys_addrl; - u32 phys_addrh; - struct slic_handle *pslic_handle; - unsigned long flags; - - cmdaddr = page; - cmd = cmdaddr; - cmdcnt = 0; - - phys_addr = virt_to_bus((void *)page); - phys_addrl = SLIC_GET_ADDR_LOW(phys_addr); - phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr); - - prev = NULL; - tail = cmd; - while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) && - (adapter->slic_handle_ix < 256)) { - /* Allocate and initialize a SLIC_HANDLE for this command */ - spin_lock_irqsave(&adapter->handle_lock, flags); - pslic_handle = adapter->pfree_slic_handles; - adapter->pfree_slic_handles = pslic_handle->next; - spin_unlock_irqrestore(&adapter->handle_lock, flags); - pslic_handle->type = SLIC_HANDLE_CMD; - pslic_handle->address = (void *)cmd; - pslic_handle->offset = (ushort)adapter->slic_handle_ix++; - pslic_handle->other_handle = NULL; - pslic_handle->next = NULL; - - cmd->pslic_handle = pslic_handle; - cmd->cmd64.hosthandle = pslic_handle->token.handle_token; - cmd->busy = false; - cmd->paddrl = phys_addrl; - cmd->paddrh = phys_addrh; - cmd->next_all = prev; - cmd->next = prev; - prev = cmd; - phys_addrl += SLIC_HOSTCMD_SIZE; - cmdaddr += SLIC_HOSTCMD_SIZE; - - cmd = cmdaddr; - cmdcnt++; - } - - cmdq = &adapter->cmdq_all; - cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */ - tail->next_all = cmdq->head; - cmdq->head = prev; - cmdq = &adapter->cmdq_free; - spin_lock_irqsave(&cmdq->lock, flags); - cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */ - tail->next = cmdq->head; - cmdq->head = prev; - spin_unlock_irqrestore(&cmdq->lock, flags); -} - -static int slic_cmdq_init(struct adapter *adapter) -{ - int i; - u32 *pageaddr; - - memset(&adapter->cmdq_all, 0, sizeof(struct 
slic_cmdqueue)); - memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue)); - memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue)); - spin_lock_init(&adapter->cmdq_all.lock); - spin_lock_init(&adapter->cmdq_free.lock); - spin_lock_init(&adapter->cmdq_done.lock); - memset(&adapter->cmdqmem, 0, sizeof(struct slic_cmdqmem)); - adapter->slic_handle_ix = 1; - for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) { - pageaddr = slic_cmdqmem_addpage(adapter); - if (!pageaddr) { - slic_cmdq_free(adapter); - return -ENOMEM; - } - slic_cmdq_addcmdpage(adapter, pageaddr); - } - adapter->slic_handle_ix = 1; - - return 0; -} - -static void slic_cmdq_reset(struct adapter *adapter) -{ - struct slic_hostcmd *hcmd; - struct sk_buff *skb; - u32 outstanding; - unsigned long flags; - - spin_lock_irqsave(&adapter->cmdq_free.lock, flags); - spin_lock(&adapter->cmdq_done.lock); - outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count; - outstanding -= adapter->cmdq_free.count; - hcmd = adapter->cmdq_all.head; - while (hcmd) { - if (hcmd->busy) { - skb = hcmd->skb; - hcmd->busy = 0; - hcmd->skb = NULL; - dev_kfree_skb_irq(skb); - } - hcmd = hcmd->next_all; - } - adapter->cmdq_free.count = 0; - adapter->cmdq_free.head = NULL; - adapter->cmdq_free.tail = NULL; - adapter->cmdq_done.count = 0; - adapter->cmdq_done.head = NULL; - adapter->cmdq_done.tail = NULL; - adapter->cmdq_free.head = adapter->cmdq_all.head; - hcmd = adapter->cmdq_all.head; - while (hcmd) { - adapter->cmdq_free.count++; - hcmd->next = hcmd->next_all; - hcmd = hcmd->next_all; - } - if (adapter->cmdq_free.count != adapter->cmdq_all.count) { - dev_err(&adapter->netdev->dev, - "free_count %d != all count %d\n", - adapter->cmdq_free.count, adapter->cmdq_all.count); - } - spin_unlock(&adapter->cmdq_done.lock); - spin_unlock_irqrestore(&adapter->cmdq_free.lock, flags); -} - -static void slic_cmdq_getdone(struct adapter *adapter) -{ - struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done; - struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free; - unsigned long flags; - - spin_lock_irqsave(&done_cmdq->lock, flags); - - free_cmdq->head = done_cmdq->head; - free_cmdq->count = done_cmdq->count; - done_cmdq->head = NULL; - done_cmdq->tail = NULL; - done_cmdq->count = 0; - spin_unlock_irqrestore(&done_cmdq->lock, flags); -} - -static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter) -{ - struct slic_cmdqueue *cmdq = &adapter->cmdq_free; - struct slic_hostcmd *cmd = NULL; - unsigned long flags; - -lock_and_retry: - spin_lock_irqsave(&cmdq->lock, flags); -retry: - cmd = cmdq->head; - if (cmd) { - cmdq->head = cmd->next; - cmdq->count--; - spin_unlock_irqrestore(&cmdq->lock, flags); - } else { - slic_cmdq_getdone(adapter); - cmd = cmdq->head; - if (cmd) { - goto retry; - } else { - u32 *pageaddr; - - spin_unlock_irqrestore(&cmdq->lock, flags); - pageaddr = slic_cmdqmem_addpage(adapter); - if (pageaddr) { - slic_cmdq_addcmdpage(adapter, pageaddr); - goto lock_and_retry; - } - } - } - return cmd; -} - -static void slic_cmdq_putdone_irq(struct adapter *adapter, - struct slic_hostcmd *cmd) -{ - struct slic_cmdqueue *cmdq = &adapter->cmdq_done; - - spin_lock(&cmdq->lock); - cmd->busy = 0; - cmd->next = cmdq->head; - cmdq->head = cmd; - cmdq->count++; - if ((adapter->xmitq_full) && (cmdq->count > 10)) - netif_wake_queue(adapter->netdev); - spin_unlock(&cmdq->lock); -} - -static int slic_rcvqueue_fill(struct adapter *adapter) -{ - void *paddr; - u32 paddrl; - u32 paddrh; - struct slic_rcvqueue *rcvq = &adapter->rcvqueue; - int i = 0; - struct 
device *dev = &adapter->netdev->dev; - - while (i < SLIC_RCVQ_FILLENTRIES) { - struct slic_rcvbuf *rcvbuf; - struct sk_buff *skb; -#ifdef KLUDGE_FOR_4GB_BOUNDARY -retry_rcvqfill: -#endif - skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC); - if (skb) { - paddr = (void *)(unsigned long) - pci_map_single(adapter->pcidev, - skb->data, - SLIC_RCVQ_RCVBUFSIZE, - PCI_DMA_FROMDEVICE); - paddrl = SLIC_GET_ADDR_LOW(paddr); - paddrh = SLIC_GET_ADDR_HIGH(paddr); - - skb->len = SLIC_RCVBUF_HEADSIZE; - rcvbuf = (struct slic_rcvbuf *)skb->head; - rcvbuf->status = 0; - skb->next = NULL; -#ifdef KLUDGE_FOR_4GB_BOUNDARY - if (paddrl == 0) { - dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", - __func__); - dev_err(dev, "skb[%p] PROBLEM\n", skb); - dev_err(dev, " skbdata[%p]\n", - skb->data); - dev_err(dev, " skblen[%x]\n", skb->len); - dev_err(dev, " paddr[%p]\n", paddr); - dev_err(dev, " paddrl[%x]\n", paddrl); - dev_err(dev, " paddrh[%x]\n", paddrh); - dev_err(dev, " rcvq->head[%p]\n", - rcvq->head); - dev_err(dev, " rcvq->tail[%p]\n", - rcvq->tail); - dev_err(dev, " rcvq->count[%x]\n", - rcvq->count); - dev_err(dev, "SKIP THIS SKB!!!!!!!!\n"); - goto retry_rcvqfill; - } -#else - if (paddrl == 0) { - dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", - __func__); - dev_err(dev, "skb[%p] PROBLEM\n", skb); - dev_err(dev, " skbdata[%p]\n", - skb->data); - dev_err(dev, " skblen[%x]\n", skb->len); - dev_err(dev, " paddr[%p]\n", paddr); - dev_err(dev, " paddrl[%x]\n", paddrl); - dev_err(dev, " paddrh[%x]\n", paddrh); - dev_err(dev, " rcvq->head[%p]\n", - rcvq->head); - dev_err(dev, " rcvq->tail[%p]\n", - rcvq->tail); - dev_err(dev, " rcvq->count[%x]\n", - rcvq->count); - dev_err(dev, "GIVE TO CARD ANYWAY\n"); - } -#endif - if (paddrh == 0) { - slic_write32(adapter, SLIC_REG_HBAR, - (u32)paddrl); - } else { - slic_write64(adapter, SLIC_REG_HBAR64, paddrl, - paddrh); - } - if (rcvq->head) - rcvq->tail->next = skb; - else - rcvq->head = skb; - rcvq->tail = skb; - rcvq->count++; - i++; - } else { - dev_err(&adapter->netdev->dev, - "slic_rcvqueue_fill could only get [%d] skbuffs\n", - i); - break; - } - } - return i; -} - -static void slic_rcvqueue_free(struct adapter *adapter) -{ - struct slic_rcvqueue *rcvq = &adapter->rcvqueue; - struct sk_buff *skb; - - while (rcvq->head) { - skb = rcvq->head; - rcvq->head = rcvq->head->next; - dev_kfree_skb(skb); - } - rcvq->tail = NULL; - rcvq->head = NULL; - rcvq->count = 0; -} - -static int slic_rcvqueue_init(struct adapter *adapter) -{ - int i, count; - struct slic_rcvqueue *rcvq = &adapter->rcvqueue; - - rcvq->tail = NULL; - rcvq->head = NULL; - rcvq->size = SLIC_RCVQ_ENTRIES; - rcvq->errors = 0; - rcvq->count = 0; - i = SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES; - count = 0; - while (i) { - count += slic_rcvqueue_fill(adapter); - i--; - } - if (rcvq->count < SLIC_RCVQ_MINENTRIES) { - slic_rcvqueue_free(adapter); - return -ENOMEM; - } - return 0; -} - -static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter) -{ - struct slic_rcvqueue *rcvq = &adapter->rcvqueue; - struct sk_buff *skb; - struct slic_rcvbuf *rcvbuf; - int count; - - if (rcvq->count) { - skb = rcvq->head; - rcvbuf = (struct slic_rcvbuf *)skb->head; - - if (rcvbuf->status & IRHDDR_SVALID) { - rcvq->head = rcvq->head->next; - skb->next = NULL; - rcvq->count--; - } else { - skb = NULL; - } - } else { - dev_err(&adapter->netdev->dev, - "RcvQ Empty!! 
rcvq[%p] count[%x]\n", rcvq, rcvq->count); - skb = NULL; - } - while (rcvq->count < SLIC_RCVQ_FILLTHRESH) { - count = slic_rcvqueue_fill(adapter); - if (!count) - break; - } - if (skb) - rcvq->errors = 0; - return skb; -} - -static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb) -{ - struct slic_rcvqueue *rcvq = &adapter->rcvqueue; - void *paddr; - u32 paddrl; - u32 paddrh; - struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head; - struct device *dev; - - paddr = (void *)(unsigned long) - pci_map_single(adapter->pcidev, skb->head, - SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE); - rcvbuf->status = 0; - skb->next = NULL; - - paddrl = SLIC_GET_ADDR_LOW(paddr); - paddrh = SLIC_GET_ADDR_HIGH(paddr); - - if (paddrl == 0) { - dev = &adapter->netdev->dev; - dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", - __func__); - dev_err(dev, "skb[%p] PROBLEM\n", skb); - dev_err(dev, " skbdata[%p]\n", skb->data); - dev_err(dev, " skblen[%x]\n", skb->len); - dev_err(dev, " paddr[%p]\n", paddr); - dev_err(dev, " paddrl[%x]\n", paddrl); - dev_err(dev, " paddrh[%x]\n", paddrh); - dev_err(dev, " rcvq->head[%p]\n", rcvq->head); - dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail); - dev_err(dev, " rcvq->count[%x]\n", rcvq->count); - } - if (paddrh == 0) - slic_write32(adapter, SLIC_REG_HBAR, (u32)paddrl); - else - slic_write64(adapter, SLIC_REG_HBAR64, paddrl, paddrh); - if (rcvq->head) - rcvq->tail->next = skb; - else - rcvq->head = skb; - rcvq->tail = skb; - rcvq->count++; - return rcvq->count; -} - -/* - * slic_link_event_handler - - * - * Initiate a link configuration sequence. The link configuration begins - * by issuing a READ_LINK_STATUS command to the Utility Processor on the - * SLIC. Since the command finishes asynchronously, the slic_upr_comlete - * routine will follow it up witha UP configuration write command, which - * will also complete asynchronously. - * - */ -static int slic_link_event_handler(struct adapter *adapter) -{ - int status; - struct slic_shmemory *sm = &adapter->shmem; - dma_addr_t phaddr = sm->lnkstatus_phaddr; - - if (adapter->state != ADAPT_UP) { - /* Adapter is not operational. Ignore. */ - return -ENODEV; - } - /* no 4GB wrap guaranteed */ - status = slic_upr_request(adapter, SLIC_UPR_RLSR, - cpu_to_le32(lower_32_bits(phaddr)), - cpu_to_le32(upper_32_bits(phaddr)), 0, 0); - return status; -} - -static void slic_init_cleanup(struct adapter *adapter) -{ - if (adapter->intrregistered) { - adapter->intrregistered = 0; - free_irq(adapter->netdev->irq, adapter->netdev); - } - - if (adapter->shmem.shmem_data) { - struct slic_shmemory *sm = &adapter->shmem; - struct slic_shmem_data *sm_data = sm->shmem_data; - - pci_free_consistent(adapter->pcidev, sizeof(*sm_data), sm_data, - sm->isr_phaddr); - } - - if (adapter->pingtimerset) { - adapter->pingtimerset = 0; - del_timer(&adapter->pingtimer); - } - - slic_rspqueue_free(adapter); - slic_cmdq_free(adapter); - slic_rcvqueue_free(adapter); -} - -/* - * Allocate a mcast_address structure to hold the multicast address. - * Link it in. - */ -static int slic_mcast_add_list(struct adapter *adapter, char *address) -{ - struct mcast_address *mcaddr, *mlist; - - /* Check to see if it already exists */ - mlist = adapter->mcastaddrs; - while (mlist) { - if (ether_addr_equal(mlist->address, address)) - return 0; - mlist = mlist->next; - } - - /* Doesn't already exist. 
Allocate a structure to hold it */ - mcaddr = kmalloc(sizeof(*mcaddr), GFP_ATOMIC); - if (!mcaddr) - return 1; - - ether_addr_copy(mcaddr->address, address); - - mcaddr->next = adapter->mcastaddrs; - adapter->mcastaddrs = mcaddr; - - return 0; -} - -static void slic_mcast_set_list(struct net_device *dev) -{ - struct adapter *adapter = netdev_priv(dev); - int status = 0; - char *addresses; - struct netdev_hw_addr *ha; - - netdev_for_each_mc_addr(ha, dev) { - addresses = (char *)&ha->addr; - status = slic_mcast_add_list(adapter, addresses); - if (status != 0) - break; - slic_mcast_set_bit(adapter, addresses); - } - - if (adapter->devflags_prev != dev->flags) { - adapter->macopts = MAC_DIRECTED; - if (dev->flags) { - if (dev->flags & IFF_BROADCAST) - adapter->macopts |= MAC_BCAST; - if (dev->flags & IFF_PROMISC) - adapter->macopts |= MAC_PROMISC; - if (dev->flags & IFF_ALLMULTI) - adapter->macopts |= MAC_ALLMCAST; - if (dev->flags & IFF_MULTICAST) - adapter->macopts |= MAC_MCAST; - } - adapter->devflags_prev = dev->flags; - slic_config_set(adapter, true); - } else { - if (status == 0) - slic_mcast_set_mask(adapter); - } -} - -#define XMIT_FAIL_LINK_STATE 1 -#define XMIT_FAIL_ZERO_LENGTH 2 -#define XMIT_FAIL_HOSTCMD_FAIL 3 - -static void slic_xmit_build_request(struct adapter *adapter, - struct slic_hostcmd *hcmd, struct sk_buff *skb) -{ - struct slic_host64_cmd *ihcmd; - ulong phys_addr; - - ihcmd = &hcmd->cmd64; - - ihcmd->flags = adapter->port << IHFLG_IFSHFT; - ihcmd->command = IHCMD_XMT_REQ; - ihcmd->u.slic_buffers.totlen = skb->len; - phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) { - kfree_skb(skb); - dev_err(&adapter->pcidev->dev, "DMA mapping error\n"); - return; - } - ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr); - ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr); - ihcmd->u.slic_buffers.bufs[0].length = skb->len; -#if BITS_PER_LONG == 64 - hcmd->cmdsize = (u32)((((u64)&ihcmd->u.slic_buffers.bufs[1] - - (u64)hcmd) + 31) >> 5); -#else - hcmd->cmdsize = (((u32)&ihcmd->u.slic_buffers.bufs[1] - - (u32)hcmd) + 31) >> 5; -#endif -} - -static void slic_xmit_fail(struct adapter *adapter, - struct sk_buff *skb, - void *cmd, u32 skbtype, u32 status) -{ - if (adapter->xmitq_full) - netif_stop_queue(adapter->netdev); - if ((!cmd) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) { - switch (status) { - case XMIT_FAIL_LINK_STATE: - dev_err(&adapter->netdev->dev, - "reject xmit skb[%p: %x] linkstate[%s] adapter[%s:%d] card[%s:%d]\n", - skb, skb->pkt_type, - SLIC_LINKSTATE(adapter->linkstate), - SLIC_ADAPTER_STATE(adapter->state), - adapter->state, - SLIC_CARD_STATE(adapter->card->state), - adapter->card->state); - break; - case XMIT_FAIL_ZERO_LENGTH: - dev_err(&adapter->netdev->dev, - "xmit_start skb->len == 0 skb[%p] type[%x]\n", - skb, skb->pkt_type); - break; - case XMIT_FAIL_HOSTCMD_FAIL: - dev_err(&adapter->netdev->dev, - "xmit_start skb[%p] type[%x] No host commands available\n", - skb, skb->pkt_type); - break; - } - } - dev_kfree_skb(skb); - adapter->netdev->stats.tx_dropped++; -} - -static void slic_rcv_handle_error(struct adapter *adapter, - struct slic_rcvbuf *rcvbuf) -{ - struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data; - struct net_device *netdev = adapter->netdev; - - if (adapter->devid != SLIC_1GB_DEVICE_ID) { - if (hdr->frame_status14 & VRHSTAT_802OE) - adapter->if_events.oflow802++; - if (hdr->frame_status14 & VRHSTAT_TPOFLO) - 
adapter->if_events.Tprtoflow++; - if (hdr->frame_status_b14 & VRHSTATB_802UE) - adapter->if_events.uflow802++; - if (hdr->frame_status_b14 & VRHSTATB_RCVE) { - adapter->if_events.rcvearly++; - netdev->stats.rx_fifo_errors++; - } - if (hdr->frame_status_b14 & VRHSTATB_BUFF) { - adapter->if_events.Bufov++; - netdev->stats.rx_over_errors++; - } - if (hdr->frame_status_b14 & VRHSTATB_CARRE) { - adapter->if_events.Carre++; - netdev->stats.tx_carrier_errors++; - } - if (hdr->frame_status_b14 & VRHSTATB_LONGE) - adapter->if_events.Longe++; - if (hdr->frame_status_b14 & VRHSTATB_PREA) - adapter->if_events.Invp++; - if (hdr->frame_status_b14 & VRHSTATB_CRC) { - adapter->if_events.Crc++; - netdev->stats.rx_crc_errors++; - } - if (hdr->frame_status_b14 & VRHSTATB_DRBL) - adapter->if_events.Drbl++; - if (hdr->frame_status_b14 & VRHSTATB_CODE) - adapter->if_events.Code++; - if (hdr->frame_status_b14 & VRHSTATB_TPCSUM) - adapter->if_events.TpCsum++; - if (hdr->frame_status_b14 & VRHSTATB_TPHLEN) - adapter->if_events.TpHlen++; - if (hdr->frame_status_b14 & VRHSTATB_IPCSUM) - adapter->if_events.IpCsum++; - if (hdr->frame_status_b14 & VRHSTATB_IPLERR) - adapter->if_events.IpLen++; - if (hdr->frame_status_b14 & VRHSTATB_IPHERR) - adapter->if_events.IpHlen++; - } else { - if (hdr->frame_statusGB & VGBSTAT_XPERR) { - u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT; - - if (xerr == VGBSTAT_XCSERR) - adapter->if_events.TpCsum++; - if (xerr == VGBSTAT_XUFLOW) - adapter->if_events.Tprtoflow++; - if (xerr == VGBSTAT_XHLEN) - adapter->if_events.TpHlen++; - } - if (hdr->frame_statusGB & VGBSTAT_NETERR) { - u32 nerr = - (hdr-> - frame_statusGB >> VGBSTAT_NERRSHFT) & - VGBSTAT_NERRMSK; - if (nerr == VGBSTAT_NCSERR) - adapter->if_events.IpCsum++; - if (nerr == VGBSTAT_NUFLOW) - adapter->if_events.IpLen++; - if (nerr == VGBSTAT_NHLEN) - adapter->if_events.IpHlen++; - } - if (hdr->frame_statusGB & VGBSTAT_LNKERR) { - u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK; - - if (lerr == VGBSTAT_LDEARLY) - adapter->if_events.rcvearly++; - if (lerr == VGBSTAT_LBOFLO) - adapter->if_events.Bufov++; - if (lerr == VGBSTAT_LCODERR) - adapter->if_events.Code++; - if (lerr == VGBSTAT_LDBLNBL) - adapter->if_events.Drbl++; - if (lerr == VGBSTAT_LCRCERR) - adapter->if_events.Crc++; - if (lerr == VGBSTAT_LOFLO) - adapter->if_events.oflow802++; - if (lerr == VGBSTAT_LUFLO) - adapter->if_events.uflow802++; - } - } -} - -#define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000 -#define M_FAST_PATH 0x0040 - -static void slic_rcv_handler(struct adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct sk_buff *skb; - struct slic_rcvbuf *rcvbuf; - u32 frames = 0; - - while ((skb = slic_rcvqueue_getnext(adapter))) { - u32 rx_bytes; - - rcvbuf = (struct slic_rcvbuf *)skb->head; - adapter->card->events++; - if (rcvbuf->status & IRHDDR_ERR) { - adapter->rx_errors++; - slic_rcv_handle_error(adapter, rcvbuf); - slic_rcvqueue_reinsert(adapter, skb); - continue; - } - - if (!slic_mac_filter(adapter, (struct ether_header *) - rcvbuf->data)) { - slic_rcvqueue_reinsert(adapter, skb); - continue; - } - skb_pull(skb, SLIC_RCVBUF_HEADSIZE); - rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK); - skb_put(skb, rx_bytes); - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += rx_bytes; -#if SLIC_OFFLOAD_IP_CHECKSUM - skb->ip_summed = CHECKSUM_UNNECESSARY; -#endif - - skb->dev = adapter->netdev; - skb->protocol = eth_type_trans(skb, skb->dev); - netif_rx(skb); - - ++frames; -#if SLIC_INTERRUPT_PROCESS_LIMIT - if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) { - 
adapter->rcv_interrupt_yields++; - break; - } -#endif - } - adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames); -} - -static void slic_xmit_complete(struct adapter *adapter) -{ - struct slic_hostcmd *hcmd; - struct slic_rspbuf *rspbuf; - u32 frames = 0; - struct slic_handle_word slic_handle_word; - - do { - rspbuf = slic_rspqueue_getnext(adapter); - if (!rspbuf) - break; - adapter->xmit_completes++; - adapter->card->events++; - /* - * Get the complete host command buffer - */ - slic_handle_word.handle_token = rspbuf->hosthandle; - hcmd = - adapter->slic_handles[slic_handle_word.handle_index]. - address; -/* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */ - if (hcmd->type == SLIC_CMD_DUMB) { - if (hcmd->skb) - dev_kfree_skb_irq(hcmd->skb); - slic_cmdq_putdone_irq(adapter, hcmd); - } - rspbuf->status = 0; - rspbuf->hosthandle = 0; - frames++; - } while (1); - adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames); -} - -static void slic_interrupt_card_up(u32 isr, struct adapter *adapter, - struct net_device *dev) -{ - if (isr & ~ISR_IO) { - if (isr & ISR_ERR) { - adapter->error_interrupts++; - if (isr & ISR_RMISS) { - int count; - int pre_count; - int errors; - - struct slic_rcvqueue *rcvq = - &adapter->rcvqueue; - - adapter->error_rmiss_interrupts++; - - if (!rcvq->errors) - rcv_count = rcvq->count; - pre_count = rcvq->count; - errors = rcvq->errors; - - while (rcvq->count < SLIC_RCVQ_FILLTHRESH) { - count = slic_rcvqueue_fill(adapter); - if (!count) - break; - } - } else if (isr & ISR_XDROP) { - dev_err(&dev->dev, - "isr & ISR_ERR [%x] ISR_XDROP\n", - isr); - } else { - dev_err(&dev->dev, - "isr & ISR_ERR [%x]\n", - isr); - } - } - - if (isr & ISR_LEVENT) { - adapter->linkevent_interrupts++; - if (slic_link_event_handler(adapter)) - adapter->linkevent_interrupts--; - } - - if ((isr & ISR_UPC) || (isr & ISR_UPCERR) || - (isr & ISR_UPCBSY)) { - adapter->upr_interrupts++; - slic_upr_request_complete(adapter, isr); - } - } - - if (isr & ISR_RCV) { - adapter->rcv_interrupts++; - slic_rcv_handler(adapter); - } - - if (isr & ISR_CMD) { - adapter->xmit_interrupts++; - slic_xmit_complete(adapter); - } -} - -static irqreturn_t slic_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct adapter *adapter = netdev_priv(dev); - struct slic_shmemory *sm = &adapter->shmem; - struct slic_shmem_data *sm_data = sm->shmem_data; - u32 isr; - - if (sm_data->isr) { - slic_write32(adapter, SLIC_REG_ICR, ICR_INT_MASK); - slic_flush_write(adapter); - - isr = sm_data->isr; - sm_data->isr = 0; - adapter->num_isrs++; - switch (adapter->card->state) { - case CARD_UP: - slic_interrupt_card_up(isr, adapter, dev); - break; - - case CARD_DOWN: - if ((isr & ISR_UPC) || - (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) { - adapter->upr_interrupts++; - slic_upr_request_complete(adapter, isr); - } - break; - } - - adapter->all_reg_writes += 2; - adapter->isr_reg_writes++; - slic_write32(adapter, SLIC_REG_ISR, 0); - } else { - adapter->false_interrupts++; - } - return IRQ_HANDLED; -} - -#define NORMAL_ETHFRAME 0 - -static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev) -{ - struct sliccard *card; - struct adapter *adapter = netdev_priv(dev); - struct slic_hostcmd *hcmd = NULL; - u32 status = 0; - void *offloadcmd = NULL; - - card = adapter->card; - if ((adapter->linkstate != LINK_UP) || - (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) { - status = XMIT_FAIL_LINK_STATE; - goto xmit_fail; - - } else if (skb->len == 0) { - status = XMIT_FAIL_ZERO_LENGTH; - 
goto xmit_fail; - } - - hcmd = slic_cmdq_getfree(adapter); - if (!hcmd) { - adapter->xmitq_full = 1; - status = XMIT_FAIL_HOSTCMD_FAIL; - goto xmit_fail; - } - hcmd->skb = skb; - hcmd->busy = 1; - hcmd->type = SLIC_CMD_DUMB; - slic_xmit_build_request(adapter, hcmd, skb); - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - -#ifdef DEBUG_DUMP - if (adapter->kill_card) { - struct slic_host64_cmd ihcmd; - - ihcmd = &hcmd->cmd64; - - ihcmd->flags |= 0x40; - adapter->kill_card = 0; /* only do this once */ - } -#endif - if (hcmd->paddrh == 0) { - slic_write32(adapter, SLIC_REG_CBAR, (hcmd->paddrl | - hcmd->cmdsize)); - } else { - slic_write64(adapter, SLIC_REG_CBAR64, - hcmd->paddrl | hcmd->cmdsize, hcmd->paddrh); - } -xmit_done: - return NETDEV_TX_OK; -xmit_fail: - slic_xmit_fail(adapter, skb, offloadcmd, NORMAL_ETHFRAME, status); - goto xmit_done; -} - -static void slic_adapter_freeresources(struct adapter *adapter) -{ - slic_init_cleanup(adapter); - adapter->error_interrupts = 0; - adapter->rcv_interrupts = 0; - adapter->xmit_interrupts = 0; - adapter->linkevent_interrupts = 0; - adapter->upr_interrupts = 0; - adapter->num_isrs = 0; - adapter->xmit_completes = 0; - adapter->rcv_broadcasts = 0; - adapter->rcv_multicasts = 0; - adapter->rcv_unicasts = 0; -} - -static int slic_adapter_allocresources(struct adapter *adapter, - unsigned long *flags) -{ - if (!adapter->intrregistered) { - int retval; - - spin_unlock_irqrestore(&slic_global.driver_lock, *flags); - - retval = request_irq(adapter->netdev->irq, - &slic_interrupt, - IRQF_SHARED, - adapter->netdev->name, adapter->netdev); - - spin_lock_irqsave(&slic_global.driver_lock, *flags); - - if (retval) { - dev_err(&adapter->netdev->dev, - "request_irq (%s) FAILED [%x]\n", - adapter->netdev->name, retval); - return retval; - } - adapter->intrregistered = 1; - } - return 0; -} - -/* - * slic_if_init - * - * Perform initialization of our slic interface. 
- * - */ -static int slic_if_init(struct adapter *adapter, unsigned long *flags) -{ - struct sliccard *card = adapter->card; - struct net_device *dev = adapter->netdev; - struct slic_shmemory *sm = &adapter->shmem; - struct slic_shmem_data *sm_data = sm->shmem_data; - int rc; - - /* adapter should be down at this point */ - if (adapter->state != ADAPT_DOWN) { - dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n", - __func__); - rc = -EIO; - goto err; - } - - adapter->devflags_prev = dev->flags; - adapter->macopts = MAC_DIRECTED; - if (dev->flags) { - if (dev->flags & IFF_BROADCAST) - adapter->macopts |= MAC_BCAST; - if (dev->flags & IFF_PROMISC) - adapter->macopts |= MAC_PROMISC; - if (dev->flags & IFF_ALLMULTI) - adapter->macopts |= MAC_ALLMCAST; - if (dev->flags & IFF_MULTICAST) - adapter->macopts |= MAC_MCAST; - } - rc = slic_adapter_allocresources(adapter, flags); - if (rc) { - dev_err(&dev->dev, "slic_adapter_allocresources FAILED %x\n", - rc); - slic_adapter_freeresources(adapter); - goto err; - } - - if (!adapter->queues_initialized) { - rc = slic_rspqueue_init(adapter); - if (rc) - goto err; - rc = slic_cmdq_init(adapter); - if (rc) - goto err; - rc = slic_rcvqueue_init(adapter); - if (rc) - goto err; - adapter->queues_initialized = 1; - } - - slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF); - slic_flush_write(adapter); - mdelay(1); - - if (!adapter->isp_initialized) { - unsigned long flags; - - spin_lock_irqsave(&adapter->bit64reglock, flags); - slic_write32(adapter, SLIC_REG_ADDR_UPPER, - cpu_to_le32(upper_32_bits(sm->isr_phaddr))); - slic_write32(adapter, SLIC_REG_ISP, - cpu_to_le32(lower_32_bits(sm->isr_phaddr))); - spin_unlock_irqrestore(&adapter->bit64reglock, flags); - - adapter->isp_initialized = 1; - } - - adapter->state = ADAPT_UP; - if (!card->loadtimerset) { - setup_timer(&card->loadtimer, &slic_timer_load_check, - (ulong)card); - card->loadtimer.expires = - jiffies + (SLIC_LOADTIMER_PERIOD * HZ); - add_timer(&card->loadtimer); - - card->loadtimerset = 1; - } - - if (!adapter->pingtimerset) { - setup_timer(&adapter->pingtimer, &slic_timer_ping, (ulong)dev); - adapter->pingtimer.expires = - jiffies + (PING_TIMER_INTERVAL * HZ); - add_timer(&adapter->pingtimer); - adapter->pingtimerset = 1; - adapter->card->pingstatus = ISR_PINGMASK; - } - - /* - * clear any pending events, then enable interrupts - */ - sm_data->isr = 0; - slic_write32(adapter, SLIC_REG_ISR, 0); - slic_write32(adapter, SLIC_REG_ICR, ICR_INT_ON); - - slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD); - slic_flush_write(adapter); - - rc = slic_link_event_handler(adapter); - if (rc) { - /* disable interrupts then clear pending events */ - slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF); - slic_write32(adapter, SLIC_REG_ISR, 0); - slic_flush_write(adapter); - - if (adapter->pingtimerset) { - del_timer(&adapter->pingtimer); - adapter->pingtimerset = 0; - } - if (card->loadtimerset) { - del_timer(&card->loadtimer); - card->loadtimerset = 0; - } - adapter->state = ADAPT_DOWN; - slic_adapter_freeresources(adapter); - } - -err: - return rc; -} - -static int slic_entry_open(struct net_device *dev) -{ - struct adapter *adapter = netdev_priv(dev); - struct sliccard *card = adapter->card; - unsigned long flags; - int status; - - netif_carrier_off(dev); - - spin_lock_irqsave(&slic_global.driver_lock, flags); - if (!adapter->activated) { - card->adapters_activated++; - slic_global.num_slic_ports_active++; - adapter->activated = 1; - } - status = slic_if_init(adapter, &flags); - - if (status != 0) { - if 
(adapter->activated) { - card->adapters_activated--; - slic_global.num_slic_ports_active--; - adapter->activated = 0; - } - goto spin_unlock; - } - if (!card->master) - card->master = adapter; - -spin_unlock: - spin_unlock_irqrestore(&slic_global.driver_lock, flags); - - netif_start_queue(adapter->netdev); - - return status; -} - -static void slic_card_cleanup(struct sliccard *card) -{ - if (card->loadtimerset) { - card->loadtimerset = 0; - del_timer_sync(&card->loadtimer); - } - - kfree(card); -} - -static void slic_entry_remove(struct pci_dev *pcidev) -{ - struct net_device *dev = pci_get_drvdata(pcidev); - struct adapter *adapter = netdev_priv(dev); - struct sliccard *card; - struct mcast_address *mcaddr, *mlist; - - unregister_netdev(dev); - - slic_adapter_freeresources(adapter); - iounmap(adapter->regs); - - /* free multicast addresses */ - mlist = adapter->mcastaddrs; - while (mlist) { - mcaddr = mlist; - mlist = mlist->next; - kfree(mcaddr); - } - card = adapter->card; - card->adapters_allocated--; - adapter->allocated = 0; - if (!card->adapters_allocated) { - struct sliccard *curr_card = slic_global.slic_card; - - if (curr_card == card) { - slic_global.slic_card = card->next; - } else { - while (curr_card->next != card) - curr_card = curr_card->next; - curr_card->next = card->next; - } - slic_global.num_slic_cards--; - slic_card_cleanup(card); - } - free_netdev(dev); - pci_release_regions(pcidev); - pci_disable_device(pcidev); -} - -static int slic_entry_halt(struct net_device *dev) -{ - struct adapter *adapter = netdev_priv(dev); - struct sliccard *card = adapter->card; - unsigned long flags; - - spin_lock_irqsave(&slic_global.driver_lock, flags); - netif_stop_queue(adapter->netdev); - adapter->state = ADAPT_DOWN; - adapter->linkstate = LINK_DOWN; - adapter->upr_list = NULL; - adapter->upr_busy = 0; - adapter->devflags_prev = 0; - slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF); - adapter->all_reg_writes++; - adapter->icr_reg_writes++; - slic_config_clear(adapter); - if (adapter->activated) { - card->adapters_activated--; - slic_global.num_slic_ports_active--; - adapter->activated = 0; - } -#ifdef AUTOMATIC_RESET - slic_write32(adapter, SLIC_REG_RESET_IFACE, 0); -#endif - slic_flush_write(adapter); - - /* - * Reset the adapter's cmd queues - */ - slic_cmdq_reset(adapter); - -#ifdef AUTOMATIC_RESET - if (!card->adapters_activated) - slic_card_init(card, adapter); -#endif - - spin_unlock_irqrestore(&slic_global.driver_lock, flags); - - netif_carrier_off(dev); - - return 0; -} - -static struct net_device_stats *slic_get_stats(struct net_device *dev) -{ - struct adapter *adapter = netdev_priv(dev); - - dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions; - dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors; - dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors; - dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards; - dev->stats.tx_heartbeat_errors = 0; - dev->stats.tx_aborted_errors = 0; - dev->stats.tx_window_errors = 0; - dev->stats.tx_fifo_errors = 0; - dev->stats.rx_frame_errors = 0; - dev->stats.rx_length_errors = 0; - - return &dev->stats; -} - -static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct adapter *adapter = netdev_priv(dev); - struct ethtool_cmd edata; - struct ethtool_cmd ecmd; - u32 data[7]; - u32 intagg; - - switch (cmd) { - case SIOCSLICSETINTAGG: - if (copy_from_user(data, rq->ifr_data, 28)) - return -EFAULT; - intagg = data[0]; - dev_err(&dev->dev, "set interrupt aggregation to %d\n", 
- intagg); - slic_intagg_set(adapter, intagg); - return 0; - - case SIOCETHTOOL: - if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd))) - return -EFAULT; - - if (ecmd.cmd == ETHTOOL_GSET) { - memset(&edata, 0, sizeof(edata)); - edata.supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | SUPPORTED_MII); - edata.port = PORT_MII; - edata.transceiver = XCVR_INTERNAL; - edata.phy_address = 0; - if (adapter->linkspeed == LINK_100MB) - edata.speed = SPEED_100; - else if (adapter->linkspeed == LINK_10MB) - edata.speed = SPEED_10; - else - edata.speed = 0; - - if (adapter->linkduplex == LINK_FULLD) - edata.duplex = DUPLEX_FULL; - else - edata.duplex = DUPLEX_HALF; - - edata.autoneg = AUTONEG_ENABLE; - edata.maxtxpkt = 1; - edata.maxrxpkt = 1; - if (copy_to_user(rq->ifr_data, &edata, sizeof(edata))) - return -EFAULT; - - } else if (ecmd.cmd == ETHTOOL_SSET) { - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (adapter->linkspeed == LINK_100MB) - edata.speed = SPEED_100; - else if (adapter->linkspeed == LINK_10MB) - edata.speed = SPEED_10; - else - edata.speed = 0; - - if (adapter->linkduplex == LINK_FULLD) - edata.duplex = DUPLEX_FULL; - else - edata.duplex = DUPLEX_HALF; - - edata.autoneg = AUTONEG_ENABLE; - edata.maxtxpkt = 1; - edata.maxrxpkt = 1; - if ((ecmd.speed != edata.speed) || - (ecmd.duplex != edata.duplex)) { - u32 speed; - u32 duplex; - - if (ecmd.speed == SPEED_10) - speed = 0; - else - speed = PCR_SPEED_100; - if (ecmd.duplex == DUPLEX_FULL) - duplex = PCR_DUPLEX_FULL; - else - duplex = 0; - slic_link_config(adapter, speed, duplex); - if (slic_link_event_handler(adapter)) - return -EFAULT; - } - } - return 0; - default: - return -EOPNOTSUPP; - } -} - -static void slic_config_pci(struct pci_dev *pcidev) -{ - u16 pci_command; - u16 new_command; - - pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); - - new_command = pci_command | PCI_COMMAND_MASTER - | PCI_COMMAND_MEMORY - | PCI_COMMAND_INVALIDATE - | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK; - if (pci_command != new_command) - pci_write_config_word(pcidev, PCI_COMMAND, new_command); -} - -static int slic_card_init(struct sliccard *card, struct adapter *adapter) -{ - struct slic_shmemory *sm = &adapter->shmem; - struct slic_shmem_data *sm_data = sm->shmem_data; - struct slic_eeprom *peeprom; - struct oslic_eeprom *pOeeprom; - dma_addr_t phys_config; - u32 phys_configh; - u32 phys_configl; - u32 i = 0; - int status; - uint macaddrs = card->card_size; - ushort eecodesize; - ushort dramsize; - ushort ee_chksum; - ushort calc_chksum; - struct slic_config_mac *pmac; - unsigned char fruformat; - unsigned char oemfruformat; - struct atk_fru *patkfru; - union oemfru *poemfru; - unsigned long flags; - - /* Reset everything except PCI configuration space */ - slic_soft_reset(adapter); - - /* Download the microcode */ - status = slic_card_download(adapter); - if (status) - return status; - - if (!card->config_set) { - peeprom = pci_alloc_consistent(adapter->pcidev, - sizeof(struct slic_eeprom), - &phys_config); - - if (!peeprom) { - dev_err(&adapter->pcidev->dev, - "Failed to allocate DMA memory for EEPROM.\n"); - return -ENOMEM; - } - - phys_configl = SLIC_GET_ADDR_LOW(phys_config); - phys_configh = SLIC_GET_ADDR_HIGH(phys_config); - - memset(peeprom, 0, sizeof(struct slic_eeprom)); - - slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF); - slic_flush_write(adapter); - mdelay(1); - - spin_lock_irqsave(&adapter->bit64reglock, 
flags); - slic_write32(adapter, SLIC_REG_ADDR_UPPER, - cpu_to_le32(upper_32_bits(sm->isr_phaddr))); - slic_write32(adapter, SLIC_REG_ISP, - cpu_to_le32(lower_32_bits(sm->isr_phaddr))); - spin_unlock_irqrestore(&adapter->bit64reglock, flags); - - status = slic_config_get(adapter, phys_configl, phys_configh); - if (status) { - dev_err(&adapter->pcidev->dev, - "Failed to fetch config data from device.\n"); - goto card_init_err; - } - - for (;;) { - if (sm_data->isr) { - if (sm_data->isr & ISR_UPC) { - sm_data->isr = 0; - slic_write64(adapter, SLIC_REG_ISP, 0, - 0); - slic_write32(adapter, SLIC_REG_ISR, 0); - slic_flush_write(adapter); - - slic_upr_request_complete(adapter, 0); - break; - } - - sm_data->isr = 0; - slic_write32(adapter, SLIC_REG_ISR, 0); - slic_flush_write(adapter); - } else { - mdelay(1); - i++; - if (i > 5000) { - dev_err(&adapter->pcidev->dev, - "Fetch of config data timed out.\n"); - slic_write64(adapter, SLIC_REG_ISP, - 0, 0); - slic_flush_write(adapter); - - status = -EINVAL; - goto card_init_err; - } - } - } - - switch (adapter->devid) { - /* Oasis card */ - case SLIC_2GB_DEVICE_ID: - /* extract EEPROM data and pointers to EEPROM data */ - pOeeprom = (struct oslic_eeprom *)peeprom; - eecodesize = pOeeprom->EecodeSize; - dramsize = pOeeprom->DramSize; - pmac = pOeeprom->MacInfo; - fruformat = pOeeprom->FruFormat; - patkfru = &pOeeprom->AtkFru; - oemfruformat = pOeeprom->OemFruFormat; - poemfru = &pOeeprom->OemFru; - macaddrs = 2; - /* - * Minor kludge for Oasis card - * get 2 MAC addresses from the - * EEPROM to ensure that function 1 - * gets the Port 1 MAC address - */ - break; - default: - /* extract EEPROM data and pointers to EEPROM data */ - eecodesize = peeprom->EecodeSize; - dramsize = peeprom->DramSize; - pmac = peeprom->u2.mac.MacInfo; - fruformat = peeprom->FruFormat; - patkfru = &peeprom->AtkFru; - oemfruformat = peeprom->OemFruFormat; - poemfru = &peeprom->OemFru; - break; - } - - card->config.EepromValid = false; - - /* see if the EEPROM is valid by checking it's checksum */ - if ((eecodesize <= MAX_EECODE_SIZE) && - (eecodesize >= MIN_EECODE_SIZE)) { - ee_chksum = - *(u16 *)((char *)peeprom + (eecodesize - 2)); - /* - * calculate the EEPROM checksum - */ - calc_chksum = slic_eeprom_cksum(peeprom, - eecodesize - 2); - /* - * if the ucdoe chksum flag bit worked, - * we wouldn't need this - */ - if (ee_chksum == calc_chksum) - card->config.EepromValid = true; - } - /* copy in the DRAM size */ - card->config.DramSize = dramsize; - - /* copy in the MAC address(es) */ - for (i = 0; i < macaddrs; i++) { - memcpy(&card->config.MacInfo[i], - &pmac[i], sizeof(struct slic_config_mac)); - } - - /* copy the Alacritech FRU information */ - card->config.FruFormat = fruformat; - memcpy(&card->config.AtkFru, patkfru, - sizeof(struct atk_fru)); - - pci_free_consistent(adapter->pcidev, - sizeof(struct slic_eeprom), - peeprom, phys_config); - - if (!card->config.EepromValid) { - slic_write64(adapter, SLIC_REG_ISP, 0, 0); - slic_flush_write(adapter); - dev_err(&adapter->pcidev->dev, "EEPROM invalid.\n"); - return -EINVAL; - } - - card->config_set = 1; - } - - status = slic_card_download_gbrcv(adapter); - if (status) - return status; - - if (slic_global.dynamic_intagg) - slic_intagg_set(adapter, 0); - else - slic_intagg_set(adapter, adapter->intagg_delay); - - /* - * Initialize ping status to "ok" - */ - card->pingstatus = ISR_PINGMASK; - - /* - * Lastly, mark our card state as up and return success - */ - card->state = CARD_UP; - card->reset_in_progress = 0; - - return 0; - 
-card_init_err: - pci_free_consistent(adapter->pcidev, sizeof(struct slic_eeprom), - peeprom, phys_config); - return status; -} - -static int slic_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) -{ - struct adapter *adapter = netdev_priv(dev); - - adapter->intagg_delay = coalesce->rx_coalesce_usecs; - adapter->dynamic_intagg = coalesce->use_adaptive_rx_coalesce; - return 0; -} - -static int slic_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) -{ - struct adapter *adapter = netdev_priv(dev); - - coalesce->rx_coalesce_usecs = adapter->intagg_delay; - coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg; - return 0; -} - -static void slic_init_driver(void) -{ - if (slic_first_init) { - slic_first_init = 0; - spin_lock_init(&slic_global.driver_lock); - } -} - -static int slic_init_adapter(struct net_device *netdev, - struct pci_dev *pcidev, - const struct pci_device_id *pci_tbl_entry, - void __iomem *memaddr, int chip_idx) -{ - ushort index; - struct slic_handle *pslic_handle; - struct adapter *adapter = netdev_priv(netdev); - struct slic_shmemory *sm = &adapter->shmem; - struct slic_shmem_data *sm_data; - dma_addr_t phaddr; - -/* adapter->pcidev = pcidev;*/ - adapter->vendid = pci_tbl_entry->vendor; - adapter->devid = pci_tbl_entry->device; - adapter->subsysid = pci_tbl_entry->subdevice; - adapter->busnumber = pcidev->bus->number; - adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F); - adapter->functionnumber = (pcidev->devfn & 0x7); - adapter->regs = memaddr; - adapter->irq = pcidev->irq; - adapter->chipid = chip_idx; - adapter->port = 0; - adapter->cardindex = adapter->port; - spin_lock_init(&adapter->upr_lock); - spin_lock_init(&adapter->bit64reglock); - spin_lock_init(&adapter->adapter_lock); - spin_lock_init(&adapter->reset_lock); - spin_lock_init(&adapter->handle_lock); - - adapter->card_size = 1; - /* - * Initialize slic_handle array - */ - /* - * Start with 1. 0 is an invalid host handle. 
- */ - for (index = 1, pslic_handle = &adapter->slic_handles[1]; - index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) { - pslic_handle->token.handle_index = index; - pslic_handle->type = SLIC_HANDLE_FREE; - pslic_handle->next = adapter->pfree_slic_handles; - adapter->pfree_slic_handles = pslic_handle; - } - sm_data = pci_zalloc_consistent(adapter->pcidev, sizeof(*sm_data), - &phaddr); - if (!sm_data) - return -ENOMEM; - - sm->shmem_data = sm_data; - sm->isr_phaddr = phaddr; - sm->lnkstatus_phaddr = phaddr + offsetof(struct slic_shmem_data, - lnkstatus); - sm->stats_phaddr = phaddr + offsetof(struct slic_shmem_data, stats); - - return 0; -} - -static const struct net_device_ops slic_netdev_ops = { - .ndo_open = slic_entry_open, - .ndo_stop = slic_entry_halt, - .ndo_start_xmit = slic_xmit_start, - .ndo_do_ioctl = slic_ioctl, - .ndo_set_mac_address = slic_mac_set_address, - .ndo_get_stats = slic_get_stats, - .ndo_set_rx_mode = slic_mcast_set_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, -}; - -static u32 slic_card_locate(struct adapter *adapter) -{ - struct sliccard *card = slic_global.slic_card; - struct physcard *physcard = slic_global.phys_card; - ushort card_hostid; - uint i; - - card_hostid = slic_read32(adapter, SLIC_REG_HOSTID); - - /* Initialize a new card structure if need be */ - if (card_hostid == SLIC_HOSTID_DEFAULT) { - card = kzalloc(sizeof(*card), GFP_KERNEL); - if (!card) - return -ENOMEM; - - card->next = slic_global.slic_card; - slic_global.slic_card = card; - card->busnumber = adapter->busnumber; - card->slotnumber = adapter->slotnumber; - - /* Find an available cardnum */ - for (i = 0; i < SLIC_MAX_CARDS; i++) { - if (slic_global.cardnuminuse[i] == 0) { - slic_global.cardnuminuse[i] = 1; - card->cardnum = i; - break; - } - } - slic_global.num_slic_cards++; - } else { - /* Card exists, find the card this adapter belongs to */ - while (card) { - if (card->cardnum == card_hostid) - break; - card = card->next; - } - } - - if (!card) - return -ENXIO; - /* Put the adapter in the card's adapter list */ - if (!card->adapter[adapter->port]) { - card->adapter[adapter->port] = adapter; - adapter->card = card; - } - - card->card_size = 1; /* one port per *logical* card */ - - while (physcard) { - for (i = 0; i < SLIC_MAX_PORTS; i++) { - if (physcard->adapter[i]) - break; - } - if (i == SLIC_MAX_PORTS) - break; - - if (physcard->adapter[i]->slotnumber == adapter->slotnumber) - break; - physcard = physcard->next; - } - if (!physcard) { - /* no structure allocated for this physical card yet */ - physcard = kzalloc(sizeof(*physcard), GFP_ATOMIC); - if (!physcard) { - if (card_hostid == SLIC_HOSTID_DEFAULT) - kfree(card); - return -ENOMEM; - } - - physcard->next = slic_global.phys_card; - slic_global.phys_card = physcard; - physcard->adapters_allocd = 1; - } else { - physcard->adapters_allocd++; - } - /* Note - this is ZERO relative */ - adapter->physport = physcard->adapters_allocd - 1; - - physcard->adapter[adapter->physport] = adapter; - adapter->physcard = physcard; - - return 0; -} - -static int slic_entry_probe(struct pci_dev *pcidev, - const struct pci_device_id *pci_tbl_entry) -{ - static int cards_found; - static int did_version; - int err = -ENODEV; - struct net_device *netdev; - struct adapter *adapter; - void __iomem *memmapped_ioaddr = NULL; - ulong mmio_start = 0; - ulong mmio_len = 0; - struct sliccard *card = NULL; - int pci_using_dac = 0; - - err = pci_enable_device(pcidev); - - if (err) - return err; - - if (did_version++ == 0) { - 
dev_info(&pcidev->dev, "%s\n", slic_banner); - dev_info(&pcidev->dev, "%s\n", slic_proc_version); - } - - if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for consistent allocations\n"); - goto err_out_disable_pci; - } - } else { - err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pcidev->dev, "no usable DMA configuration\n"); - goto err_out_disable_pci; - } - pci_using_dac = 0; - pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); - } - - err = pci_request_regions(pcidev, DRV_NAME); - if (err) { - dev_err(&pcidev->dev, "can't obtain PCI resources\n"); - goto err_out_disable_pci; - } - - pci_set_master(pcidev); - - netdev = alloc_etherdev(sizeof(struct adapter)); - if (!netdev) { - err = -ENOMEM; - goto err_out_exit_slic_probe; - } - - netdev->ethtool_ops = &slic_ethtool_ops; - SET_NETDEV_DEV(netdev, &pcidev->dev); - - pci_set_drvdata(pcidev, netdev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pcidev = pcidev; - slic_global.dynamic_intagg = adapter->dynamic_intagg; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; - - mmio_start = pci_resource_start(pcidev, 0); - mmio_len = pci_resource_len(pcidev, 0); - - memmapped_ioaddr = ioremap_nocache(mmio_start, mmio_len); - if (!memmapped_ioaddr) { - dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n", - mmio_len, mmio_start); - err = -ENOMEM; - goto err_out_free_netdev; - } - - slic_config_pci(pcidev); - - slic_init_driver(); - - err = slic_init_adapter(netdev, pcidev, pci_tbl_entry, memmapped_ioaddr, - cards_found); - if (err) { - dev_err(&pcidev->dev, "failed to init adapter: %i\n", err); - goto err_out_unmap; - } - - err = slic_card_locate(adapter); - if (err) { - dev_err(&pcidev->dev, "cannot locate card\n"); - goto err_clean_init; - } - - card = adapter->card; - - if (!adapter->allocated) { - card->adapters_allocated++; - adapter->allocated = 1; - } - - err = slic_card_init(card, adapter); - if (err) - goto err_clean_init; - - slic_adapter_set_hwaddr(adapter); - - netdev->base_addr = (unsigned long)memmapped_ioaddr; - netdev->irq = adapter->irq; - netdev->netdev_ops = &slic_netdev_ops; - - netif_carrier_off(netdev); - - strcpy(netdev->name, "eth%d"); - err = register_netdev(netdev); - if (err) { - dev_err(&pcidev->dev, "Cannot register net device, aborting.\n"); - goto err_clean_init; - } - - cards_found++; - - return 0; - -err_clean_init: - slic_init_cleanup(adapter); -err_out_unmap: - iounmap(memmapped_ioaddr); -err_out_free_netdev: - free_netdev(netdev); -err_out_exit_slic_probe: - pci_release_regions(pcidev); -err_out_disable_pci: - pci_disable_device(pcidev); - return err; -} - -static struct pci_driver slic_driver = { - .name = DRV_NAME, - .id_table = slic_pci_tbl, - .probe = slic_entry_probe, - .remove = slic_entry_remove, -}; - -static int __init slic_module_init(void) -{ - slic_init_driver(); - - return pci_register_driver(&slic_driver); -} - -static void __exit slic_module_cleanup(void) -{ - pci_unregister_driver(&slic_driver); -} - -static const struct ethtool_ops slic_ethtool_ops = { - .get_coalesce = slic_get_coalesce, - .set_coalesce = slic_set_coalesce -}; - -module_init(slic_module_init); -module_exit(slic_module_cleanup); diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile index dcce3f487ed5422a824d91625bcf00e08b57274c..4d781f78b95c43cf8693e5dbdbf0eab1c6399847 100644 --- 
a/drivers/staging/sm750fb/Makefile +++ b/drivers/staging/sm750fb/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_FB_SM750) += sm750fb.o sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o -sm750fb-objs += ddk750_display.o ddk750_help.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o +sm750fb-objs += ddk750_display.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o diff --git a/drivers/staging/sm750fb/ddk750.h b/drivers/staging/sm750fb/ddk750.h index 2c10a08ed964338b912672d53633563838589c4d..734010324a8f039d5c253a20468b8d2271dcbf19 100644 --- a/drivers/staging/sm750fb/ddk750.h +++ b/drivers/staging/sm750fb/ddk750.h @@ -1,22 +1,21 @@ +/* + * Copyright (c) 2007 by Silicon Motion, Inc. (SMI) + * + * All rights are reserved. Reproduction or in part is prohibited + * without the written consent of the copyright owner. + * + * RegSC.h --- SM718 SDK + * This file contains the definitions for the System Configuration registers. + */ + #ifndef DDK750_H__ #define DDK750_H__ -/******************************************************************* -* -* Copyright (c) 2007 by Silicon Motion, Inc. (SMI) -* -* All rights are reserved. Reproduction or in part is prohibited -* without the written consent of the copyright owner. -* -* RegSC.h --- SM718 SDK -* This file contains the definitions for the System Configuration registers. -* -*******************************************************************/ + #include "ddk750_reg.h" #include "ddk750_mode.h" #include "ddk750_chip.h" #include "ddk750_display.h" #include "ddk750_power.h" -#include "ddk750_help.h" #ifdef USE_HW_I2C #include "ddk750_hwi2c.h" #endif diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c index 839d6730bde967e09070bb58b11a4066c756c79b..f59ce5c0867dc375497e15de5cfa23d0c86c0b0b 100644 --- a/drivers/staging/sm750fb/ddk750_chip.c +++ b/drivers/staging/sm750fb/ddk750_chip.c @@ -1,33 +1,32 @@ #include #include -#include "ddk750_help.h" #include "ddk750_reg.h" #include "ddk750_chip.h" #include "ddk750_power.h" #define MHz(x) ((x) * 1000000) +static logical_chip_type_t chip; + logical_chip_type_t sm750_get_chip_type(void) { - unsigned short physicalID; - char physicalRev; - logical_chip_type_t chip; - - physicalID = devId750; /* either 0x718 or 0x750 */ - physicalRev = revId750; + return chip; +} - if (physicalID == 0x718) +void sm750_set_chip_type(unsigned short devId, u8 revId) +{ + if (devId == 0x718) chip = SM718; - else if (physicalID == 0x750) { + else if (devId == 0x750) { chip = SM750; /* SM750 and SM750LE are different in their revision ID only. */ - if (physicalRev == SM750LE_REVISION_ID) + if (revId == SM750LE_REVISION_ID) { chip = SM750LE; + pr_info("found sm750le\n"); + } } else chip = SM_UNKNOWN; - - return chip; } static unsigned int get_mxclk_freq(void) @@ -52,9 +51,9 @@ static unsigned int get_mxclk_freq(void) * * Input: Frequency to be set. */ -static void setChipClock(unsigned int frequency) +static void set_chip_clock(unsigned int frequency) { - pll_value_t pll; + struct pll_value pll; unsigned int ulActualMxClk; /* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */ @@ -63,29 +62,31 @@ static void setChipClock(unsigned int frequency) if (frequency) { /* - * Set up PLL, a structure to hold the value to be set in clocks. - */ + * Set up PLL structure to hold the value to be set in clocks. 
+ */ pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */ pll.clockType = MXCLK_PLL; /* - * Call calcPllValue() to fill the other fields of PLL structure. - * Sometime, the chip cannot set up the exact clock - * required by the User. - * Return value of calcPllValue gives the actual possible clock. - */ - ulActualMxClk = calcPllValue(frequency, &pll); + * Call sm750_calc_pll_value() to fill the other fields of the PLL + * structure. Sometimes, the chip cannot set up the exact + * clock required by the User. + * Return value of sm750_calc_pll_value gives the actual possible + * clock. + */ + ulActualMxClk = sm750_calc_pll_value(frequency, &pll); /* Master Clock Control: MXCLK_PLL */ - POKE32(MXCLK_PLL_CTRL, formatPllReg(&pll)); + POKE32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll)); } } -static void setMemoryClock(unsigned int frequency) +static void set_memory_clock(unsigned int frequency) { unsigned int reg, divisor; - /* Cheok_0509: For SM750LE, the memory clock is fixed. + /* + * Cheok_0509: For SM750LE, the memory clock is fixed. * Nothing to set. */ if (sm750_get_chip_type() == SM750LE) @@ -120,7 +121,7 @@ static void setMemoryClock(unsigned int frequency) break; } - setCurrentGate(reg); + sm750_set_current_gate(reg); } } @@ -132,18 +133,20 @@ static void setMemoryClock(unsigned int frequency) * NOTE: * The maximum frequency the engine can run is 168MHz. */ -static void setMasterClock(unsigned int frequency) +static void set_master_clock(unsigned int frequency) { unsigned int reg, divisor; - /* Cheok_0509: For SM750LE, the memory clock is fixed. + /* + * Cheok_0509: For SM750LE, the memory clock is fixed. * Nothing to set. */ if (sm750_get_chip_type() == SM750LE) return; if (frequency) { - /* Set the frequency to the maximum frequency + /* + * Set the frequency to the maximum frequency * that the SM750 engine can run, which is about 190 MHz. */ if (frequency > MHz(190)) @@ -170,11 +173,11 @@ static void setMasterClock(unsigned int frequency) break; } - setCurrentGate(reg); + sm750_set_current_gate(reg); } } -unsigned int ddk750_getVMSize(void) +unsigned int ddk750_get_vm_size(void) { unsigned int reg; unsigned int data; @@ -206,18 +209,18 @@ unsigned int ddk750_getVMSize(void) return data; } -int ddk750_initHw(initchip_param_t *pInitParam) +int ddk750_init_hw(struct initchip_param *pInitParam) { unsigned int reg; if (pInitParam->powerMode != 0) pInitParam->powerMode = 0; - setPowerMode(pInitParam->powerMode); + sm750_set_power_mode(pInitParam->powerMode); /* Enable display power gate & LOCALMEM power gate*/ reg = PEEK32(CURRENT_GATE); reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM); - setCurrentGate(reg); + sm750_set_current_gate(reg); if (sm750_get_chip_type() != SM750LE) { /* set panel pll and graphic mode via mmio_88 */ @@ -233,16 +236,17 @@ int ddk750_initHw(initchip_param_t *pInitParam) } /* Set the Main Chip Clock */ - setChipClock(MHz((unsigned int)pInitParam->chipClock)); + set_chip_clock(MHz((unsigned int)pInitParam->chipClock)); /* Set up memory clock. */ - setMemoryClock(MHz(pInitParam->memClock)); + set_memory_clock(MHz(pInitParam->memClock)); /* Set up master clock */ - setMasterClock(MHz(pInitParam->masterClock)); + set_master_clock(MHz(pInitParam->masterClock)); - /* Reset the memory controller. + /* + * Reset the memory controller. * If the memory controller is not reset in SM750, * the system might hang when sw accesses the memory. * The memory should be resetted after changing the MXCLK. 
@@ -257,7 +261,7 @@ int ddk750_initHw(initchip_param_t *pInitParam) } if (pInitParam->setAllEngOff == 1) { - enable2DEngine(0); + sm750_enable_2d_engine(0); /* Disable Overlay, if a former application left it on */ reg = PEEK32(VIDEO_DISPLAY_CTRL); @@ -280,7 +284,7 @@ int ddk750_initHw(initchip_param_t *pInitParam) POKE32(DMA_ABORT_INTERRUPT, reg); /* Disable DMA Power, if a former application left it on */ - enableDMA(0); + sm750_enable_dma(0); } /* We can add more initialization as needed. */ @@ -305,9 +309,10 @@ int ddk750_initHw(initchip_param_t *pInitParam) * M = {1,...,255} * N = {2,...,15} */ -unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll) +unsigned int sm750_calc_pll_value(unsigned int request_orig, struct pll_value *pll) { - /* as sm750 register definition, + /* + * as sm750 register definition, * N located in 2,15 and M located in 1,255 */ int N, M, X, d; @@ -319,7 +324,8 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll) int max_d = 6; if (sm750_get_chip_type() == SM750LE) { - /* SM750LE don't have + /* + * SM750LE don't have * programmable PLL and M/N values to work on. * Just return the requested clock. */ @@ -331,14 +337,16 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll) request = request_orig / 1000; input = pll->inputFreq / 1000; - /* for MXCLK register, + /* + * for MXCLK register, * no POD provided, so need be treated differently */ if (pll->clockType == MXCLK_PLL) max_d = 3; for (N = 15; N > 1; N--) { - /* RN will not exceed maximum long + /* + * RN will not exceed maximum long * if @request <= 285 MHZ (for 32bit cpu) */ RN = N * request; @@ -373,7 +381,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll) return ret; } -unsigned int formatPllReg(pll_value_t *pPLL) +unsigned int sm750_format_pll_reg(struct pll_value *pPLL) { #ifndef VALIDATION_CHIP unsigned int POD = pPLL->POD; diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h index 14357fd1cc6bda3e1b03be2aad7c508b6c3d06f3..e63b8b2938168b6469bb57b32e0e89e4e36701ba 100644 --- a/drivers/staging/sm750fb/ddk750_chip.h +++ b/drivers/staging/sm750fb/ddk750_chip.h @@ -6,6 +6,14 @@ #endif #include +#include +#include + +/* software control endianness */ +#define PEEK32(addr) readl(addr + mmio750) +#define POKE32(addr, data) writel(data, addr + mmio750) + +extern void __iomem *mmio750; /* This is all the chips recognized by this library */ typedef enum _logical_chip_type_t { @@ -25,7 +33,7 @@ typedef enum _clock_type_t { } clock_type_t; -typedef struct _pll_value_t { +struct pll_value { clock_type_t clockType; unsigned long inputFreq; /* Input clock frequency to the PLL */ @@ -34,46 +42,55 @@ typedef struct _pll_value_t { unsigned long N; unsigned long OD; unsigned long POD; -} -pll_value_t; +}; /* input struct to initChipParam() function */ -typedef struct _initchip_param_t { - unsigned short powerMode; /* Use power mode 0 or 1 */ - unsigned short chipClock; /** - * Speed of main chip clock in MHz unit - * 0 = keep the current clock setting - * Others = the new main chip clock - */ - unsigned short memClock; /** - * Speed of memory clock in MHz unit - * 0 = keep the current clock setting - * Others = the new memory clock - */ - unsigned short masterClock; /** - * Speed of master clock in MHz unit - * 0 = keep the current clock setting - * Others = the new master clock - */ - unsigned short setAllEngOff; /** - * 0 = leave all engine state untouched. 
- * 1 = make sure they are off: 2D, Overlay, - * video alpha, alpha, hardware cursors - */ - unsigned char resetMemory; /** - * 0 = Do not reset the memory controller - * 1 = Reset the memory controller - */ +struct initchip_param { + /* Use power mode 0 or 1 */ + unsigned short powerMode; + + /* + * Speed of main chip clock in MHz unit + * 0 = keep the current clock setting + * Others = the new main chip clock + */ + unsigned short chipClock; + + /* + * Speed of memory clock in MHz unit + * 0 = keep the current clock setting + * Others = the new memory clock + */ + unsigned short memClock; + + /* + * Speed of master clock in MHz unit + * 0 = keep the current clock setting + * Others = the new master clock + */ + unsigned short masterClock; + + /* + * 0 = leave all engine state untouched. + * 1 = make sure they are off: 2D, Overlay, + * video alpha, alpha, hardware cursors + */ + unsigned short setAllEngOff; + + /* + * 0 = Do not reset the memory controller + * 1 = Reset the memory controller + */ + unsigned char resetMemory; /* More initialization parameter can be added if needed */ -} -initchip_param_t; +}; logical_chip_type_t sm750_get_chip_type(void); -unsigned int calcPllValue(unsigned int request, pll_value_t *pll); -unsigned int formatPllReg(pll_value_t *pPLL); -void ddk750_set_mmio(void __iomem *, unsigned short, char); -unsigned int ddk750_getVMSize(void); -int ddk750_initHw(initchip_param_t *); +void sm750_set_chip_type(unsigned short devId, u8 revId); +unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll); +unsigned int sm750_format_pll_reg(struct pll_value *pPLL); +unsigned int ddk750_get_vm_size(void); +int ddk750_init_hw(struct initchip_param *); #endif diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c index 4023c476b9e439e96b9d2870a43b65050849d44f..c347803f7e19bb61323db3c0a685d830390f7b97 100644 --- a/drivers/staging/sm750fb/ddk750_display.c +++ b/drivers/staging/sm750fb/ddk750_display.c @@ -1,11 +1,9 @@ #include "ddk750_reg.h" -#include "ddk750_help.h" +#include "ddk750_chip.h" #include "ddk750_display.h" #include "ddk750_power.h" #include "ddk750_dvi.h" -#define primaryWaitVerticalSync(delay) waitNextVerticalSync(0, delay) - static void setDisplayControl(int ctrl, int disp_state) { /* state != 0 means turn on both timing & plane en_bit */ @@ -61,55 +59,28 @@ static void setDisplayControl(int ctrl, int disp_state) } } -static void waitNextVerticalSync(int ctrl, int delay) +static void primary_wait_vertical_sync(int delay) { unsigned int status; - if (!ctrl) { - /* primary controller */ + /* + * Do not wait when the Primary PLL is off or display control is + * already off. This will prevent the software to wait forever. + */ + if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) || + !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) + return; - /* - * Do not wait when the Primary PLL is off or display control is - * already off. This will prevent the software to wait forever. - */ - if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) || - !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) { - return; - } - - while (delay-- > 0) { - /* Wait for end of vsync. */ - do { - status = PEEK32(SYSTEM_CTRL); - } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE); - - /* Wait for start of vsync. */ - do { - status = PEEK32(SYSTEM_CTRL); - } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE)); - } + while (delay-- > 0) { + /* Wait for end of vsync. 
*/ + do { + status = PEEK32(SYSTEM_CTRL); + } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE); - } else { - /* - * Do not wait when the Primary PLL is off or display control is - * already off. This will prevent the software to wait forever. - */ - if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) || - !(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) { - return; - } - - while (delay-- > 0) { - /* Wait for end of vsync. */ - do { - status = PEEK32(SYSTEM_CTRL); - } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE); - - /* Wait for start of vsync. */ - do { - status = PEEK32(SYSTEM_CTRL); - } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE)); - } + /* Wait for start of vsync. */ + do { + status = PEEK32(SYSTEM_CTRL); + } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE)); } } @@ -121,22 +92,22 @@ static void swPanelPowerSequence(int disp, int delay) reg = PEEK32(PANEL_DISPLAY_CTRL); reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0); POKE32(PANEL_DISPLAY_CTRL, reg); - primaryWaitVerticalSync(delay); + primary_wait_vertical_sync(delay); reg = PEEK32(PANEL_DISPLAY_CTRL); reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0); POKE32(PANEL_DISPLAY_CTRL, reg); - primaryWaitVerticalSync(delay); + primary_wait_vertical_sync(delay); reg = PEEK32(PANEL_DISPLAY_CTRL); reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0); POKE32(PANEL_DISPLAY_CTRL, reg); - primaryWaitVerticalSync(delay); + primary_wait_vertical_sync(delay); reg = PEEK32(PANEL_DISPLAY_CTRL); reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0); POKE32(PANEL_DISPLAY_CTRL, reg); - primaryWaitVerticalSync(delay); + primary_wait_vertical_sync(delay); } void ddk750_setLogicalDispOut(disp_output_t output) @@ -182,5 +153,5 @@ void ddk750_setLogicalDispOut(disp_output_t output) setDAC((output & DAC_MASK) >> DAC_OFFSET); if (output & DPMS_USAGE) - ddk750_setDPMS((output & DPMS_MASK) >> DPMS_OFFSET); + ddk750_set_dpms((output & DPMS_MASK) >> DPMS_OFFSET); } diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h index e3fde428c52b78ebe86573865a4a54095279b338..8abca88f089e7390a5574a6bc2672af6e2430d6f 100644 --- a/drivers/staging/sm750fb/ddk750_display.h +++ b/drivers/staging/sm750fb/ddk750_display.h @@ -1,7 +1,8 @@ #ifndef DDK750_DISPLAY_H__ #define DDK750_DISPLAY_H__ -/* panel path select +/* + * panel path select * 80000[29:28] */ @@ -12,7 +13,8 @@ #define PNL_2_SEC ((2 << PNL_2_OFFSET) | PNL_2_USAGE) -/* primary timing & plane enable bit +/* + * primary timing & plane enable bit * 1: 80000[8] & 80000[2] on * 0: both off */ @@ -23,7 +25,8 @@ #define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET) | PRI_TP_USAGE) -/* panel sequency status +/* + * panel sequency status * 80000[27:24] */ #define PNL_SEQ_OFFSET 6 @@ -32,7 +35,8 @@ #define PNL_SEQ_ON (BIT(PNL_SEQ_OFFSET) | PNL_SEQ_USAGE) #define PNL_SEQ_OFF ((0 << PNL_SEQ_OFFSET) | PNL_SEQ_USAGE) -/* dual digital output +/* + * dual digital output * 80000[19] */ #define DUAL_TFT_OFFSET 8 @@ -41,7 +45,8 @@ #define DUAL_TFT_ON (BIT(DUAL_TFT_OFFSET) | DUAL_TFT_USAGE) #define DUAL_TFT_OFF ((0 << DUAL_TFT_OFFSET) | DUAL_TFT_USAGE) -/* secondary timing & plane enable bit +/* + * secondary timing & plane enable bit * 1:80200[8] & 80200[2] on * 0: both off */ @@ -51,7 +56,8 @@ #define SEC_TP_ON ((0x1 << SEC_TP_OFFSET) | SEC_TP_USAGE) #define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET) | SEC_TP_USAGE) -/* crt path select +/* + * crt path select * 80200[19:18] */ #define CRT_2_OFFSET 2 @@ -61,7 +67,8 @@ #define CRT_2_SEC ((0x2 << CRT_2_OFFSET) | CRT_2_USAGE) -/* DAC affect both DVI and DSUB +/* + * DAC affect both DVI and DSUB 
* 4[20] */ #define DAC_OFFSET 7 @@ -70,7 +77,8 @@ #define DAC_ON ((0x0 << DAC_OFFSET) | DAC_USAGE) #define DAC_OFF ((0x1 << DAC_OFFSET) | DAC_USAGE) -/* DPMS only affect D-SUB head +/* + * DPMS only affect D-SUB head * 0[31:30] */ #define DPMS_OFFSET 9 @@ -81,7 +89,8 @@ -/* LCD1 means panel path TFT1 & panel path DVI (so enable DAC) +/* + * LCD1 means panel path TFT1 & panel path DVI (so enable DAC) * CRT means crt path DSUB */ typedef enum _disp_output_t { @@ -89,7 +98,8 @@ typedef enum _disp_output_t { do_LCD1_SEC = PNL_2_SEC | SEC_TP_ON | PNL_SEQ_ON | DAC_ON, do_LCD2_PRI = CRT_2_PRI | PRI_TP_ON | DUAL_TFT_ON, do_LCD2_SEC = CRT_2_SEC | SEC_TP_ON | DUAL_TFT_ON, - /* do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON, + /* + * do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON, * do_DSUB_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON|DAC_ON, */ do_CRT_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON | DAC_ON, diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c index 8252f771ef9e3c04be4766fe03b2fd8ddc8a5423..250c2f4787781bf59f3f62fc163afcb370e9616a 100644 --- a/drivers/staging/sm750fb/ddk750_dvi.c +++ b/drivers/staging/sm750fb/ddk750_dvi.c @@ -1,6 +1,6 @@ #define USE_DVICHIP #ifdef USE_DVICHIP -#include "ddk750_help.h" +#include "ddk750_chip.h" #include "ddk750_reg.h" #include "ddk750_dvi.h" #include "ddk750_sii164.h" diff --git a/drivers/staging/sm750fb/ddk750_help.c b/drivers/staging/sm750fb/ddk750_help.c deleted file mode 100644 index 9637dd30d037918325391f84758ea3505657a418..0000000000000000000000000000000000000000 --- a/drivers/staging/sm750fb/ddk750_help.c +++ /dev/null @@ -1,17 +0,0 @@ -#include "ddk750_help.h" - -void __iomem *mmio750; -char revId750; -unsigned short devId750; - -/* after driver mapped io registers, use this function first */ -void ddk750_set_mmio(void __iomem *addr, unsigned short devId, char revId) -{ - mmio750 = addr; - devId750 = devId; - revId750 = revId; - if (revId == 0xfe) - printk("found sm750le\n"); -} - - diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h deleted file mode 100644 index 009db9213a73fa118d09d7dacc550eac4ac0cdab..0000000000000000000000000000000000000000 --- a/drivers/staging/sm750fb/ddk750_help.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef DDK750_HELP_H__ -#define DDK750_HELP_H__ -#include "ddk750_chip.h" -#ifndef USE_INTERNAL_REGISTER_ACCESS - -#include -#include -#include - -/* software control endianness */ -#define PEEK32(addr) readl(addr + mmio750) -#define POKE32(addr, data) writel(data, addr + mmio750) - -extern void __iomem *mmio750; -extern char revId750; -extern unsigned short devId750; -#else -/* implement if you want use it*/ -#endif - -#endif diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c index d391c127ead7e864fc1524d0af13688c4f40cb17..05d4a73aa1d4d9bb67334fc7273c9cf17e82fbd7 100644 --- a/drivers/staging/sm750fb/ddk750_hwi2c.c +++ b/drivers/staging/sm750fb/ddk750_hwi2c.c @@ -1,6 +1,6 @@ #define USE_HW_I2C #ifdef USE_HW_I2C -#include "ddk750_help.h" +#include "ddk750_chip.h" #include "ddk750_reg.h" #include "ddk750_hwi2c.h" #include "ddk750_power.h" @@ -20,10 +20,11 @@ unsigned char bus_speed_mode value |= (GPIO_MUX_30 | GPIO_MUX_31); POKE32(GPIO_MUX, value); - /* Enable Hardware I2C power. + /* + * Enable Hardware I2C power. * TODO: Check if we need to enable GPIO power? 
*/ - enableI2C(1); + sm750_enable_i2c(1); /* Enable the I2C Controller and set the bus speed mode */ value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN); @@ -44,7 +45,7 @@ void sm750_hw_i2c_close(void) POKE32(I2C_CTRL, value); /* Disable I2C Power */ - enableI2C(0); + sm750_enable_i2c(0); /* Set GPIO 30 & 31 back as GPIO pins */ value = PEEK32(GPIO_MUX); @@ -92,7 +93,8 @@ static unsigned int hw_i2c_write_data( /* Set the Device Address */ POKE32(I2C_SLAVE_ADDRESS, addr & ~0x01); - /* Write data. + /* + * Write data. * Note: * Only 16 byte can be accessed per i2c start instruction. */ @@ -158,7 +160,8 @@ static unsigned int hw_i2c_read_data( /* Set the Device Address */ POKE32(I2C_SLAVE_ADDRESS, addr | 0x01); - /* Read data and save them to the buffer. + /* + * Read data and save them to the buffer. * Note: * Only 16 byte can be accessed per i2c start instruction. */ diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c index 05b83646c2d573350a850bdec045eb4c54dc92cb..4a4b1de97a8703a96a30609af7c3b83262076272 100644 --- a/drivers/staging/sm750fb/ddk750_mode.c +++ b/drivers/staging/sm750fb/ddk750_mode.c @@ -1,10 +1,10 @@ -#include "ddk750_help.h" #include "ddk750_reg.h" #include "ddk750_mode.h" #include "ddk750_chip.h" -/* SM750LE only: +/* + * SM750LE only: * This function takes care extra registers and bit fields required to set * up a mode in SM750LE * @@ -19,7 +19,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam, x = pModeParam->horizontal_display_end; y = pModeParam->vertical_display_end; - /* SM750LE has to set up the top-left and bottom-right + /* + * SM750LE has to set up the top-left and bottom-right * registers as well. * Note that normal SM750/SM718 only use those two register for * auto-centering mode. @@ -31,7 +32,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam, CRT_AUTO_CENTERING_BR_BOTTOM_MASK) | ((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK)); - /* Assume common fields in dispControl have been properly set before + /* + * Assume common fields in dispControl have been properly set before * calling this function. * This function only sets the extra fields in dispControl. */ @@ -72,7 +74,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam, /* only timing related registers will be programed */ -static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll) +static int programModeRegisters(mode_parameter_t *pModeParam, + struct pll_value *pll) { int ret = 0; int cnt = 0; @@ -80,7 +83,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll) if (pll->clockType == SECONDARY_PLL) { /* programe secondary pixel clock */ - POKE32(CRT_PLL_CTRL, formatPllReg(pll)); + POKE32(CRT_PLL_CTRL, sm750_format_pll_reg(pll)); POKE32(CRT_HORIZONTAL_TOTAL, (((pModeParam->horizontal_total - 1) << CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) & @@ -130,7 +133,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll) } else if (pll->clockType == PRIMARY_PLL) { unsigned int reserved; - POKE32(PANEL_PLL_CTRL, formatPllReg(pll)); + POKE32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll)); reg = ((pModeParam->horizontal_total - 1) << PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) & @@ -176,14 +179,14 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll) DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE); - /* May a hardware bug or just my test chip (not confirmed). 
- * PANEL_DISPLAY_CTRL register seems requiring few writes - * before a value can be successfully written in. - * Added some masks to mask out the reserved bits. - * Note: This problem happens by design. The hardware will wait for the - * next vertical sync to turn on/off the plane. - */ - + /* + * May a hardware bug or just my test chip (not confirmed). + * PANEL_DISPLAY_CTRL register seems requiring few writes + * before a value can be successfully written in. + * Added some masks to mask out the reserved bits. + * Note: This problem happens by design. The hardware will wait + * for the next vertical sync to turn on/off the plane. + */ POKE32(PANEL_DISPLAY_CTRL, tmp | reg); while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) != @@ -201,13 +204,13 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll) int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock) { - pll_value_t pll; + struct pll_value pll; unsigned int uiActualPixelClk; pll.inputFreq = DEFAULT_INPUT_CLOCK; pll.clockType = clock; - uiActualPixelClk = calcPllValue(parm->pixel_clock, &pll); + uiActualPixelClk = sm750_calc_pll_value(parm->pixel_clock, &pll); if (sm750_get_chip_type() == SM750LE) { /* set graphic mode via IO method */ outb_p(0x88, 0x3d4); diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c index 7cc6169f884e0f4d44102b25f878c0720bf3ba4a..6167e30e8e0125f177850a36acbf48e34be56891 100644 --- a/drivers/staging/sm750fb/ddk750_power.c +++ b/drivers/staging/sm750fb/ddk750_power.c @@ -1,8 +1,8 @@ -#include "ddk750_help.h" +#include "ddk750_chip.h" #include "ddk750_reg.h" #include "ddk750_power.h" -void ddk750_setDPMS(DPMS_t state) +void ddk750_set_dpms(DPMS_t state) { unsigned int value; @@ -17,7 +17,7 @@ void ddk750_setDPMS(DPMS_t state) } } -static unsigned int getPowerMode(void) +static unsigned int get_power_mode(void) { if (sm750_get_chip_type() == SM750LE) return 0; @@ -29,26 +29,26 @@ static unsigned int getPowerMode(void) * SM50x can operate in one of three modes: 0, 1 or Sleep. * On hardware reset, power mode 0 is default. */ -void setPowerMode(unsigned int powerMode) +void sm750_set_power_mode(unsigned int mode) { - unsigned int control_value = 0; + unsigned int ctrl = 0; - control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK; + ctrl = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK; if (sm750_get_chip_type() == SM750LE) return; - switch (powerMode) { + switch (mode) { case POWER_MODE_CTRL_MODE_MODE0: - control_value |= POWER_MODE_CTRL_MODE_MODE0; + ctrl |= POWER_MODE_CTRL_MODE_MODE0; break; case POWER_MODE_CTRL_MODE_MODE1: - control_value |= POWER_MODE_CTRL_MODE_MODE1; + ctrl |= POWER_MODE_CTRL_MODE_MODE1; break; case POWER_MODE_CTRL_MODE_SLEEP: - control_value |= POWER_MODE_CTRL_MODE_SLEEP; + ctrl |= POWER_MODE_CTRL_MODE_SLEEP; break; default: @@ -56,44 +56,28 @@ void setPowerMode(unsigned int powerMode) } /* Set up other fields in Power Control Register */ - if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) { - control_value &= ~POWER_MODE_CTRL_OSC_INPUT; + if (mode == POWER_MODE_CTRL_MODE_SLEEP) { + ctrl &= ~POWER_MODE_CTRL_OSC_INPUT; #ifdef VALIDATION_CHIP - control_value &= ~POWER_MODE_CTRL_336CLK; + ctrl &= ~POWER_MODE_CTRL_336CLK; #endif } else { - control_value |= POWER_MODE_CTRL_OSC_INPUT; + ctrl |= POWER_MODE_CTRL_OSC_INPUT; #ifdef VALIDATION_CHIP - control_value |= POWER_MODE_CTRL_336CLK; + ctrl |= POWER_MODE_CTRL_336CLK; #endif } /* Program new power mode. 
*/ - POKE32(POWER_MODE_CTRL, control_value); + POKE32(POWER_MODE_CTRL, ctrl); } -void setCurrentGate(unsigned int gate) +void sm750_set_current_gate(unsigned int gate) { - unsigned int gate_reg; - unsigned int mode; - - /* Get current power mode. */ - mode = getPowerMode(); - - switch (mode) { - case POWER_MODE_CTRL_MODE_MODE0: - gate_reg = MODE0_GATE; - break; - - case POWER_MODE_CTRL_MODE_MODE1: - gate_reg = MODE1_GATE; - break; - - default: - gate_reg = MODE0_GATE; - break; - } - POKE32(gate_reg, gate); + if (get_power_mode() == POWER_MODE_CTRL_MODE_MODE1) + POKE32(MODE1_GATE, gate); + else + POKE32(MODE0_GATE, gate); } @@ -101,7 +85,7 @@ void setCurrentGate(unsigned int gate) /* * This function enable/disable the 2D engine. */ -void enable2DEngine(unsigned int enable) +void sm750_enable_2d_engine(unsigned int enable) { u32 gate; @@ -111,10 +95,10 @@ void enable2DEngine(unsigned int enable) else gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC); - setCurrentGate(gate); + sm750_set_current_gate(gate); } -void enableDMA(unsigned int enable) +void sm750_enable_dma(unsigned int enable) { u32 gate; @@ -125,13 +109,13 @@ void enableDMA(unsigned int enable) else gate &= ~CURRENT_GATE_DMA; - setCurrentGate(gate); + sm750_set_current_gate(gate); } /* * This function enable/disable the GPIO Engine */ -void enableGPIO(unsigned int enable) +void sm750_enable_gpio(unsigned int enable) { u32 gate; @@ -142,13 +126,13 @@ void enableGPIO(unsigned int enable) else gate &= ~CURRENT_GATE_GPIO; - setCurrentGate(gate); + sm750_set_current_gate(gate); } /* * This function enable/disable the I2C Engine */ -void enableI2C(unsigned int enable) +void sm750_enable_i2c(unsigned int enable) { u32 gate; @@ -159,7 +143,7 @@ void enableI2C(unsigned int enable) else gate &= ~CURRENT_GATE_I2C; - setCurrentGate(gate); + sm750_set_current_gate(gate); } diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h index 5963691f9a6880955bd0ad86a9fcbdb3231e280d..eb088b0d805ff0cdcfddcba3ee49c09f884ab8cc 100644 --- a/drivers/staging/sm750fb/ddk750_power.h +++ b/drivers/staging/sm750fb/ddk750_power.h @@ -14,37 +14,29 @@ DPMS_t; (PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \ } -void ddk750_setDPMS(DPMS_t); - -/* - * This function sets the current power mode - */ -void setPowerMode(unsigned int powerMode); - -/* - * This function sets current gate - */ -void setCurrentGate(unsigned int gate); +void ddk750_set_dpms(DPMS_t); +void sm750_set_power_mode(unsigned int powerMode); +void sm750_set_current_gate(unsigned int gate); /* * This function enable/disable the 2D engine. 
*/ -void enable2DEngine(unsigned int enable); +void sm750_enable_2d_engine(unsigned int enable); /* * This function enable/disable the DMA Engine */ -void enableDMA(unsigned int enable); +void sm750_enable_dma(unsigned int enable); /* * This function enable/disable the GPIO Engine */ -void enableGPIO(unsigned int enable); +void sm750_enable_gpio(unsigned int enable); /* * This function enable/disable the I2C Engine */ -void enableI2C(unsigned int enable); +void sm750_enable_i2c(unsigned int enable); #endif diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h index 955247979aaa4d9a72575d9ed3c9f1c13995cb5a..4ed6d8d7712ae1077395f6921430d9319e3a9df2 100644 --- a/drivers/staging/sm750fb/ddk750_reg.h +++ b/drivers/staging/sm750fb/ddk750_reg.h @@ -601,13 +601,13 @@ #define PANEL_PLANE_TL 0x08001C #define PANEL_PLANE_TL_TOP_SHIFT 16 -#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16) -#define PANEL_PLANE_TL_LEFT_MASK 0xeff +#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16) +#define PANEL_PLANE_TL_LEFT_MASK 0x7ff #define PANEL_PLANE_BR 0x080020 #define PANEL_PLANE_BR_BOTTOM_SHIFT 16 -#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16) -#define PANEL_PLANE_BR_RIGHT_MASK 0xeff +#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16) +#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff #define PANEL_HORIZONTAL_TOTAL 0x080024 #define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16 diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c index 99a8683e63831abba56cd9cc246e5dfb1eadd6fe..259006ace2196cf83e8d6fa6dabfc794db7708c1 100644 --- a/drivers/staging/sm750fb/ddk750_sii164.c +++ b/drivers/staging/sm750fb/ddk750_sii164.c @@ -173,7 +173,8 @@ long sii164InitChip( i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config); - /* De-skew enabled with default 111b value. + /* + * De-skew enabled with default 111b value. * This fixes some artifacts problem in some mode on board 2.2. * Somehow this fix does not affect board 2.1. */ diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c index 72a42330e7a1ff7bcfe310308ffd2d10be91c1ee..b8a4e44359af013936d63e94d6fe33af1a2654af 100644 --- a/drivers/staging/sm750fb/ddk750_swi2c.c +++ b/drivers/staging/sm750fb/ddk750_swi2c.c @@ -1,21 +1,20 @@ -/******************************************************************* -* -* Copyright (c) 2007 by Silicon Motion, Inc. (SMI) -* -* All rights are reserved. Reproduction or in part is prohibited -* without the written consent of the copyright owner. -* -* swi2c.c --- SM750/SM718 DDK -* This file contains the source code for I2C using software -* implementation. -* -*******************************************************************/ -#include "ddk750_help.h" +/* + * Copyright (c) 2007 by Silicon Motion, Inc. (SMI) + * + * All rights are reserved. Reproduction or in part is prohibited + * without the written consent of the copyright owner. + * + * swi2c.c --- SM750/SM718 DDK + * This file contains the source code for I2C using software + * implementation. + */ + +#include "ddk750_chip.h" #include "ddk750_reg.h" #include "ddk750_swi2c.h" #include "ddk750_power.h" -/******************************************************************* +/* * I2C Software Master Driver: * =========================== * Each i2c cycle is split into 4 sections. Each of these section marks @@ -51,7 +50,7 @@ * SCL | L | | H | | * ---------------+---+---+---+---+ * - ******************************************************************/ + */ /* GPIO pins used for this I2C. 
It ranges from 0 to 63. */ static unsigned char sw_i2c_clk_gpio = DEFAULT_I2C_SCL; @@ -429,7 +428,7 @@ long sm750_sw_i2c_init( PEEK32(sw_i2c_data_gpio_mux_reg) & ~(1 << sw_i2c_data_gpio)); /* Enable GPIO power */ - enableGPIO(1); + sm750_enable_gpio(1); /* Clear the i2c lines. */ for (i = 0; i < 9; i++) diff --git a/drivers/staging/sm750fb/ddk750_swi2c.h b/drivers/staging/sm750fb/ddk750_swi2c.h index b53629cda0956c43fff775e07a08771f5b733a38..5a9466efc7bdf96af968d7b10aac8e5ffe34f361 100644 --- a/drivers/staging/sm750fb/ddk750_swi2c.h +++ b/drivers/staging/sm750fb/ddk750_swi2c.h @@ -1,15 +1,15 @@ -/******************************************************************* -* -* Copyright (c) 2007 by Silicon Motion, Inc. (SMI) -* -* All rights are reserved. Reproduction or in part is prohibited -* without the written consent of the copyright owner. -* -* swi2c.h --- SM750/SM718 DDK -* This file contains the definitions for i2c using software -* implementation. -* -*******************************************************************/ +/* + * Copyright (c) 2007 by Silicon Motion, Inc. (SMI) + * + * All rights are reserved. Reproduction or in part is prohibited + * without the written consent of the copyright owner. + * + * swi2c.h --- SM750/SM718 DDK + * This file contains the definitions for i2c using software + * implementation. + * + */ + #ifndef _SWI2C_H_ #define _SWI2C_H_ diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 7d90e250142c2bba706a6743e7926bf8751bbefb..e9632f162f99db7515cae26c4a75a1004bd59fa1 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -118,14 +118,14 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor) return -ENXIO; } - hw_cursor_disable(cursor); + sm750_hw_cursor_disable(cursor); if (fbcursor->set & FB_CUR_SETSIZE) - hw_cursor_setSize(cursor, + sm750_hw_cursor_setSize(cursor, fbcursor->image.width, fbcursor->image.height); if (fbcursor->set & FB_CUR_SETPOS) - hw_cursor_setPos(cursor, + sm750_hw_cursor_setPos(cursor, fbcursor->image.dx - info->var.xoffset, fbcursor->image.dy - info->var.yoffset); @@ -141,18 +141,18 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor) ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) | ((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11); - hw_cursor_setColor(cursor, fg, bg); + sm750_hw_cursor_setColor(cursor, fg, bg); } if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) { - hw_cursor_setData(cursor, + sm750_hw_cursor_setData(cursor, fbcursor->rop, fbcursor->image.data, fbcursor->mask); } if (fbcursor->enable) - hw_cursor_enable(cursor); + sm750_hw_cursor_enable(cursor); return 0; } @@ -575,11 +575,11 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var, return hw_sm750_crtc_checkMode(crtc, var); } -static int lynxfb_ops_setcolreg(unsigned regno, - unsigned red, - unsigned green, - unsigned blue, - unsigned transp, +static int lynxfb_ops_setcolreg(unsigned int regno, + unsigned int red, + unsigned int green, + unsigned int blue, + unsigned int transp, struct fb_info *info) { struct lynxfb_par *par; @@ -788,7 +788,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) memset_io(crtc->cursor.vstart, 0, crtc->cursor.size); if (!g_hwcursor) { lynxfb_ops.fb_cursor = NULL; - hw_cursor_disable(&crtc->cursor); + sm750_hw_cursor_disable(&crtc->cursor); } /* set info->fbops, must be set before fb_find_mode */ @@ -947,13 +947,13 @@ static void sm750fb_setup(struct sm750_dev 
*sm750_dev, char *src) g_hwcursor = 3; if (!src || !*src) { - pr_warn("no specific g_option.\n"); + dev_warn(&sm750_dev->pdev->dev, "no specific g_option.\n"); goto NO_PARAM; } while ((opt = strsep(&src, ":")) != NULL && *opt != 0) { - pr_info("opt=%s\n", opt); - pr_info("src=%s\n", src); + dev_info(&sm750_dev->pdev->dev, "opt=%s\n", opt); + dev_info(&sm750_dev->pdev->dev, "src=%s\n", src); if (!strncmp(opt, "swap", strlen("swap"))) swap = 1; @@ -974,12 +974,12 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src) else { if (!g_fbmode[0]) { g_fbmode[0] = opt; - pr_info("find fbmode0 : %s\n", g_fbmode[0]); + dev_info(&sm750_dev->pdev->dev, "find fbmode0 : %s\n", g_fbmode[0]); } else if (!g_fbmode[1]) { g_fbmode[1] = opt; - pr_info("find fbmode1 : %s\n", g_fbmode[1]); + dev_info(&sm750_dev->pdev->dev, "find fbmode1 : %s\n", g_fbmode[1]); } else { - pr_warn("How many view you wann set?\n"); + dev_warn(&sm750_dev->pdev->dev, "How many view you wann set?\n"); } } } @@ -1083,10 +1083,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev, * if some chip need specific function, * please hook it in smXXX_set_drv routine */ - sm750_dev->accel.de_init = hw_de_init; - sm750_dev->accel.de_fillrect = hw_fillrect; - sm750_dev->accel.de_copyarea = hw_copyarea; - sm750_dev->accel.de_imageblit = hw_imageblit; + sm750_dev->accel.de_init = sm750_hw_de_init; + sm750_dev->accel.de_fillrect = sm750_hw_fillrect; + sm750_dev->accel.de_copyarea = sm750_hw_copyarea; + sm750_dev->accel.de_imageblit = sm750_hw_imageblit; } /* call chip specific setup routine */ @@ -1188,7 +1188,7 @@ static int __init lynxfb_setup(char *options) return 0; } -static struct pci_device_id smi_pci_table[] = { +static const struct pci_device_id smi_pci_table[] = { { PCI_DEVICE(0x126f, 0x0750), }, {0,} }; @@ -1209,7 +1209,6 @@ static struct pci_driver lynxfb_driver = { static int __init lynxfb_init(void) { char *option; - int ret; #ifdef MODULE option = g_option; @@ -1219,8 +1218,7 @@ static int __init lynxfb_init(void) #endif lynxfb_setup(option); - ret = pci_register_driver(&lynxfb_driver); - return ret; + return pci_register_driver(&lynxfb_driver); } module_init(lynxfb_init); @@ -1245,4 +1243,4 @@ MODULE_PARM_DESC(g_option, MODULE_AUTHOR("monk liu "); MODULE_AUTHOR("Sudip Mukherjee "); MODULE_DESCRIPTION("Frame buffer driver for SM750 chipset"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h index ff31c5c9cc6f6145fe274a2abb148e8d901c1308..28f4b9b4f95fa9768d9f8344cd769740c99eec28 100644 --- a/drivers/staging/sm750fb/sm750.h +++ b/drivers/staging/sm750fb/sm750.h @@ -146,14 +146,16 @@ struct lynxfb_crtc { struct lynxfb_output { int dpms; int paths; - /* which paths(s) this output stands for,for sm750: + /* + * which paths(s) this output stands for,for sm750: * paths=1:means output for panel paths * paths=2:means output for crt paths * paths=3:means output for both panel and crt paths */ int *channel; - /* which channel these outputs linked with,for sm750: + /* + * which channel these outputs linked with,for sm750: * *channel=0 means primary channel * *channel=1 means secondary channel * output->channel ==> &crtc->channel diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c index 38adae6b5d83848efc2a23c79ae553abc384741e..af0db5789c532277c404de6f7f76009a2db777b9 100644 --- a/drivers/staging/sm750fb/sm750_accel.c +++ b/drivers/staging/sm750fb/sm750_accel.c @@ -32,7 +32,7 @@ static inline void 
write_dpPort(struct lynx_accel *accel, u32 data) writel(data, accel->dpPortBase); } -void hw_de_init(struct lynx_accel *accel) +void sm750_hw_de_init(struct lynx_accel *accel) { /* setup 2d engine registers */ u32 reg, clr; @@ -65,12 +65,13 @@ void hw_de_init(struct lynx_accel *accel) write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr); } -/* set2dformat only be called from setmode functions +/* + * set2dformat only be called from setmode functions * but if you need dual framebuffer driver,need call set2dformat * every time you use 2d function */ -void hw_set2dformat(struct lynx_accel *accel, int fmt) +void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt) { u32 reg; @@ -82,7 +83,7 @@ void hw_set2dformat(struct lynx_accel *accel, int fmt) write_dpr(accel, DE_STRETCH_FORMAT, reg); } -int hw_fillrect(struct lynx_accel *accel, +int sm750_hw_fillrect(struct lynx_accel *accel, u32 base, u32 pitch, u32 Bpp, u32 x, u32 y, u32 width, u32 height, u32 color, u32 rop) @@ -90,7 +91,8 @@ int hw_fillrect(struct lynx_accel *accel, u32 deCtrl; if (accel->de_wait() != 0) { - /* int time wait and always busy,seems hardware + /* + * int time wait and always busy,seems hardware * got something error */ pr_debug("De engine always busy\n"); @@ -126,7 +128,7 @@ int hw_fillrect(struct lynx_accel *accel, return 0; } -int hw_copyarea( +int sm750_hw_copyarea( struct lynx_accel *accel, unsigned int sBase, /* Address of source: offset in frame buffer */ unsigned int sPitch, /* Pitch value of source surface in BYTE */ @@ -213,25 +215,29 @@ unsigned int rop2) /* ROP value */ opSign = (-1); } - /* Note: + /* + * Note: * DE_FOREGROUND are DE_BACKGROUND are don't care. * DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS * are set by set deSetTransparency(). */ - /* 2D Source Base. + /* + * 2D Source Base. * It is an address offset (128 bit aligned) * from the beginning of frame buffer. */ write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */ - /* 2D Destination Base. + /* + * 2D Destination Base. * It is an address offset (128 bit aligned) * from the beginning of frame buffer. */ write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */ - /* Program pitch (distance between the 1st points of two adjacent lines). + /* + * Program pitch (distance between the 1st points of two adjacent lines). * Note that input pitch is BYTE value, but the 2D Pitch register uses * pixel values. Need Byte to pixel conversion. */ @@ -240,7 +246,8 @@ unsigned int rop2) /* ROP value */ DE_PITCH_DESTINATION_MASK) | (sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */ - /* Screen Window width in Pixels. + /* + * Screen Window width in Pixels. * 2D engine uses this value to calculate the linear address in frame buffer * for a given point. */ @@ -286,7 +293,7 @@ static unsigned int deGetTransparency(struct lynx_accel *accel) return de_ctrl; } -int hw_imageblit(struct lynx_accel *accel, +int sm750_hw_imageblit(struct lynx_accel *accel, const char *pSrcbuf, /* pointer to start of source buffer in system memory */ u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */ u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */ @@ -316,7 +323,8 @@ int hw_imageblit(struct lynx_accel *accel, if (accel->de_wait() != 0) return -1; - /* 2D Source Base. + /* + * 2D Source Base. * Use 0 for HOST Blt. */ write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0); @@ -326,16 +334,19 @@ int hw_imageblit(struct lynx_accel *accel, * from the beginning of frame buffer. 
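The pitch comments being reflowed here make one practical point: callers pass pitches in bytes, while the DE_PITCH register expects pixels, hence the division by bytes-per-pixel before packing the destination and source fields. A small sketch of that conversion with the masks and shifts from the hunk; the helper name and the example figures are illustrative:

	/*
	 * e.g. a 1024-pixel-wide surface at 32bpp has a 4096-byte pitch,
	 * and 4096 / 4 = 1024 is what ends up in each register field.
	 */
	static u32 de_pitch_value(u32 dst_bytes, u32 src_bytes, u32 bytes_per_pixel)
	{
		return ((dst_bytes / bytes_per_pixel << DE_PITCH_DESTINATION_SHIFT) &
			DE_PITCH_DESTINATION_MASK) |
		       (src_bytes / bytes_per_pixel & DE_PITCH_SOURCE_MASK);
	}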
*/ write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); - /* Program pitch (distance between the 1st points of two adjacent lines). - * Note that input pitch is BYTE value, but the 2D Pitch register uses - * pixel values. Need Byte to pixel conversion. - */ + + /* + * Program pitch (distance between the 1st points of two adjacent + * lines). Note that input pitch is BYTE value, but the 2D Pitch + * register uses pixel values. Need Byte to pixel conversion. + */ write_dpr(accel, DE_PITCH, ((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) & DE_PITCH_DESTINATION_MASK) | (dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */ - /* Screen Window width in Pixels. + /* + * Screen Window width in Pixels. * 2D engine uses this value to calculate the linear address * in frame buffer for a given point. */ @@ -344,7 +355,8 @@ int hw_imageblit(struct lynx_accel *accel, DE_WINDOW_WIDTH_DST_MASK) | (dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK)); - /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, + /* + * Note: For 2D Source in Host Write, only X_K1_MONO field is needed, * and Y_K2 field is not used. * For mono bitmap, use startBit for X_K1. */ @@ -383,6 +395,6 @@ int hw_imageblit(struct lynx_accel *accel, pSrcbuf += srcDelta; } - return 0; + return 0; } diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h index d59d005e0add0768c61a366957060769f14da337..4b0ff8feb9a0a70f050b27e9c40603c530bfa6d1 100644 --- a/drivers/staging/sm750fb/sm750_accel.h +++ b/drivers/staging/sm750fb/sm750_accel.h @@ -184,16 +184,16 @@ #define BOTTOM_TO_TOP 1 #define RIGHT_TO_LEFT 1 -void hw_set2dformat(struct lynx_accel *accel, int fmt); +void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt); -void hw_de_init(struct lynx_accel *accel); +void sm750_hw_de_init(struct lynx_accel *accel); -int hw_fillrect(struct lynx_accel *accel, +int sm750_hw_fillrect(struct lynx_accel *accel, u32 base, u32 pitch, u32 Bpp, u32 x, u32 y, u32 width, u32 height, u32 color, u32 rop); -int hw_copyarea( +int sm750_hw_copyarea( struct lynx_accel *accel, unsigned int sBase, /* Address of source: offset in frame buffer */ unsigned int sPitch, /* Pitch value of source surface in BYTE */ @@ -208,7 +208,7 @@ unsigned int width, unsigned int height, /* width and height of rectangle in pixel value */ unsigned int rop2); -int hw_imageblit(struct lynx_accel *accel, +int sm750_hw_imageblit(struct lynx_accel *accel, const char *pSrcbuf, /* pointer to start of source buffer in system memory */ u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */ u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */ diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c index d622d65b6ceee2834b5070c1282952b3b7a81545..2a13353fc492a8becd6cbfab3cd24222da213a41 100644 --- a/drivers/staging/sm750fb/sm750_cursor.c +++ b/drivers/staging/sm750fb/sm750_cursor.c @@ -47,25 +47,25 @@ writel((data), cursor->mmio + (addr)) /* hw_cursor_xxx works for voyager,718 and 750 */ -void hw_cursor_enable(struct lynx_cursor *cursor) +void sm750_hw_cursor_enable(struct lynx_cursor *cursor) { u32 reg; reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE; POKE32(HWC_ADDRESS, reg); } -void hw_cursor_disable(struct lynx_cursor *cursor) +void sm750_hw_cursor_disable(struct lynx_cursor *cursor) { POKE32(HWC_ADDRESS, 0); } -void hw_cursor_setSize(struct lynx_cursor *cursor, +void 
sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h) { cursor->w = w; cursor->h = h; } -void hw_cursor_setPos(struct lynx_cursor *cursor, +void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y) { u32 reg; @@ -74,7 +74,7 @@ void hw_cursor_setPos(struct lynx_cursor *cursor, (x & HWC_LOCATION_X_MASK)); POKE32(HWC_LOCATION, reg); } -void hw_cursor_setColor(struct lynx_cursor *cursor, +void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg) { u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) & @@ -84,7 +84,7 @@ void hw_cursor_setColor(struct lynx_cursor *cursor, POKE32(HWC_COLOR_3, 0xffe0); } -void hw_cursor_setData(struct lynx_cursor *cursor, +void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop, const u8 *pcol, const u8 *pmsk) { int i, j, count, pitch, offset; @@ -138,7 +138,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor, } -void hw_cursor_setData2(struct lynx_cursor *cursor, +void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop, const u8 *pcol, const u8 *pmsk) { int i, j, count, pitch, offset; diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h index 6c4fc9b73489c2ba7892f7e30439c87fdb6fc2e4..c7b86ae235b42631268a67fbbaef1183d02a42f9 100644 --- a/drivers/staging/sm750fb/sm750_cursor.h +++ b/drivers/staging/sm750fb/sm750_cursor.h @@ -2,16 +2,16 @@ #define LYNX_CURSOR_H__ /* hw_cursor_xxx works for voyager,718 and 750 */ -void hw_cursor_enable(struct lynx_cursor *cursor); -void hw_cursor_disable(struct lynx_cursor *cursor); -void hw_cursor_setSize(struct lynx_cursor *cursor, +void sm750_hw_cursor_enable(struct lynx_cursor *cursor); +void sm750_hw_cursor_disable(struct lynx_cursor *cursor); +void sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h); -void hw_cursor_setPos(struct lynx_cursor *cursor, +void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y); -void hw_cursor_setColor(struct lynx_cursor *cursor, +void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg); -void hw_cursor_setData(struct lynx_cursor *cursor, +void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop, const u8 *data, const u8 *mask); -void hw_cursor_setData2(struct lynx_cursor *cursor, +void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop, const u8 *data, const u8 *mask); #endif diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c index 7dd208caa5eb07aa1a96914bdef3781182bcb445..b6af3b53076bb699726f64c2439d95641ae113b9 100644 --- a/drivers/staging/sm750fb/sm750_hw.c +++ b/drivers/staging/sm750fb/sm750_hw.c @@ -23,6 +23,8 @@ #include "ddk750.h" #include "sm750_accel.h" +void __iomem *mmio750; + int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev) { int ret; @@ -34,7 +36,8 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev) pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start); - /* reserve the vidreg space of smi adaptor + /* + * reserve the vidreg space of smi adaptor * if you do this, you need to add release region code * in lynxfb_remove, or memory will not be mapped again * successfully @@ -59,15 +62,17 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev) sm750_dev->accel.dprBase = sm750_dev->pvReg + DE_BASE_ADDR_TYPE1; sm750_dev->accel.dpPortBase = sm750_dev->pvReg + DE_PORT_ADDR_TYPE1; - ddk750_set_mmio(sm750_dev->pvReg, sm750_dev->devid, sm750_dev->revid); + mmio750 = sm750_dev->pvReg; + sm750_set_chip_type(sm750_dev->devid, sm750_dev->revid); 
sm750_dev->vidmem_start = pci_resource_start(pdev, 0); - /* don't use pdev_resource[x].end - resource[x].start to + /* + * don't use pdev_resource[x].end - resource[x].start to * calculate the resource size, it's only the maximum available * size but not the actual size, using - * @ddk750_getVMSize function can be safe. + * @ddk750_get_vm_size function can be safe. */ - sm750_dev->vidmem_size = ddk750_getVMSize(); + sm750_dev->vidmem_size = ddk750_get_vm_size(); pr_info("video memory phyAddr = %lx, size = %u bytes\n", sm750_dev->vidmem_start, sm750_dev->vidmem_size); @@ -100,7 +105,7 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev) if (parm->master_clk == 0) parm->master_clk = parm->chip_clk / 3; - ddk750_initHw((initchip_param_t *)&sm750_dev->initParm); + ddk750_init_hw((struct initchip_param *)&sm750_dev->initParm); /* for sm718, open pci burst */ if (sm750_dev->devid == 0x718) { POKE32(SYSTEM_CTRL, @@ -141,7 +146,8 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev) } POKE32(PANEL_DISPLAY_CTRL, val); } else { - /* for 750LE, no DVI chip initialization + /* + * for 750LE, no DVI chip initialization * makes Monitor no signal * * Set up GPIO for software I2C to program DVI chip in the @@ -149,13 +155,15 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev) */ sm750_sw_i2c_init(0, 1); - /* Customer may NOT use CH7301 DVI chip, which has to be + /* + * Customer may NOT use CH7301 DVI chip, which has to be * initialized differently. */ if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) { - /* The following register values for CH7301 are from - * Chrontel app note and our experiment. - */ + /* + * The following register values for CH7301 are from + * Chrontel app note and our experiment. + */ pr_info("yes,CH7301 DVI chip found\n"); sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16); sm750_sw_i2c_write_reg(0xec, 0x21, 0x9); @@ -267,7 +275,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc, fmt = 2; break; } - hw_set2dformat(&sm750_dev->accel, fmt); + sm750_hw_set2dformat(&sm750_dev->accel, fmt); } /* set timing */ @@ -308,7 +316,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc, crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK); reg = var->xres * (var->bits_per_pixel >> 3); - /* crtc->channel is not equal to par->index on numeric, + /* + * crtc->channel is not equal to par->index on numeric, * be aware of that */ reg = ALIGN(reg, crtc->line_pad); @@ -342,7 +351,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc, /* not implemented now */ POKE32(CRT_FB_ADDRESS, crtc->oScreen); reg = var->xres * (var->bits_per_pixel >> 3); - /* crtc->channel is not equal to par->index on numeric, + /* + * crtc->channel is not equal to par->index on numeric, * be aware of that */ reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT; @@ -469,7 +479,7 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev) { u32 reg; - enable2DEngine(1); + sm750_enable_2d_engine(1); if (sm750_get_chip_type() == SM750LE) { reg = PEEK32(DE_STATE1); diff --git a/drivers/staging/speakup/TODO b/drivers/staging/speakup/TODO index 3094799cf6a005d4a2dd2f7bc55ad349a8f64183..993410c3e531bc89edc522b0826d4e2c5f9453c4 100644 --- a/drivers/staging/speakup/TODO +++ b/drivers/staging/speakup/TODO @@ -42,6 +42,6 @@ We prefer that you contact us on the mailing list; however, if you do not want to subscribe to a mailing list, send your email to all of the following: -w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@braille.uwo.ca and +w.d.hubbs@gmail.com, 
chris@the-brannons.com, kirk@reisers.ca and samuel.thibault@ens-lyon.org. diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 97ca4ecca8a910f3fbad0ae8c26868de0ca5795b..5c192042eeac1ffe928848aa3f06c442d05eddc5 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -351,14 +351,14 @@ static void speakup_cut(struct vc_data *vc) if (!mark_cut_flag) { mark_cut_flag = 1; - spk_xs = (u_short) spk_x; - spk_ys = (u_short) spk_y; + spk_xs = (u_short)spk_x; + spk_ys = (u_short)spk_y; spk_sel_cons = vc; synth_printf("%s\n", spk_msg_get(MSG_MARK)); return; } - spk_xe = (u_short) spk_x; - spk_ye = (u_short) spk_y; + spk_xe = (u_short)spk_x; + spk_ye = (u_short)spk_y; mark_cut_flag = 0; synth_printf("%s\n", spk_msg_get(MSG_CUT)); @@ -489,7 +489,7 @@ static void say_char(struct vc_data *vc) u_short ch; spk_old_attr = spk_attr; - ch = get_char(vc, (u_short *) spk_pos, &spk_attr); + ch = get_char(vc, (u_short *)spk_pos, &spk_attr); if (spk_attr != spk_old_attr) { if (spk_attrib_bleep & 1) bleep(spk_y); @@ -504,7 +504,7 @@ static void say_phonetic_char(struct vc_data *vc) u_short ch; spk_old_attr = spk_attr; - ch = get_char(vc, (u_short *) spk_pos, &spk_attr); + ch = get_char(vc, (u_short *)spk_pos, &spk_attr); if (isascii(ch) && isalpha(ch)) { ch &= 0x1f; synth_printf("%s\n", phonetic[--ch]); @@ -556,7 +556,7 @@ static u_long get_word(struct vc_data *vc) u_char temp; spk_old_attr = spk_attr; - ch = (char)get_char(vc, (u_short *) tmp_pos, &temp); + ch = (char)get_char(vc, (u_short *)tmp_pos, &temp); /* decided to take out the sayword if on a space (mis-information */ if (spk_say_word_ctl && ch == SPACE) { @@ -565,26 +565,26 @@ static u_long get_word(struct vc_data *vc) return 0; } else if ((tmpx < vc->vc_cols - 2) && (ch == SPACE || ch == 0 || IS_WDLM(ch)) - && ((char)get_char(vc, (u_short *) &tmp_pos + 1, &temp) > + && ((char)get_char(vc, (u_short *)&tmp_pos + 1, &temp) > SPACE)) { tmp_pos += 2; tmpx++; } else while (tmpx > 0) { - ch = (char)get_char(vc, (u_short *) tmp_pos - 1, &temp); + ch = (char)get_char(vc, (u_short *)tmp_pos - 1, &temp); if ((ch == SPACE || ch == 0 || IS_WDLM(ch)) - && ((char)get_char(vc, (u_short *) tmp_pos, &temp) > + && ((char)get_char(vc, (u_short *)tmp_pos, &temp) > SPACE)) break; tmp_pos -= 2; tmpx--; } - attr_ch = get_char(vc, (u_short *) tmp_pos, &spk_attr); + attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr); buf[cnt++] = attr_ch & 0xff; while (tmpx < vc->vc_cols - 1) { tmp_pos += 2; tmpx++; - ch = (char)get_char(vc, (u_short *) tmp_pos, &temp); + ch = (char)get_char(vc, (u_short *)tmp_pos, &temp); if ((ch == SPACE) || ch == 0 || (IS_WDLM(buf[cnt - 1]) && (ch > SPACE))) break; @@ -639,7 +639,7 @@ static void say_prev_word(struct vc_data *vc) } else spk_x--; spk_pos -= 2; - ch = (char)get_char(vc, (u_short *) spk_pos, &temp); + ch = (char)get_char(vc, (u_short *)spk_pos, &temp); if (ch == SPACE || ch == 0) state = 0; else if (IS_WDLM(ch)) @@ -672,7 +672,7 @@ static void say_next_word(struct vc_data *vc) return; } while (1) { - ch = (char)get_char(vc, (u_short *) spk_pos, &temp); + ch = (char)get_char(vc, (u_short *)spk_pos, &temp); if (ch == SPACE || ch == 0) state = 0; else if (IS_WDLM(ch)) @@ -709,7 +709,7 @@ static void spell_word(struct vc_data *vc) if (!get_word(vc)) return; - while ((ch = (u_char) *cp)) { + while ((ch = (u_char)*cp)) { if (cp != buf) synth_printf(" %s ", delay_str[spk_spell_delay]); if (IS_CHAR(ch, B_CAP)) { @@ -751,7 +751,7 @@ static int get_line(struct vc_data *vc) spk_old_attr = spk_attr; 
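The cast cleanups in these hunks all feed get_char(), which returns one 16-bit cell of the console text buffer: the character sits in the low byte and the attribute in the high byte, which is why the reading position always advances in steps of two bytes. A tiny illustration of that split, assuming the usual VGA text-cell layout; these helpers are not part of speakup:

	static inline unsigned char cell_char(unsigned short cell)
	{
		return cell & 0xff;		/* the glyph, what buf[i] keeps above */
	}

	static inline unsigned char cell_attr(unsigned short cell)
	{
		return (cell >> 8) & 0xff;	/* colour/highlight attribute */
	}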
spk_attr = get_attributes(vc, (u_short *)spk_pos); for (i = 0; i < vc->vc_cols; i++) { - buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2); + buf[i] = (u_char)get_char(vc, (u_short *)tmp, &tmp2); tmp += 2; } for (--i; i >= 0; i--) @@ -816,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to, spk_old_attr = spk_attr; spk_attr = get_attributes(vc, (u_short *)from); while (from < to) { - buf[i++] = (char)get_char(vc, (u_short *) from, &tmp); + buf[i++] = (char)get_char(vc, (u_short *)from, &tmp); from += 2; if (i >= vc->vc_size_row) break; @@ -892,7 +892,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc) spk_attr = get_attributes(vc, (u_short *)start); while (start < end) { - sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp); + sentbuf[bn][i] = (char)get_char(vc, (u_short *)start, &tmp); if (i > 0) { if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.' && numsentences[bn] < 9) { @@ -1040,7 +1040,7 @@ static void say_position(struct vc_data *vc) static void say_char_num(struct vc_data *vc) { u_char tmp; - u_short ch = get_char(vc, (u_short *) spk_pos, &tmp); + u_short ch = get_char(vc, (u_short *)spk_pos, &tmp); ch &= 0xff; synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch); @@ -1085,7 +1085,7 @@ static void spkup_write(const char *in_buf, int count) (currsentence <= numsentences[bn])) synth_insert_next_index(currsentence++); } - ch = (u_char) *in_buf++; + ch = (u_char)*in_buf++; char_type = spk_chartab[ch]; if (ch == old_ch && !(char_type & B_NUM)) { if (++rep_count > 2) @@ -1579,7 +1579,7 @@ static int count_highlight_color(struct vc_data *vc) int cc; int vc_num = vc->vc_num; u16 ch; - u16 *start = (u16 *) vc->vc_origin; + u16 *start = (u16 *)vc->vc_origin; for (i = 0; i < 8; i++) speakup_console[vc_num]->ht.bgcount[i] = 0; diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c index 0149edc1e0aef63b5a690654e6099d301301a93e..aeb2b865615a522471558e154bacff1c71af9c97 100644 --- a/drivers/staging/speakup/selection.c +++ b/drivers/staging/speakup/selection.c @@ -137,7 +137,7 @@ static void __speakup_paste_selection(struct work_struct *work) struct speakup_paste_work *spw = container_of(work, struct speakup_paste_work, work); struct tty_struct *tty = xchg(&spw->tty, NULL); - struct vc_data *vc = (struct vc_data *) tty->driver_data; + struct vc_data *vc = (struct vc_data *)tty->driver_data; int pasted = 0, count; struct tty_ldisc *ld; DECLARE_WAITQUEUE(wait, current); diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c index c2c435cc3d63d7331f9c775a95f3cd309c692771..ef89dc1c21c81c5951ce6ca99cdc1708e1444f3e 100644 --- a/drivers/staging/speakup/serialio.c +++ b/drivers/staging/speakup/serialio.c @@ -99,7 +99,7 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id) while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) { c = inb_p(speakup_info.port_tts+UART_RX); - synth->read_buff_add((u_char) c); + synth->read_buff_add((u_char)c); } spin_unlock_irqrestore(&speakup_info.spinlock, flags); return IRQ_HANDLED; @@ -113,7 +113,7 @@ static void start_serial_interrupt(int irq) return; rv = request_irq(irq, synth_readbuf_handler, IRQF_SHARED, - "serial", (void *) synth_readbuf_handler); + "serial", (void *)synth_readbuf_handler); if (rv) pr_err("Unable to request Speakup serial I R Q\n"); @@ -141,7 +141,7 @@ void spk_stop_serial_interrupt(void) /* Turn off interrupts */ outb(0, speakup_info.port_tts+UART_IER); /* Free IRQ */ - free_irq(serstate->irq, (void *) 
synth_readbuf_handler); + free_irq(serstate->irq, (void *)synth_readbuf_handler); } int spk_wait_for_xmitr(void) diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index 6b1d0f538bbd320cd4e028e1300446535050d711..ed3e4282f41c4bc5a211b710c48bad0873f3d3b4 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -20,8 +20,8 @@ */ #include -#include /* for misc_register, and SYNTH_MINOR */ -#include /* for poll_wait() */ +#include /* for misc_register, and SYNTH_MINOR */ +#include /* for poll_wait() */ #include /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ #include "spk_priv.h" @@ -55,27 +55,26 @@ static struct var_t vars[] = { V_LAST_VAR }; -/* - * These attributes will appear in /sys/accessibility/speakup/soft. - */ +/* These attributes will appear in /sys/accessibility/speakup/soft. */ + static struct kobj_attribute caps_start_attribute = - __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = - __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute freq_attribute = - __ATTR(freq, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(freq, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = - __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = - __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = - __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = - __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = - __ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(voice, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = - __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); /* * We should uncomment the following definition, when we agree on a @@ -85,15 +84,15 @@ static struct kobj_attribute vol_attribute = */ static struct kobj_attribute delay_time_attribute = - __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = - __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = - __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = - __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = - __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, 
spk_var_store); /* * Create a group of attributes so that we can create and destroy them all @@ -162,8 +161,8 @@ static char *get_initstring(void) cp = buf; var = synth_soft.vars; while (var->var_id != MAXVARS) { - if (var->var_id != CAPS_START && var->var_id != CAPS_STOP - && var->var_id != DIRECT) + if (var->var_id != CAPS_START && var->var_id != CAPS_STOP && + var->var_id != DIRECT) cp = cp + sprintf(cp, var->u.n.synth_fmt, var->u.n.value); var++; @@ -277,8 +276,7 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf, return count; } -static unsigned int softsynth_poll(struct file *fp, - struct poll_table_struct *wait) +static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait) { unsigned long flags; int ret = 0; @@ -310,10 +308,8 @@ static const struct file_operations softsynth_fops = { .release = softsynth_close, }; - static int softsynth_probe(struct spk_synth *synth) { - if (misc_registered != 0) return 0; memset(&synth_device, 0, sizeof(synth_device)); diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c index e449f2770c1f2750e1a299e4babfab433d3cafef..586890908826e303242db45be9ce6b99974dfae5 100644 --- a/drivers/staging/speakup/speakup_spkout.c +++ b/drivers/staging/speakup/speakup_spkout.c @@ -1,6 +1,6 @@ /* * originally written by: Kirk Reiser -* this version considerably modified by David Borowski, david575@rogers.com + * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. @@ -40,34 +40,33 @@ static struct var_t vars[] = { V_LAST_VAR }; -/* - * These attributes will appear in /sys/accessibility/speakup/spkout. - */ +/* These attributes will appear in /sys/accessibility/speakup/spkout. 
*/ + static struct kobj_attribute caps_start_attribute = - __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = - __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = - __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = - __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = - __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = - __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = - __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = - __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = - __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = - __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = - __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = - __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c index fd98d4ffcb3ef8efcd5f059acf3049fc941a9ebf..b3d2cfd20ac8656cfba79e211663dc020488d558 100644 --- a/drivers/staging/speakup/speakup_txprt.c +++ b/drivers/staging/speakup/speakup_txprt.c @@ -1,6 +1,6 @@ /* * originally written by: Kirk Reiser -* this version considerably modified by David Borowski, david575@rogers.com + * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. @@ -36,32 +36,31 @@ static struct var_t vars[] = { V_LAST_VAR }; -/* - * These attributes will appear in /sys/accessibility/speakup/txprt. - */ +/* These attributes will appear in /sys/accessibility/speakup/txprt. 
*/ + static struct kobj_attribute caps_start_attribute = - __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = - __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = - __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = - __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = - __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = - __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = - __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = - __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = - __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = - __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = - __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h index 130e9cb0118bd60e5acbf30afcabe31e24a6474c..c95b68ebd8e7ccb81e9270ccad5c310397e49967 100644 --- a/drivers/staging/speakup/spk_priv_keyinfo.h +++ b/drivers/staging/speakup/spk_priv_keyinfo.h @@ -23,84 +23,82 @@ #define FIRST_SYNTH_VAR RATE /* 0 is reserved for no remap */ -#define SPEAKUP_GOTO 0x01 -#define SPEECH_KILL 0x02 -#define SPEAKUP_QUIET 0x03 -#define SPEAKUP_CUT 0x04 -#define SPEAKUP_PASTE 0x05 -#define SAY_FIRST_CHAR 0x06 -#define SAY_LAST_CHAR 0x07 -#define SAY_CHAR 0x08 -#define SAY_PREV_CHAR 0x09 -#define SAY_NEXT_CHAR 0x0a -#define SAY_WORD 0x0b -#define SAY_PREV_WORD 0x0c -#define SAY_NEXT_WORD 0x0d -#define SAY_LINE 0x0e -#define SAY_PREV_LINE 0x0f -#define SAY_NEXT_LINE 0x10 -#define TOP_EDGE 0x11 -#define BOTTOM_EDGE 0x12 -#define LEFT_EDGE 0x13 -#define RIGHT_EDGE 0x14 -#define SPELL_PHONETIC 0x15 -#define SPELL_WORD 0x16 -#define SAY_SCREEN 0x17 -#define SAY_POSITION 0x18 -#define SAY_ATTRIBUTES 0x19 -#define SPEAKUP_OFF 0x1a -#define SPEAKUP_PARKED 0x1b -#define SAY_LINE_INDENT 0x1c -#define SAY_FROM_TOP 0x1d -#define SAY_TO_BOTTOM 0x1e -#define SAY_FROM_LEFT 0x1f -#define SAY_TO_RIGHT 0x20 -#define SAY_CHAR_NUM 0x21 -#define EDIT_SOME 0x22 -#define EDIT_MOST 0x23 -#define SAY_PHONETIC_CHAR 0x24 -#define EDIT_DELIM 0x25 -#define EDIT_REPEAT 0x26 -#define EDIT_EXNUM 0x27 -#define SET_WIN 0x28 -#define CLEAR_WIN 0x29 
-#define ENABLE_WIN 0x2a -#define SAY_WIN 0x2b -#define SPK_LOCK 0x2c -#define SPEAKUP_HELP 0x2d -#define TOGGLE_CURSORING 0x2e -#define READ_ALL_DOC 0x2f -#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */ - -#define SPK_KEY 0x80 -#define FIRST_EDIT_BITS 0x22 - +#define SPEAKUP_GOTO 0x01 +#define SPEECH_KILL 0x02 +#define SPEAKUP_QUIET 0x03 +#define SPEAKUP_CUT 0x04 +#define SPEAKUP_PASTE 0x05 +#define SAY_FIRST_CHAR 0x06 +#define SAY_LAST_CHAR 0x07 +#define SAY_CHAR 0x08 +#define SAY_PREV_CHAR 0x09 +#define SAY_NEXT_CHAR 0x0a +#define SAY_WORD 0x0b +#define SAY_PREV_WORD 0x0c +#define SAY_NEXT_WORD 0x0d +#define SAY_LINE 0x0e +#define SAY_PREV_LINE 0x0f +#define SAY_NEXT_LINE 0x10 +#define TOP_EDGE 0x11 +#define BOTTOM_EDGE 0x12 +#define LEFT_EDGE 0x13 +#define RIGHT_EDGE 0x14 +#define SPELL_PHONETIC 0x15 +#define SPELL_WORD 0x16 +#define SAY_SCREEN 0x17 +#define SAY_POSITION 0x18 +#define SAY_ATTRIBUTES 0x19 +#define SPEAKUP_OFF 0x1a +#define SPEAKUP_PARKED 0x1b +#define SAY_LINE_INDENT 0x1c +#define SAY_FROM_TOP 0x1d +#define SAY_TO_BOTTOM 0x1e +#define SAY_FROM_LEFT 0x1f +#define SAY_TO_RIGHT 0x20 +#define SAY_CHAR_NUM 0x21 +#define EDIT_SOME 0x22 +#define EDIT_MOST 0x23 +#define SAY_PHONETIC_CHAR 0x24 +#define EDIT_DELIM 0x25 +#define EDIT_REPEAT 0x26 +#define EDIT_EXNUM 0x27 +#define SET_WIN 0x28 +#define CLEAR_WIN 0x29 +#define ENABLE_WIN 0x2a +#define SAY_WIN 0x2b +#define SPK_LOCK 0x2c +#define SPEAKUP_HELP 0x2d +#define TOGGLE_CURSORING 0x2e +#define READ_ALL_DOC 0x2f +#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */ +#define SPK_KEY 0x80 +#define FIRST_EDIT_BITS 0x22 #define FIRST_SET_VAR SPELL_DELAY -#define VAR_START 0x40 /* increase if adding more than 0x3f functions */ +#define VAR_START 0x40 /* increase if adding more than 0x3f functions */ /* keys for setting variables, must be ordered same as the enum for var_ids */ /* with dec being even and inc being 1 greater */ -#define SPELL_DELAY_DEC (VAR_START+0) -#define SPELL_DELAY_INC (SPELL_DELAY_DEC+1) -#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC+2) -#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC+1) -#define READING_PUNC_DEC (PUNC_LEVEL_DEC+2) -#define READING_PUNC_INC (READING_PUNC_DEC+1) -#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC+2) -#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC+1) -#define BLEEPS_DEC (ATTRIB_BLEEP_DEC+2) -#define BLEEPS_INC (BLEEPS_DEC+1) -#define RATE_DEC (BLEEPS_DEC+2) -#define RATE_INC (RATE_DEC+1) -#define PITCH_DEC (RATE_DEC+2) -#define PITCH_INC (PITCH_DEC+1) -#define VOL_DEC (PITCH_DEC+2) -#define VOL_INC (VOL_DEC+1) -#define TONE_DEC (VOL_DEC+2) -#define TONE_INC (TONE_DEC+1) -#define PUNCT_DEC (TONE_DEC+2) -#define PUNCT_INC (PUNCT_DEC+1) -#define VOICE_DEC (PUNCT_DEC+2) -#define VOICE_INC (VOICE_DEC+1) +#define SPELL_DELAY_DEC (VAR_START + 0) +#define SPELL_DELAY_INC (SPELL_DELAY_DEC + 1) +#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC + 2) +#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC + 1) +#define READING_PUNC_DEC (PUNC_LEVEL_DEC + 2) +#define READING_PUNC_INC (READING_PUNC_DEC + 1) +#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC + 2) +#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC + 1) +#define BLEEPS_DEC (ATTRIB_BLEEP_DEC + 2) +#define BLEEPS_INC (BLEEPS_DEC + 1) +#define RATE_DEC (BLEEPS_DEC + 2) +#define RATE_INC (RATE_DEC + 1) +#define PITCH_DEC (RATE_DEC + 2) +#define PITCH_INC (PITCH_DEC + 1) +#define VOL_DEC (PITCH_DEC + 2) +#define VOL_INC (VOL_DEC + 1) +#define TONE_DEC (VOL_DEC + 2) +#define TONE_INC (TONE_DEC + 1) +#define PUNCT_DEC (TONE_DEC + 2) +#define PUNCT_INC 
(PUNCT_DEC + 1) +#define VOICE_DEC (PUNCT_DEC + 2) +#define VOICE_INC (VOICE_DEC + 1) #endif diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h index e8ff5d7d6419b3f8f53ccc5896c559eccfc83e45..b07f6cc4f284ef615e8a1445a122c15d0e00e3e4 100644 --- a/drivers/staging/speakup/spk_types.h +++ b/drivers/staging/speakup/spk_types.h @@ -1,16 +1,14 @@ #ifndef SPEAKUP_TYPES_H #define SPEAKUP_TYPES_H -/* - * This file includes all of the typedefs and structs used in speakup. - */ +/* This file includes all of the typedefs and structs used in speakup. */ #include #include #include #include #include /* for wait_queue */ -#include /* for __init */ +#include /* for __init */ #include #include #include @@ -105,7 +103,7 @@ struct st_var_header { enum var_id_t var_id; enum var_type_t var_type; void *p_val; /* ptr to programs variable to store value */ - void *data; /* ptr to the vars data */ + void *data; /* ptr to the vars data */ }; struct num_var_t { @@ -114,8 +112,8 @@ struct num_var_t { int low; int high; short offset, multiplier; /* for fiddling rates etc. */ - char *out_str; /* if synth needs char representation of number */ - int value; /* current value */ + char *out_str; /* if synth needs char representation of number */ + int value; /* current value */ }; struct punc_var_t { @@ -169,7 +167,7 @@ struct spk_synth { int (*probe)(struct spk_synth *synth); void (*release)(void); const char *(*synth_immediate)(struct spk_synth *synth, - const char *buff); + const char *buff); void (*catch_up)(struct spk_synth *synth); void (*flush)(struct spk_synth *synth); int (*is_alive)(struct spk_synth *synth); @@ -181,7 +179,7 @@ struct spk_synth { struct attribute_group attributes; }; -/** +/* * module_spk_synth() - Helper macro for registering a speakup driver * @__spk_synth: spk_synth struct * Helper macro for speakup drivers which do not do anything special in module diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index 54b2f391862834c277123319943680c7b6d9cd68..a61c02ba06daec5e051eea3a1980c0b199626b1c 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c @@ -8,7 +8,7 @@ #include /* for loops_per_sec */ #include #include -#include /* for copy_from_user */ +#include /* for copy_from_user */ #include #include #include @@ -67,13 +67,14 @@ int spk_serial_synth_probe(struct spk_synth *synth) return -ENODEV; } pr_info("%s: ttyS%i, Driver Version %s\n", - synth->long_name, synth->ser, synth->version); + synth->long_name, synth->ser, synth->version); synth->alive = 1; return 0; } EXPORT_SYMBOL_GPL(spk_serial_synth_probe); -/* Main loop of the progression thread: keep eating from the buffer +/* + * Main loop of the progression thread: keep eating from the buffer * and push to the serial port, waiting as needed * * For devices that have a "full" notification mechanism, the driver can @@ -303,12 +304,11 @@ void spk_get_index_count(int *linecount, int *sentcount) sentence_count = ind % 10; if ((ind / 10) <= synth->indexing.currindex) - index_count = synth->indexing.currindex-(ind/10); + index_count = synth->indexing.currindex - (ind / 10); else index_count = synth->indexing.currindex - -synth->indexing.lowindex - + synth->indexing.highindex-(ind/10)+1; - + - synth->indexing.lowindex + + synth->indexing.highindex - (ind / 10) + 1; } *sentcount = sentence_count; *linecount = index_count; @@ -406,8 +406,8 @@ static int do_synth_init(struct spk_synth *in_synth) speakup_register_var(var); if (!spk_quiet_boot) synth_printf("%s found\n", 
synth->long_name); - if (synth->attributes.name - && sysfs_create_group(speakup_kobj, &synth->attributes) < 0) + if (synth->attributes.name && sysfs_create_group(speakup_kobj, + &synth->attributes) < 0) return -ENOMEM; synth_flags = synth->flags; wake_up_interruptible_all(&speakup_event); @@ -476,10 +476,10 @@ void synth_remove(struct spk_synth *in_synth) break; } for ( ; synths[i] != NULL; i++) /* compress table */ - synths[i] = synths[i+1]; + synths[i] = synths[i + 1]; module_status = 0; mutex_unlock(&spk_mutex); } EXPORT_SYMBOL_GPL(synth_remove); -short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC|B_SYM }; +short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM }; diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c index 90c383ee7c3f55fb6d8d04c56589f2d046c9cd76..8c64f1ada6e04b108b71613abc9dc16c0aaea7a9 100644 --- a/drivers/staging/speakup/thread.c +++ b/drivers/staging/speakup/thread.c @@ -27,7 +27,7 @@ int speakup_thread(void *data) our_sound = spk_unprocessed_sound; spk_unprocessed_sound.active = 0; prepare_to_wait(&speakup_event, &wait, - TASK_INTERRUPTIBLE); + TASK_INTERRUPTIBLE); should_break = kthread_should_stop() || our_sound.active || (synth && synth->catch_up && synth->alive && @@ -47,7 +47,8 @@ int speakup_thread(void *data) if (our_sound.active) kd_mksound(our_sound.freq, our_sound.jiffies); if (synth && synth->catch_up && synth->alive) { - /* It is up to the callee to take the lock, so that it + /* + * It is up to the callee to take the lock, so that it * can sleep whenever it likes */ synth->catch_up(synth); diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c index 21186e3dc7ad13bdc74de1e0e710bc2c0cbc29fa..cc984196020f79a7398058416c94155288d20bbb 100644 --- a/drivers/staging/speakup/varhandlers.c +++ b/drivers/staging/speakup/varhandlers.c @@ -237,8 +237,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how) if (!var_data->u.n.out_str) l = sprintf(cp, var_data->u.n.synth_fmt, (int)val); else - l = sprintf(cp, - var_data->u.n.synth_fmt, var_data->u.n.out_str[val]); + l = sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]); synth_printf("%s", cp); return 0; } @@ -266,7 +265,8 @@ int spk_set_string_var(const char *page, struct st_var_header *var, int len) return 0; } -/* spk_set_mask_bits sets or clears the punc/delim/repeat bits, +/* + * spk_set_mask_bits sets or clears the punc/delim/repeat bits, * if input is null uses the defaults. * values for how: 0 clears bits of chars supplied, * 1 clears allk, 2 sets bits for chars diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h index cba4433bcd51963d4f50db940dbcf6e31e5ada07..54f490090a5915222f72f6e6671445ade46795a7 100644 --- a/drivers/staging/unisys/include/iochannel.h +++ b/drivers/staging/unisys/include/iochannel.h @@ -1,20 +1,20 @@ -/* Copyright (C) 2010 - 2013 UNISYS CORPORATION */ +/* Copyright (C) 2010 - 2016 UNISYS CORPORATION */ /* All rights reserved. */ #ifndef __IOCHANNEL_H__ #define __IOCHANNEL_H__ /* * Everything needed for IOPart-GuestPart communication is define in - * this file. Note: Everything is OS-independent because this file is + * this file. Note: Everything is OS-independent because this file is * used by Windows, Linux and possible EFI drivers. */ /* * Communication flow between the IOPart and GuestPart uses the channel headers - * channel state. The following states are currently being used: + * channel state. 
The following states are currently being used: * UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED * - * additional states will be used later. No locking is needed to switch between + * Additional states will be used later. No locking is needed to switch between * states due to the following rules: * * 1. IOPart is only the only partition allowed to change from UNIT @@ -39,10 +39,11 @@ #define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \ ULTRA_CHANNEL_PROTOCOL_SIGNATURE -/* Must increment these whenever you insert or delete fields within this channel - * struct. Also increment whenever you change the meaning of fields within this - * channel struct so as to break pre-existing software. Note that you can - * usually add fields to the END of the channel struct withOUT needing to +/* + * Must increment these whenever you insert or delete fields within this channel + * struct. Also increment whenever you change the meaning of fields within this + * channel struct so as to break pre-existing software. Note that you can + * usually add fields to the END of the channel struct without needing to * increment this. */ #define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2 @@ -70,61 +71,62 @@ #define MINNUM(a, b) (((a) < (b)) ? (a) : (b)) #define MAXNUM(a, b) (((a) > (b)) ? (a) : (b)) -/* define the two queues per data channel between iopart and ioguestparts */ -/* used by ioguestpart to 'insert' signals to iopart */ +/* Define the two queues per data channel between iopart and ioguestparts. */ +/* Used by ioguestpart to 'insert' signals to iopart. */ #define IOCHAN_TO_IOPART 0 -/* used by ioguestpart to 'remove' signals from iopart, same previous queue */ +/* Used by ioguestpart to 'remove' signals from iopart, same previous queue. */ #define IOCHAN_FROM_IOPART 1 -/* size of cdb - i.e., scsi cmnd */ +/* Size of cdb - i.e., SCSI cmnd */ #define MAX_CMND_SIZE 16 #define MAX_SENSE_SIZE 64 #define MAX_PHYS_INFO 64 -/* various types of network packets that can be sent in cmdrsp */ +/* Various types of network packets that can be sent in cmdrsp. 
 */
 enum net_types {
-	NET_RCV_POST = 0,	/* submit buffer to hold receiving
+	NET_RCV_POST = 0,	/*
+				 * Submit buffer to hold receiving
 				 * incoming packet
 				 */
-	/* virtnic -> uisnic */
+	/* visornic -> uisnic */
 	NET_RCV,		/* incoming packet received */
 	/* uisnic -> virtpci */
 	NET_XMIT,		/* for outgoing net packets */
-	/* virtnic -> uisnic */
+	/* visornic -> uisnic */
 	NET_XMIT_DONE,		/* outgoing packet xmitted */
 	/* uisnic -> virtpci */
 	NET_RCV_ENBDIS,		/* enable/disable packet reception */
-	/* virtnic -> uisnic */
+	/* visornic -> uisnic */
 	NET_RCV_ENBDIS_ACK,	/* acknowledge enable/disable packet */
 				/* reception */
-	/* uisnic -> virtnic */
+	/* uisnic -> visornic */
 	NET_RCV_PROMISC,	/* enable/disable promiscuous mode */
-	/* virtnic -> uisnic */
-	NET_CONNECT_STATUS,	/* indicate the loss or restoration of a network
+	/* visornic -> uisnic */
+	NET_CONNECT_STATUS,	/*
+				 * indicate the loss or restoration of a network
 				 * connection
 				 */
-	/* uisnic -> virtnic */
-	NET_MACADDR,		/* indicates the client has requested to update
-				 * its MAC addr
+	/* uisnic -> visornic */
+	NET_MACADDR,		/*
+				 * Indicates the client has requested to update
+				 * its MAC address
 				 */
-	NET_MACADDR_ACK,	/* MAC address */
+	NET_MACADDR_ACK,	/* MAC address acknowledge */
 };
-#define ETH_HEADER_SIZE 14	/* size of ethernet header */
-
-#define ETH_MIN_DATA_SIZE 46	/* minimum eth data size */
-#define ETH_MIN_PACKET_SIZE (ETH_HEADER_SIZE + ETH_MIN_DATA_SIZE)
+#define ETH_MIN_DATA_SIZE 46	/* minimum eth data size */
+#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
-#define ETH_MAX_MTU 16384	/* maximum data size */
+#define VISOR_ETH_MAX_MTU 16384	/* maximum data size */
 #ifndef MAX_MACADDR_LEN
 #define MAX_MACADDR_LEN 6	/* number of bytes in MAC address */
-#endif /* MAX_MACADDR_LEN */
+#endif
-/* various types of scsi task mgmt commands */
+/* Various types of scsi task mgmt commands. */
 enum task_mgmt_types {
 	TASK_MGMT_ABORT_TASK = 1,
 	TASK_MGMT_BUS_RESET,
@@ -132,7 +134,7 @@ enum task_mgmt_types {
 	TASK_MGMT_TARGET_RESET,
 };
-/* various types of vdisk mgmt commands */
+/* Various types of vdisk mgmt commands. */
 enum vdisk_mgmt_types {
 	VDISK_MGMT_ACQUIRE = 1,
 	VDISK_MGMT_RELEASE,
@@ -146,7 +148,7 @@ struct phys_info {
 #define MIN_NUMSIGNALS 64
-/* structs with pragma pack */
+/* Structs with pragma pack. */
 struct guest_phys_info {
 	u64 address;
@@ -156,9 +158,9 @@ struct guest_phys_info {
 #define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info))
 struct uisscsi_dest {
-	u32 channel;	/* channel == bus number */
-	u32 id;		/* id == target number */
-	u32 lun;	/* lun == logical unit number */
+	u32 channel;	/* channel == bus number */
+	u32 id;		/* id == target number */
+	u32 lun;	/* lun == logical unit number */
 } __packed;
 struct vhba_wwnn {
@@ -166,7 +168,8 @@ struct vhba_wwnn {
 	u32 wwnn2;
 } __packed;
-/* WARNING: Values stired in this structure must contain maximum counts (not
+/*
+ * WARNING: Values stored in this structure must contain maximum counts (not
  * maximum values).
 */
 struct vhba_config_max {/* 20 bytes */
@@ -189,23 +192,24 @@ struct uiscmdrsp_scsi {
					 * information for each
					 * fragment
					 */
-	enum dma_data_direction data_dir; /* direction of the data, if any */
+	enum dma_data_direction data_dir; /* direction of the data, if any */
 	struct uisscsi_dest vdest;	/* identifies the virtual hba, id, */
					/* channel, lun to which cmd was sent */
-	/* Needed to queue the rsp back to cmd originator */
-	int linuxstat;		/* original Linux status used by linux vdisk */
+	/* Needed to queue the rsp back to cmd originator. */
+	int linuxstat;		/* original Linux status used by Linux vdisk */
 	u8 scsistat;		/* the scsi status */
 	u8 addlstat;		/* non-scsi status */
 #define ADDL_SEL_TIMEOUT 4
-	/* the following fields are need to determine the result of command */
+	/* The following fields are needed to determine the result of the command. */
 	u8 sensebuf[MAX_SENSE_SIZE];	/* sense info in case cmd failed; */
-	/* it holds the sense_data struct; */
-	/* see that struct for details. */
-	void *vdisk; /* pointer to the vdisk to clean up when IO completes. */
+	/* sensebuf holds the sense_data struct; */
+	/* See sense_data struct for more details. */
+	void *vdisk; /* Pointer to the vdisk to clean up when IO completes. */
 	int no_disk_result;
-	/* used to return no disk inquiry result
+	/*
+	 * Used to return no disk inquiry result
	 * when no_disk_result is set to 1,
	 * scsi.scsistat is SAM_STAT_GOOD
	 * scsi.addlstat is 0
@@ -214,35 +218,44 @@ struct uiscmdrsp_scsi {
	 */
 } __packed;
-/* Defines to support sending correct inquiry result when no disk is
+/*
+ * Defines to support sending correct inquiry result when no disk is
  * configured.
  */
-/* From SCSI SPC2 -
+/*
+ * From SCSI SPC2 -
 *
 * If the target is not capable of supporting a device on this logical unit, the
 * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
 * and PERIPHERAL DEVICE TYPE set to 1Fh).
 *
- *The device server is capable of supporting the specified peripheral device
- *type on this logical unit. However, the physical device is not currently
- *connected to this logical unit.
+ * The device server is capable of supporting the specified peripheral device
+ * type on this logical unit. However, the physical device is not currently
+ * connected to this logical unit.
 */
-#define DEV_NOT_CAPABLE 0x7f	/* peripheral qualifier of 0x3 */
-				/* peripheral type of 0x1f */
-				/* specifies no device but target present */
+#define DEV_NOT_CAPABLE 0x7f	/*
+				 * peripheral qualifier of 0x3
+				 * peripheral type of 0x1f
+				 * specifies no device but target present
+				 */
-#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 */
-				/* peripheral type of 0 - disk */
-				/* specifies device capable, but not present */
+#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1
+					   * peripheral type of 0 - disk
+					   * Specifies device capable, but
+					   * not present
+					   */
-#define DEV_HISUPPORT 0x10	/* HiSup = 1; shows support for report luns */
-				/* must be returned for lun 0. */
+#define DEV_HISUPPORT 0x10	/*
+				 * HiSup = 1; shows support for report luns
+				 * must be returned for lun 0.
+				 */
-/* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
- * in buf[4] some linux code accesses bytes beyond 5 to retrieve vendor, product
- * & revision. Yikes! So let us always send back 36 bytes, the minimum for
+/*
+ * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
+ * in buf[4] some Linux code accesses bytes beyond 5 to retrieve vendor, product
+ * and revision.
Yikes! So let us always send back 36 bytes, the minimum for * inquiry result. */ #define NO_DISK_INQUIRY_RESULT_LEN 36 @@ -250,11 +263,12 @@ struct uiscmdrsp_scsi { #define MIN_INQUIRY_RESULT_LEN 5 /* 5 bytes minimum for inquiry result */ /* SCSI device version for no disk inquiry result */ -#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */ +#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */ -/* Struct & Defines to support sense information. */ +/* Struct and Defines to support sense information. */ -/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is +/* + * The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is * initialized in exactly the manner that is recommended in Windows (hence the * odd values). * When set, these fields will have the following values: @@ -288,9 +302,9 @@ struct net_pkt_xmt { int len; /* full length of data in the packet */ int num_frags; /* number of fragments in frags containing data */ struct phys_info frags[MAX_PHYS_INFO]; /* physical page information */ - char ethhdr[ETH_HEADER_SIZE]; /* the ethernet header */ + char ethhdr[ETH_HLEN]; /* the ethernet header */ struct { - /* these are needed for csum at uisnic end */ + /* These are needed for csum at uisnic end */ u8 valid; /* 1 = struct is valid - else ignore */ u8 hrawoffv; /* 1 = hwrafoff is valid */ u8 nhrawoffv; /* 1 = nhwrafoff is valid */ @@ -302,7 +316,8 @@ struct net_pkt_xmt { /* nhrawoff points to the start of the NETWORK LAYER HEADER */ } lincsum; - /* **** NOTE **** + /* + * NOTE: * The full packet is described in frags but the ethernet header is * separately kept in ethhdr so that uisnic doesn't have "MAP" the * guest memory to get to the header. uisnic needs ethhdr to @@ -311,41 +326,54 @@ struct net_pkt_xmt { } __packed; struct net_pkt_xmtdone { - u32 xmt_done_result; /* result of NET_XMIT */ + u32 xmt_done_result; /* result of NET_XMIT */ } __packed; -/* RCVPOST_BUF_SIZe must be at most page_size(4096) - cache_line_size (64) The +/* + * RCVPOST_BUF_SIZE must be at most page_size(4096) - cache_line_size (64) The * reason is because dev_skb_alloc which is used to generate RCV_POST skbs in - * virtnic requires that there is "overhead" in the buffer, and pads 16 bytes. I - * prefer to use 1 full cache line size for "overhead" so that transfers are - * better. IOVM requires that a buffer be represented by 1 phys_info structure + * visornic requires that there is "overhead" in the buffer, and pads 16 bytes. + * Use 1 full cache line size for "overhead" so that transfers are optimized. + * IOVM requires that a buffer be represented by 1 phys_info structure * which can only cover page_size. */ #define RCVPOST_BUF_SIZE 4032 #define MAX_NET_RCV_CHAIN \ - ((ETH_MAX_MTU + ETH_HEADER_SIZE + RCVPOST_BUF_SIZE - 1) \ + ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \ / RCVPOST_BUF_SIZE) +/* + * rcv buf size must be large enough to include ethernet data len + ethernet + * header len - we are choosing 2K because it is guaranteed to be describable. + */ struct net_pkt_rcvpost { - /* rcv buf size must be large enough to include ethernet data len + - * ethernet header len - we are choosing 2K because it is guaranteed - * to be describable - */ - struct phys_info frag; /* physical page information for the */ - /* single fragment 2K rcv buf */ - u64 unique_num; - /* unique_num ensure that receive posts are returned to */ - /* the Adapter which we sent them originally. 
 */
+	/* Physical page information for the single fragment 2K rcv buf */
+	struct phys_info frag;
+
+	/*
+	 * Ensures that receive posts are returned to the adapter from which we
+	 * sent them originally.
+	 */
+	u64 unique_num;
+
 } __packed;
+/*
+ * The number of rcvbuf that can be chained is based on max mtu and size of each
+ * rcvbuf.
+ */
 struct net_pkt_rcv {
-	/* the number of receive buffers that can be chained */
-	/* is based on max mtu and size of each rcv buf */
-	u32 rcv_done_len;	/* length of received data */
-	u8 numrcvbufs;		/* number of receive buffers that contain the */
-	/* incoming data; guest end MUST chain these together. */
-	void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
-	/* each entry is a receive buffer provided by NET_RCV_POST. */
+	u32 rcv_done_len;	/* length of received data */
+
+	/*
+	 * numrcvbufs: contain the incoming data; guest side MUST chain these
+	 * together.
+	 */
+	u8 numrcvbufs;
+
+	void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
+
+	/* Each entry is a receive buffer provided by NET_RCV_POST. */
 	/* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
 	u64 unique_num;
 	u32 rcvs_dropped_delta;
@@ -353,12 +381,12 @@ struct net_pkt_rcv {
 struct net_pkt_enbdis {
 	void *context;
-	u16 enable;		/* 1 = enable, 0 = disable */
+	u16 enable;		/* 1 = enable, 0 = disable */
 } __packed;
 struct net_pkt_macaddr {
 	void *context;
-	u8 macaddr[MAX_MACADDR_LEN];	/* 6 bytes */
+	u8 macaddr[MAX_MACADDR_LEN];	/* 6 bytes */
 } __packed;
 /* cmd rsp packet used for VNIC network traffic */
@@ -379,41 +407,44 @@ struct uiscmdrsp_net {
 } __packed;
 struct uiscmdrsp_scsitaskmgmt {
+	/* The type of task. */
 	enum task_mgmt_types tasktype;
-	/* the type of task */
+	/* The vdisk for which this task mgmt is generated. */
 	struct uisscsi_dest vdest;
-	/* the vdisk for which this task mgmt is generated */
+	/*
+	 * This is a handle that the guest has saved off for its own use.
+	 * The handle value is preserved by iopart and returned as is in the
+	 * task mgmt rsp.
+	 */
 	u64 handle;
-	/* This is a handle that the guest has saved off for its own use.
-	 * Its value is preserved by iopart & returned as is in the task
-	 * mgmt rsp.
-	 */
+	/*
+	 * For Linux guests, this is a pointer to wait_queue_head that a
+	 * thread is waiting on to see if the taskmgmt command has completed.
+	 * When the rsp is received by guest, the thread receiving the
+	 * response uses this to notify the thread waiting for taskmgmt
+	 * command completion. Its value is preserved by iopart and returned
+	 * as is in the task mgmt rsp.
+	 */
 	u64 notify_handle;
-	/* For linux guests, this is a pointer to wait_queue_head that a
-	 * thread is waiting on to see if the taskmgmt command has completed.
-	 * When the rsp is received by guest, the thread receiving the
-	 * response uses this to notify the thread waiting for taskmgmt
-	 * command completion. Its value is preserved by iopart & returned
-	 * as is in the task mgmt rsp.
-	 */
+	/*
+	 * This is a handle to the location in the guest where the result of
+	 * the taskmgmt command (result field) is saved to when the response
+	 * is handled. Its value is preserved by iopart and returned as is in
+	 * the task mgmt rsp.
+	 */
 	u64 notifyresult_handle;
-	/* this is a handle to location in guest where the result of the
-	 * taskmgmt command (result field) is to saved off when the response
-	 * is handled. Its value is preserved by iopart & returned as is in
-	 * the task mgmt rsp.
-	 */
+	/* Result of taskmgmt command - set by IOPart - values are: */
 	char result;
-	/* result of taskmgmt command - set by IOPart - values are: */
 #define TASK_MGMT_FAILED 0
 } __packed;
-/* Used by uissd to send disk add/remove notifications to Guest */
+/* Used by uissd to send disk add/remove notifications to Guest. */
 /* Note that the vHba pointer is not used by the Client/Guest side. */
 struct uiscmdrsp_disknotify {
 	u8 add;			/* 0-remove, 1-add */
@@ -421,49 +452,50 @@ struct uiscmdrsp_disknotify {
 	u32 channel, id, lun;	/* SCSI Path of Disk to added or removed */
 } __packed;
-/* The following is used by virthba/vSCSI to send the Acquire/Release commands
+/*
+ * The following is used by virthba/vSCSI to send the Acquire/Release commands
 * to the IOVM.
 */
 struct uiscmdrsp_vdiskmgmt {
+	/* The type of task */
 	enum vdisk_mgmt_types vdisktype;
-	/* the type of task */
+	/* The vdisk for which this task mgmt is generated */
 	struct uisscsi_dest vdest;
-	/* the vdisk for which this task mgmt is generated */
+	/*
+	 * This is a handle that the guest has saved off for its own use. Its
+	 * value is preserved by iopart and returned as is in the task mgmt rsp.
+	 */
 	u64 handle;
-	/* This is a handle that the guest has saved off for its own use.
-	 * Its value is preserved by iopart & returned as is in the task
-	 * mgmt rsp.
-	 */
+	/*
+	 * For Linux guests, this is a pointer to wait_queue_head that a
+	 * thread is waiting on to see if the tskmgmt command has completed.
+	 * When the rsp is received by guest, the thread receiving the
+	 * response uses this to notify the thread waiting for taskmgmt
+	 * command completion. Its value is preserved by iopart and returned
+	 * as is in the task mgmt rsp.
+	 */
 	u64 notify_handle;
-	/* For linux guests, this is a pointer to wait_queue_head that a
-	 * thread is waiting on to see if the tskmgmt command has completed.
-	 * When the rsp is received by guest, the thread receiving the
-	 * response uses this to notify the thread waiting for taskmgmt
-	 * command completion. Its value is preserved by iopart & returned
-	 * as is in the task mgmt rsp.
-	 */
+	/*
+	 * Handle to the location in guest where the result of the
+	 * taskmgmt command (result field) is saved to when the response
+	 * is handled. Its value is preserved by iopart and returned as is in
+	 * the task mgmt rsp.
+	 */
 	u64 notifyresult_handle;
-	/* this is a handle to location in guest where the result of the
-	 * taskmgmt command (result field) is to saved off when the response
-	 * is handled. Its value is preserved by iopart & returned as is in
-	 * the task mgmt rsp.
-	 */
+	/* Result of taskmgmt command - set by IOPart - values are: */
 	char result;
-
-	/* result of taskmgmt command - set by IOPart - values are: */
-#define VDISK_MGMT_FAILED 0
 } __packed;
-/* keeping cmd & rsp info in one structure for now cmd rsp packet for scsi */
+/* Keeping cmd and rsp info in one structure for now; cmd rsp packet for SCSI */
 struct uiscmdrsp {
 	char cmdtype;
-/* describes what type of information is in the struct */
+/* Describes what type of information is in the struct */
 #define CMD_SCSI_TYPE 1
 #define CMD_NET_TYPE 2
 #define CMD_SCSITASKMGMT_TYPE 3
@@ -476,11 +508,11 @@ struct uiscmdrsp {
 	struct uiscmdrsp_disknotify disknotify;
 	struct uiscmdrsp_vdiskmgmt vdiskmgmt;
 	};
-	void *private_data;	/* send the response when the cmd is */
-				/* done (scsi & scsittaskmgmt). */
+	/* Send the response when the cmd is done (scsi and scsitaskmgmt). */
+	void *private_data;
 	struct uiscmdrsp *next;	/* General Purpose Queue Link */
-	struct uiscmdrsp *activeQ_next;	/* Used to track active commands */
-	struct uiscmdrsp *activeQ_prev;	/* Used to track active commands */
+	struct uiscmdrsp *activeQ_next;	/* Pointer to the next active command */
+	struct uiscmdrsp *activeQ_prev;	/* Pointer to the previous active command */
 } __packed;
 struct iochannel_vhba {
@@ -493,7 +525,8 @@ struct iochannel_vnic {
 	u32 mtu;		/* 4 bytes */
 	uuid_le zone_uuid;	/* 16 bytes */
 } __packed;
-/* This is just the header of the IO channel. It is assumed that directly after
+/*
+ * This is just the header of the IO channel. It is assumed that directly after
 * this header there is a large region of memory which contains the command and
 * response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
 */
@@ -507,31 +540,19 @@ struct spar_io_channel_protocol {
 } __packed;
 #define MAX_CLIENTSTRING_LEN 1024
-	/* client_string is NULL termimated so holds max -1 bytes */
+	/* client_string is NULL terminated so holds max-1 bytes */
 	u8 client_string[MAX_CLIENTSTRING_LEN];
 } __packed;
-/* INLINE functions for initializing and accessing I/O data channels */
-#define SIZEOF_PROTOCOL (COVER(sizeof(struct spar_io_channel_protocol), 64))
+/* INLINE functions for initializing and accessing I/O data channels. */
 #define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
-#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \
-	2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
-
-/*
- * INLINE function for expanding a guest's pfn-off-size into multiple 4K page
- * pfn-off-size entires.
- */
-
-/* use 4K page sizes when we it comes to passing page information between */
-/* Guest and IOPartition. */
+/* Use 4K page sizes when passing page info between Guest and IOPartition. */
 #define PI_PAGE_SIZE 0x1000
 #define PI_PAGE_MASK 0x0FFF
-/* returns next non-zero index on success or zero on failure (i.e. out of
- * room)
- */
-static inline u16
+/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
+static inline u16
 add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
		     u16 max_pi_arr_entries, struct phys_info pi_arr[])
 {
@@ -540,7 +561,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
	firstlen = PI_PAGE_SIZE - inp_off;
	if (inp_len <= firstlen) {
-		/* the input entry spans only one page - add as is */
+		/* The input entry spans only one page - add as is. */
		if (index >= max_pi_arr_entries)
			return 0;
		pi_arr[index].pi_pfn = inp_pfn;
@@ -549,7 +570,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
		return index + 1;
	}
-	/* this entry spans multiple pages */
+	/* This entry spans multiple pages.
*/ for (len = inp_len, i = 0; len; len -= pi_arr[index + i].pi_len, i++) { if (index + i >= max_pi_arr_entries) @@ -567,4 +588,4 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index, return index + i; } -#endif /* __IOCHANNEL_H__ */ +#endif /* __IOCHANNEL_H__ */ diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h index 677627c72c4c5a491d0c0162e95451e6ee68bd76..03d56f818a8658d039729551a664dcd1f3bf26cf 100644 --- a/drivers/staging/unisys/include/visorbus.h +++ b/drivers/staging/unisys/include/visorbus.h @@ -166,6 +166,8 @@ struct visor_device { struct controlvm_message_header *pending_msg_hdr; void *vbus_hdr_info; uuid_le partition_uuid; + struct dentry *debugfs_dir; + struct dentry *debugfs_client_bus_info; }; #define to_visor_device(x) container_of(x, struct visor_device, device) diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h index e97917522f6aae7dfa84ad30158d256403a74fd5..b0df26155d025184c631ad3842b22512358596da 100644 --- a/drivers/staging/unisys/visorbus/vbuschannel.h +++ b/drivers/staging/unisys/visorbus/vbuschannel.h @@ -23,6 +23,7 @@ * the client devices and client drivers for the server end to see. */ #include +#include #include "channel.h" /* {193b331b-c58f-11da-95a9-00e08161165f} */ @@ -50,12 +51,6 @@ static const uuid_le spar_vbus_channel_protocol_uuid = SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \ SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE) -#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes) \ - (spar_check_channel_server(spar_vbus_channel_protocol_uuid, \ - "vbus", \ - sizeof(struct spar_vbus_channel_protocol),\ - actual_bytes)) - #pragma pack(push, 1) /* both GCC and VC now allow this pragma */ /* @@ -72,199 +67,38 @@ struct ultra_vbus_deviceinfo { }; /** - * vbuschannel_sanitize_buffer() - remove non-printable chars from buffer - * @p: destination buffer where chars are written to - * @remain: number of bytes that can be written starting at #p - * @src: pointer to source buffer - * @srcmax: number of valid characters at #src - * - * Reads chars from the buffer at @src for @srcmax bytes, and writes to - * the buffer at @p, which is @remain bytes long, ensuring never to - * overflow the buffer at @p, using the following rules: - * - printable characters are simply copied from the buffer at @src to the - * buffer at @p - * - intervening streaks of non-printable characters in the buffer at @src - * are replaced with a single space in the buffer at @p - * Note that we pay no attention to '\0'-termination. - * - * Pass @p == NULL and @remain == 0 for this special behavior -- In this - * case, we simply return the number of bytes that WOULD HAVE been written - * to a buffer at @p, had it been infinitely big. 
- * - * Return: the number of bytes written to @p (or WOULD HAVE been written to - * @p, as described in the previous paragraph) - */ -static inline int -vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax) -{ - int chars = 0; - int nonprintable_streak = 0; - - while (srcmax > 0) { - if ((*src >= ' ') && (*src < 0x7f)) { - if (nonprintable_streak) { - if (remain > 0) { - *p = ' '; - p++; - remain--; - chars++; - } else if (!p) { - chars++; - } - nonprintable_streak = 0; - } - if (remain > 0) { - *p = *src; - p++; - remain--; - chars++; - } else if (!p) { - chars++; - } - } else { - nonprintable_streak = 1; - } - src++; - srcmax--; - } - return chars; -} - -#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \ - do { \ - if (remain <= 0) \ - break; \ - *p = ch; \ - p++; chars++; remain--; \ - } while (0) - -/** - * vbuschannel_itoa() - convert non-negative int to string - * @p: destination string - * @remain: max number of bytes that can be written to @p - * @num: input int to convert - * - * Converts the non-negative value at @num to an ascii decimal string - * at @p, writing at most @remain bytes. Note there is NO '\0' termination - * written to @p. - * - * Return: number of bytes written to @p - * - */ -static inline int -vbuschannel_itoa(char *p, int remain, int num) -{ - int digits = 0; - char s[32]; - int i; - - if (num == 0) { - /* '0' is a special case */ - if (remain <= 0) - return 0; - *p = '0'; - return 1; - } - /* form a backwards decimal ascii string in */ - while (num > 0) { - if (digits >= (int)sizeof(s)) - return 0; - s[digits++] = (num % 10) + '0'; - num = num / 10; - } - if (remain < digits) { - /* not enough room left at
<p>
to hold number, so fill with - * '?' - */ - for (i = 0; i < remain; i++, p++) - *p = '?'; - return remain; - } - /* plug in the decimal ascii string representing the number, by */ - /* reversing the string we just built in */ - i = digits; - while (i > 0) { - i--; - *p = s[i]; - p++; - } - return digits; -} - -/** - * vbuschannel_devinfo_to_string() - format a struct ultra_vbus_deviceinfo - * to a printable string + * vbuschannel_print_devinfo() - format a struct ultra_vbus_deviceinfo + * and write it to a seq_file * @devinfo: the struct ultra_vbus_deviceinfo to format - * @p: destination string area - * @remain: size of destination string area in bytes + * @seq: seq_file to write to * @devix: the device index to be included in the output data, or -1 if no * device index is to be included * - * Reads @devInfo, and converts its contents to a printable string at @p, - * writing at most @remain bytes. Note there is NO '\0' termination - * written to @p. - * - * Return: number of bytes written to @p + * Reads @devInfo, and writes it in human-readable notation to @seq. */ -static inline int -vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo, - char *p, int remain, int devix) +static inline void +vbuschannel_print_devinfo(struct ultra_vbus_deviceinfo *devinfo, + struct seq_file *seq, int devix) { - char *psrc; - int nsrc, x, i, pad; - int chars = 0; - - psrc = &devinfo->devtype[0]; - nsrc = sizeof(devinfo->devtype); - if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0) - return 0; - - /* emit device index */ - if (devix >= 0) { - VBUSCHANNEL_ADDACHAR('[', p, remain, chars); - x = vbuschannel_itoa(p, remain, devix); - p += x; - remain -= x; - chars += x; - VBUSCHANNEL_ADDACHAR(']', p, remain, chars); - } else { - VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); - VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); - VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); - } - - /* emit device type */ - x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc); - p += x; - remain -= x; - chars += x; - pad = 15 - x; /* pad device type to be exactly 15 chars */ - for (i = 0; i < pad; i++) - VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); - VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); - - /* emit driver name */ - psrc = &devinfo->drvname[0]; - nsrc = sizeof(devinfo->drvname); - x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc); - p += x; - remain -= x; - chars += x; - pad = 15 - x; /* pad driver name to be exactly 15 chars */ - for (i = 0; i < pad; i++) - VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); - VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); - - /* emit strings */ - psrc = &devinfo->infostrs[0]; - nsrc = sizeof(devinfo->infostrs); - x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc); - p += x; - remain -= x; - chars += x; - VBUSCHANNEL_ADDACHAR('\n', p, remain, chars); - - return chars; + if (!isprint(devinfo->devtype[0])) + return; /* uninitialized vbus device entry */ + + if (devix >= 0) + seq_printf(seq, "[%d]", devix); + else + /* vbus device entry is for bus or chipset */ + seq_puts(seq, " "); + + /* + * Note: because the s-Par back-end is free to scribble in this area, + * we never assume '\0'-termination. 
+ */ + seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype), + (int)sizeof(devinfo->devtype), devinfo->devtype); + seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname), + (int)sizeof(devinfo->drvname), devinfo->drvname); + seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs), + devinfo->infostrs); } struct spar_vbus_headerinfo { @@ -293,11 +127,6 @@ struct spar_vbus_channel_protocol { /* describes client device and driver for each device on the bus */ }; -#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \ - (sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \ - sizeof(ULTRA_VBUS_DEVICEINFO))) -#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096) - #pragma pack(pop) #endif diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c index fec0a54916fe14e3cfb9fe7d5c5adbf749243f8e..3457ef338e1e448e0701515798b5d6f936dd9f1c 100644 --- a/drivers/staging/unisys/visorbus/visorbus_main.c +++ b/drivers/staging/unisys/visorbus/visorbus_main.c @@ -14,6 +14,7 @@ * details. */ +#include #include #include "visorbus.h" @@ -33,6 +34,7 @@ static int visorbus_forcenomatch; #define POLLJIFFIES_NORMALCHANNEL 10 static int busreg_rc = -ENODEV; /* stores the result from bus registration */ +static struct dentry *visorbus_debugfs_dir; /* * DEVICE type attributes @@ -151,6 +153,8 @@ visorbus_release_busdevice(struct device *xdev) { struct visor_device *dev = dev_get_drvdata(xdev); + debugfs_remove(dev->debugfs_client_bus_info); + debugfs_remove_recursive(dev->debugfs_dir); kfree(dev); } @@ -186,6 +190,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "0x%llx\n", visorchannel_get_physaddr(vdev->visorchannel)); } +static DEVICE_ATTR_RO(physaddr); static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -197,6 +202,7 @@ static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "0x%lx\n", visorchannel_get_nbytes(vdev->visorchannel)); } +static DEVICE_ATTR_RO(nbytes); static ssize_t clientpartition_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -208,6 +214,7 @@ static ssize_t clientpartition_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%llx\n", visorchannel_get_clientpartition(vdev->visorchannel)); } +static DEVICE_ATTR_RO(clientpartition); static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -220,6 +227,7 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "%s\n", visorchannel_id(vdev->visorchannel, typeid)); } +static DEVICE_ATTR_RO(typeguid); static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -232,6 +240,7 @@ static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "%s\n", visorchannel_zoneid(vdev->visorchannel, zoneid)); } +static DEVICE_ATTR_RO(zoneguid); static ssize_t typename_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -250,12 +259,6 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr, drv = to_visor_driver(xdrv); return snprintf(buf, PAGE_SIZE, "%s\n", drv->channel_types[i - 1].name); } - -static DEVICE_ATTR_RO(physaddr); -static DEVICE_ATTR_RO(nbytes); -static DEVICE_ATTR_RO(clientpartition); -static DEVICE_ATTR_RO(typeguid); -static DEVICE_ATTR_RO(zoneguid); static 
DEVICE_ATTR_RO(typename); static struct attribute *channel_attrs[] = { @@ -295,6 +298,7 @@ static ssize_t partition_handle_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle); } +static DEVICE_ATTR_RO(partition_handle); static ssize_t partition_guid_show(struct device *dev, struct device_attribute *attr, @@ -303,6 +307,7 @@ static ssize_t partition_guid_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "{%pUb}\n", &vdev->partition_uuid); } +static DEVICE_ATTR_RO(partition_guid); static ssize_t partition_name_show(struct device *dev, struct device_attribute *attr, @@ -311,6 +316,7 @@ static ssize_t partition_name_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%s\n", vdev->name); } +static DEVICE_ATTR_RO(partition_name); static ssize_t channel_addr_show(struct device *dev, struct device_attribute *attr, @@ -320,6 +326,7 @@ static ssize_t channel_addr_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr); } +static DEVICE_ATTR_RO(channel_addr); static ssize_t channel_bytes_show(struct device *dev, struct device_attribute *attr, @@ -329,6 +336,7 @@ static ssize_t channel_bytes_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes); } +static DEVICE_ATTR_RO(channel_bytes); static ssize_t channel_id_show(struct device *dev, struct device_attribute *attr, @@ -343,77 +351,7 @@ static ssize_t channel_id_show(struct device *dev, } return len; } - -static ssize_t client_bus_info_show(struct device *dev, - struct device_attribute *attr, - char *buf) { - struct visor_device *vdev = to_visor_device(dev); - struct visorchannel *channel = vdev->visorchannel; - - int i, shift, remain = PAGE_SIZE; - unsigned long off; - char *pos = buf; - u8 *partition_name; - struct ultra_vbus_deviceinfo dev_info; - - partition_name = ""; - if (channel) { - if (vdev->name) - partition_name = vdev->name; - shift = snprintf(pos, remain, - "Client device / client driver info for %s partition (vbus #%u):\n", - partition_name, vdev->chipset_bus_no); - pos += shift; - remain -= shift; - shift = visorchannel_read(channel, - offsetof(struct - spar_vbus_channel_protocol, - chp_info), - &dev_info, sizeof(dev_info)); - if (shift >= 0) { - shift = vbuschannel_devinfo_to_string(&dev_info, pos, - remain, -1); - pos += shift; - remain -= shift; - } - shift = visorchannel_read(channel, - offsetof(struct - spar_vbus_channel_protocol, - bus_info), - &dev_info, sizeof(dev_info)); - if (shift >= 0) { - shift = vbuschannel_devinfo_to_string(&dev_info, pos, - remain, -1); - pos += shift; - remain -= shift; - } - off = offsetof(struct spar_vbus_channel_protocol, dev_info); - i = 0; - while (off + sizeof(dev_info) <= - visorchannel_get_nbytes(channel)) { - shift = visorchannel_read(channel, - off, &dev_info, - sizeof(dev_info)); - if (shift >= 0) { - shift = vbuschannel_devinfo_to_string - (&dev_info, pos, remain, i); - pos += shift; - remain -= shift; - } - off += sizeof(dev_info); - i++; - } - } - return PAGE_SIZE - remain; -} - -static DEVICE_ATTR_RO(partition_handle); -static DEVICE_ATTR_RO(partition_guid); -static DEVICE_ATTR_RO(partition_name); -static DEVICE_ATTR_RO(channel_addr); -static DEVICE_ATTR_RO(channel_bytes); static DEVICE_ATTR_RO(channel_id); -static DEVICE_ATTR_RO(client_bus_info); static struct attribute *dev_attrs[] = { &dev_attr_partition_handle.attr, @@ -422,7 +360,6 @@ static struct attribute *dev_attrs[] = { &dev_attr_channel_addr.attr, &dev_attr_channel_bytes.attr, &dev_attr_channel_id.attr, - &dev_attr_client_bus_info.attr, NULL }; @@ 
-435,6 +372,66 @@ static const struct attribute_group *visorbus_groups[] = { NULL }; +/* + * BUS debugfs entries + * + * define & implement display of debugfs attributes under + * /sys/kernel/debug/visorbus/visorbus. + */ + +static int client_bus_info_debugfs_show(struct seq_file *seq, void *v) +{ + struct visor_device *vdev = seq->private; + struct visorchannel *channel = vdev->visorchannel; + + int i; + unsigned long off; + struct ultra_vbus_deviceinfo dev_info; + + if (!channel) + return 0; + + seq_printf(seq, + "Client device / client driver info for %s partition (vbus #%u):\n", + ((vdev->name) ? (char *)(vdev->name) : ""), + vdev->chipset_bus_no); + if (visorchannel_read(channel, + offsetof(struct spar_vbus_channel_protocol, + chp_info), + &dev_info, sizeof(dev_info)) >= 0) + vbuschannel_print_devinfo(&dev_info, seq, -1); + if (visorchannel_read(channel, + offsetof(struct spar_vbus_channel_protocol, + bus_info), + &dev_info, sizeof(dev_info)) >= 0) + vbuschannel_print_devinfo(&dev_info, seq, -1); + off = offsetof(struct spar_vbus_channel_protocol, dev_info); + i = 0; + while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) { + if (visorchannel_read(channel, off, &dev_info, + sizeof(dev_info)) >= 0) + vbuschannel_print_devinfo(&dev_info, seq, i); + off += sizeof(dev_info); + i++; + } + + return 0; +} + +static int client_bus_info_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, client_bus_info_debugfs_show, + inode->i_private); +} + +static const struct file_operations client_bus_info_debugfs_fops = { + .owner = THIS_MODULE, + .open = client_bus_info_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static void dev_periodic_work(unsigned long __opaque) { @@ -610,8 +607,8 @@ create_visor_device(struct visor_device *dev) u32 chipset_bus_no = dev->chipset_bus_no; u32 chipset_dev_no = dev->chipset_dev_no; - POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no, - POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no, + DIAG_SEVERITY_PRINT); mutex_init(&dev->visordriver_callback_lock); dev->device.bus = &visorbus_type; @@ -651,8 +648,8 @@ create_visor_device(struct visor_device *dev) */ err = device_add(&dev->device); if (err < 0) { - POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no, - DIAG_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_ADD_PC, 0, chipset_bus_no, + DIAG_SEVERITY_ERR); goto err_put; } @@ -966,9 +963,10 @@ static int create_bus_instance(struct visor_device *dev) { int id = dev->chipset_bus_no; + int err; struct spar_vbus_headerinfo *hdr_info; - POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT); hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL); if (!hdr_info) @@ -979,11 +977,26 @@ create_bus_instance(struct visor_device *dev) dev->device.groups = visorbus_groups; dev->device.release = visorbus_release_busdevice; + dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device), + visorbus_debugfs_dir); + if (!dev->debugfs_dir) { + err = -ENOMEM; + goto err_hdr_info; + } + dev->debugfs_client_bus_info = + debugfs_create_file("client_bus_info", S_IRUSR | S_IRGRP, + dev->debugfs_dir, dev, + &client_bus_info_debugfs_fops); + if (!dev->debugfs_client_bus_info) { + err = -ENOMEM; + goto err_debugfs_dir; + } + if (device_register(&dev->device) < 0) { - POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id, - POSTCODE_SEVERITY_ERR); - kfree(hdr_info); - return -ENODEV; + 
POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, 0, id, + DIAG_SEVERITY_ERR); + err = -ENODEV; + goto err_debugfs_created; } if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) { @@ -998,6 +1011,16 @@ create_bus_instance(struct visor_device *dev) list_add_tail(&dev->list_all, &list_all_bus_instances); dev_set_drvdata(&dev->device, dev); return 0; + +err_debugfs_created: + debugfs_remove(dev->debugfs_client_bus_info); + +err_debugfs_dir: + debugfs_remove_recursive(dev->debugfs_dir); + +err_hdr_info: + kfree(hdr_info); + return err; } /** @@ -1069,16 +1092,16 @@ chipset_bus_create(struct visor_device *dev) int rc; u32 bus_no = dev->chipset_bus_no; - POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT); rc = create_bus_instance(dev); - POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT); if (rc < 0) - POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no, + DIAG_SEVERITY_ERR); else - POSTCODE_LINUX_3(CHIPSET_INIT_SUCCESS_PC, bus_no, - POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, bus_no, + DIAG_SEVERITY_PRINT); bus_create_response(dev, rc); } @@ -1097,18 +1120,18 @@ chipset_device_create(struct visor_device *dev_info) u32 bus_no = dev_info->chipset_bus_no; u32 dev_no = dev_info->chipset_dev_no; - POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no, - POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no, + DIAG_SEVERITY_PRINT); rc = create_visor_device(dev_info); device_create_response(dev_info, rc); if (rc < 0) - POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + DIAG_SEVERITY_ERR); else - POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no, - POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no, + DIAG_SEVERITY_PRINT); } void @@ -1274,12 +1297,17 @@ visorbus_init(void) { int err; - POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(DRIVER_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT); + + visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL); + if (!visorbus_debugfs_dir) + return -ENOMEM; + bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus"); err = create_bus_type(); if (err < 0) { - POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR); + POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_ERR); goto error; } @@ -1288,7 +1316,7 @@ visorbus_init(void) return 0; error: - POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR); return err; } @@ -1306,6 +1334,7 @@ visorbus_exit(void) remove_bus_instance(dev); } remove_bus_type(); + debugfs_remove_recursive(visorbus_debugfs_dir); } module_param_named(forcematch, visorbus_forcematch, int, S_IRUGO); diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h index 15403fb5284727402f0e98fbcd112a5687b05efe..49bec1763e332e8aea962987e57e56b206d66726 100644 --- a/drivers/staging/unisys/visorbus/visorbus_private.h +++ b/drivers/staging/unisys/visorbus/visorbus_private.h @@ -70,9 +70,9 @@ struct visorchannel *visorchannel_create_with_lock(u64 physaddr, gfp_t gfp, uuid_le guid); void visorchannel_destroy(struct visorchannel *channel); int 
visorchannel_read(struct visorchannel *channel, ulong offset, - void *local, ulong nbytes); + void *dest, ulong nbytes); int visorchannel_write(struct visorchannel *channel, ulong offset, - void *local, ulong nbytes); + void *dest, ulong nbytes); u64 visorchannel_get_physaddr(struct visorchannel *channel); ulong visorchannel_get_nbytes(struct visorchannel *channel); char *visorchannel_id(struct visorchannel *channel, char *s); diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c index 300a65dc5c6c005f2982a9ae19df261f346ee428..f51a7258bef07bcc8898afff28c37fc050b5dc77 100644 --- a/drivers/staging/unisys/visorbus/visorchannel.c +++ b/drivers/staging/unisys/visorbus/visorchannel.c @@ -23,6 +23,7 @@ #include #include "visorbus.h" +#include "visorbus_private.h" #include "controlvmchannel.h" #define MYDRVNAME "visorchannel" @@ -127,19 +128,19 @@ EXPORT_SYMBOL_GPL(visorchannel_get_uuid); int visorchannel_read(struct visorchannel *channel, ulong offset, - void *local, ulong nbytes) + void *dest, ulong nbytes) { if (offset + nbytes > channel->nbytes) return -EIO; - memcpy(local, channel->mapped + offset, nbytes); + memcpy(dest, channel->mapped + offset, nbytes); return 0; } int visorchannel_write(struct visorchannel *channel, ulong offset, - void *local, ulong nbytes) + void *dest, ulong nbytes) { size_t chdr_size = sizeof(struct channel_header); size_t copy_size; @@ -150,10 +151,10 @@ visorchannel_write(struct visorchannel *channel, ulong offset, if (offset < chdr_size) { copy_size = min(chdr_size - offset, nbytes); memcpy(((char *)(&channel->chan_hdr)) + offset, - local, copy_size); + dest, copy_size); } - memcpy(channel->mapped + offset, local, nbytes); + memcpy(channel->mapped + offset, dest, nbytes); return 0; } @@ -236,8 +237,9 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg) if (error) return error; + /* No signals to remove; have caller try again. 
*/ if (sig_hdr.head == sig_hdr.tail) - return -EIO; /* no signals to remove */ + return -EAGAIN; sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots; @@ -299,22 +301,30 @@ EXPORT_SYMBOL_GPL(visorchannel_signalremove); * Return: boolean indicating whether any messages in the designated * channel/queue are present */ + +static bool +queue_empty(struct visorchannel *channel, u32 queue) +{ + struct signal_queue_header sig_hdr; + + if (sig_read_header(channel, queue, &sig_hdr)) + return true; + + return (sig_hdr.head == sig_hdr.tail); +} + bool visorchannel_signalempty(struct visorchannel *channel, u32 queue) { - unsigned long flags = 0; - struct signal_queue_header sig_hdr; - bool rc = false; + bool rc; + unsigned long flags; - if (channel->needs_lock) - spin_lock_irqsave(&channel->remove_lock, flags); + if (!channel->needs_lock) + return queue_empty(channel, queue); - if (sig_read_header(channel, queue, &sig_hdr)) - rc = true; - if (sig_hdr.head == sig_hdr.tail) - rc = true; - if (channel->needs_lock) - spin_unlock_irqrestore(&channel->remove_lock, flags); + spin_lock_irqsave(&channel->remove_lock, flags); + rc = queue_empty(channel, queue); + spin_unlock_irqrestore(&channel->remove_lock, flags); return rc; } diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c index 59871495ea85b180bcda36d54e5695a6c5294c3a..d7148c351d3f0deb2bd32ebbd1a08cf991a17bc5 100644 --- a/drivers/staging/unisys/visorbus/visorchipset.c +++ b/drivers/staging/unisys/visorbus/visorchipset.c @@ -29,7 +29,7 @@ #include "visorbus_private.h" #include "vmcallinterface.h" -#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c +#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100 @@ -57,7 +57,6 @@ visorchipset_open(struct inode *inode, struct file *file) if (minor_number) return -ENODEV; - file->private_data = NULL; return 0; } @@ -499,7 +498,7 @@ controlvm_init_response(struct controlvm_message *msg, } } -static void +static int controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr, int response, enum ultra_chipset_feature features) @@ -508,34 +507,33 @@ controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr, controlvm_init_response(&outmsg, msg_hdr, response); outmsg.cmd.init_chipset.features = features; - if (visorchannel_signalinsert(controlvm_channel, - CONTROLVM_QUEUE_REQUEST, &outmsg)) { - return; - } + return visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_REQUEST, &outmsg); } -static void +static int chipset_init(struct controlvm_message *inmsg) { static int chipset_inited; enum ultra_chipset_feature features = 0; int rc = CONTROLVM_RESP_SUCCESS; + int res = 0; - POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT); if (chipset_inited) { rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; + res = -EIO; goto out_respond; } chipset_inited = 1; - POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT); /* * Set features to indicate we support parahotplug (if Command * also supports it). */ - features = - inmsg->cmd.init_chipset. 
- features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG; + features = inmsg->cmd.init_chipset.features & + ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG; /* * Set the "reply" bit so Command knows this is a @@ -545,25 +543,25 @@ chipset_init(struct controlvm_message *inmsg) out_respond: if (inmsg->hdr.flags.response_expected) - controlvm_respond_chipset_init(&inmsg->hdr, rc, features); + res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features); + + return res; } -static void +static int controlvm_respond(struct controlvm_message_header *msg_hdr, int response) { struct controlvm_message outmsg; controlvm_init_response(&outmsg, msg_hdr, response); if (outmsg.hdr.flags.test_message == 1) - return; + return -EINVAL; - if (visorchannel_signalinsert(controlvm_channel, - CONTROLVM_QUEUE_REQUEST, &outmsg)) { - return; - } + return visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_REQUEST, &outmsg); } -static void controlvm_respond_physdev_changestate( +static int controlvm_respond_physdev_changestate( struct controlvm_message_header *msg_hdr, int response, struct spar_segment_state state) { @@ -572,10 +570,8 @@ static void controlvm_respond_physdev_changestate( controlvm_init_response(&outmsg, msg_hdr, response); outmsg.cmd.device_change_state.state = state; outmsg.cmd.device_change_state.flags.phys_device = 1; - if (visorchannel_signalinsert(controlvm_channel, - CONTROLVM_QUEUE_REQUEST, &outmsg)) { - return; - } + return visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_REQUEST, &outmsg); } enum crash_obj_type { @@ -583,74 +579,80 @@ enum crash_obj_type { CRASH_BUS, }; -static void +static int save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ) { u32 local_crash_msg_offset; u16 local_crash_msg_count; + int err; - if (visorchannel_read(controlvm_channel, - offsetof(struct spar_controlvm_channel_protocol, - saved_crash_message_count), - &local_crash_msg_count, sizeof(u16)) < 0) { - POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, - POSTCODE_SEVERITY_ERR); - return; + err = visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + saved_crash_message_count), + &local_crash_msg_count, sizeof(u16)); + if (err) { + POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); + return err; } if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { - POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC, - local_crash_msg_count, - POSTCODE_SEVERITY_ERR); - return; + POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0, + local_crash_msg_count, + DIAG_SEVERITY_ERR); + return -EIO; } - if (visorchannel_read(controlvm_channel, - offsetof(struct spar_controlvm_channel_protocol, - saved_crash_message_offset), - &local_crash_msg_offset, sizeof(u32)) < 0) { - POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, - POSTCODE_SEVERITY_ERR); - return; + err = visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + saved_crash_message_offset), + &local_crash_msg_offset, sizeof(u32)); + if (err) { + POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); + return err; } if (typ == CRASH_BUS) { - if (visorchannel_write(controlvm_channel, - local_crash_msg_offset, - msg, - sizeof(struct controlvm_message)) < 0) { - POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC, - POSTCODE_SEVERITY_ERR); - return; + err = visorchannel_write(controlvm_channel, + local_crash_msg_offset, + msg, + sizeof(struct controlvm_message)); + if (err) { + POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); + return err; } } else { 
local_crash_msg_offset += sizeof(struct controlvm_message); - if (visorchannel_write(controlvm_channel, - local_crash_msg_offset, - msg, - sizeof(struct controlvm_message)) < 0) { - POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC, - POSTCODE_SEVERITY_ERR); - return; + err = visorchannel_write(controlvm_channel, + local_crash_msg_offset, + msg, + sizeof(struct controlvm_message)); + if (err) { + POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); + return err; } } + return 0; } -static void +static int bus_responder(enum controlvm_id cmd_id, struct controlvm_message_header *pending_msg_hdr, int response) { if (!pending_msg_hdr) - return; /* no controlvm response needed */ + return -EIO; if (pending_msg_hdr->id != (u32)cmd_id) - return; + return -EINVAL; - controlvm_respond(pending_msg_hdr, response); + return controlvm_respond(pending_msg_hdr, response); } -static void +static int device_changestate_responder(enum controlvm_id cmd_id, struct visor_device *p, int response, struct spar_segment_state response_state) @@ -660,9 +662,9 @@ device_changestate_responder(enum controlvm_id cmd_id, u32 dev_no = p->chipset_dev_no; if (!p->pending_msg_hdr) - return; /* no controlvm response needed */ + return -EIO; if (p->pending_msg_hdr->id != cmd_id) - return; + return -EINVAL; controlvm_init_response(&outmsg, p->pending_msg_hdr, response); @@ -670,175 +672,74 @@ device_changestate_responder(enum controlvm_id cmd_id, outmsg.cmd.device_change_state.dev_no = dev_no; outmsg.cmd.device_change_state.state = response_state; - if (visorchannel_signalinsert(controlvm_channel, - CONTROLVM_QUEUE_REQUEST, &outmsg)) - return; + return visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_REQUEST, &outmsg); } -static void +static int device_responder(enum controlvm_id cmd_id, struct controlvm_message_header *pending_msg_hdr, int response) { if (!pending_msg_hdr) - return; /* no controlvm response needed */ + return -EIO; if (pending_msg_hdr->id != (u32)cmd_id) - return; - - controlvm_respond(pending_msg_hdr, response); -} - -static void -bus_epilog(struct visor_device *bus_info, - u32 cmd, struct controlvm_message_header *msg_hdr, - int response, bool need_response) -{ - struct controlvm_message_header *pmsg_hdr = NULL; - - if (!bus_info) { - /* - * relying on a valid passed in response code - * be lazy and re-use msg_hdr for this failure, is this ok?? 
- */ - pmsg_hdr = msg_hdr; - goto out_respond; - } - - if (bus_info->pending_msg_hdr) { - /* only non-NULL if dev is still waiting on a response */ - response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; - pmsg_hdr = bus_info->pending_msg_hdr; - goto out_respond; - } - - if (need_response) { - pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); - if (!pmsg_hdr) { - POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd, - bus_info->chipset_bus_no, - POSTCODE_SEVERITY_ERR); - return; - } - - memcpy(pmsg_hdr, msg_hdr, - sizeof(struct controlvm_message_header)); - bus_info->pending_msg_hdr = pmsg_hdr; - } - - if (response == CONTROLVM_RESP_SUCCESS) { - switch (cmd) { - case CONTROLVM_BUS_CREATE: - chipset_bus_create(bus_info); - break; - case CONTROLVM_BUS_DESTROY: - chipset_bus_destroy(bus_info); - break; - } - } - -out_respond: - bus_responder(cmd, pmsg_hdr, response); -} - -static void -device_epilog(struct visor_device *dev_info, - struct spar_segment_state state, u32 cmd, - struct controlvm_message_header *msg_hdr, int response, - bool need_response, bool for_visorbus) -{ - struct controlvm_message_header *pmsg_hdr = NULL; - - if (!dev_info) { - /* - * relying on a valid passed in response code - * be lazy and re-use msg_hdr for this failure, is this ok?? - */ - pmsg_hdr = msg_hdr; - goto out_respond; - } - - if (dev_info->pending_msg_hdr) { - /* only non-NULL if dev is still waiting on a response */ - response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; - pmsg_hdr = dev_info->pending_msg_hdr; - goto out_respond; - } - - if (need_response) { - pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); - if (!pmsg_hdr) { - response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; - goto out_respond; - } - - memcpy(pmsg_hdr, msg_hdr, - sizeof(struct controlvm_message_header)); - dev_info->pending_msg_hdr = pmsg_hdr; - } - - if (response >= 0) { - switch (cmd) { - case CONTROLVM_DEVICE_CREATE: - chipset_device_create(dev_info); - break; - case CONTROLVM_DEVICE_CHANGESTATE: - /* ServerReady / ServerRunning / SegmentStateRunning */ - if (state.alive == segment_state_running.alive && - state.operating == - segment_state_running.operating) { - chipset_device_resume(dev_info); - } - /* ServerNotReady / ServerLost / SegmentStateStandby */ - else if (state.alive == segment_state_standby.alive && - state.operating == - segment_state_standby.operating) { - /* - * technically this is standby case - * where server is lost - */ - chipset_device_pause(dev_info); - } - break; - case CONTROLVM_DEVICE_DESTROY: - chipset_device_destroy(dev_info); - break; - } - } + return -EINVAL; -out_respond: - device_responder(cmd, pmsg_hdr, response); + return controlvm_respond(pending_msg_hdr, response); } -static void +static int bus_create(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; + struct controlvm_message_header *pmsg_hdr = NULL; u32 bus_no = cmd->create_bus.bus_no; - int rc = CONTROLVM_RESP_SUCCESS; struct visor_device *bus_info; struct visorchannel *visorchannel; + int err; bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); if (bus_info && (bus_info->state.created == 1)) { - POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, - POSTCODE_SEVERITY_ERR); - rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; - goto out_bus_epilog; + POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no, + DIAG_SEVERITY_ERR); + err = -EEXIST; + goto err_respond; } + bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL); if (!bus_info) { - POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, - 
POSTCODE_SEVERITY_ERR); - rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; - goto out_bus_epilog; + POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no, + DIAG_SEVERITY_ERR); + err = -ENOMEM; + goto err_respond; } INIT_LIST_HEAD(&bus_info->list_all); bus_info->chipset_bus_no = bus_no; bus_info->chipset_dev_no = BUS_ROOT_DEVICE; - POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT); + + if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) + save_crash_message(inmsg, CRASH_BUS); + + if (inmsg->hdr.flags.response_expected == 1) { + pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), + GFP_KERNEL); + if (!pmsg_hdr) { + POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd, + bus_info->chipset_bus_no, + DIAG_SEVERITY_ERR); + err = -ENOMEM; + goto err_free_bus_info; + } + + memcpy(pmsg_hdr, &inmsg->hdr, + sizeof(struct controlvm_message_header)); + bus_info->pending_msg_hdr = pmsg_hdr; + } visorchannel = visorchannel_create(cmd->create_bus.channel_addr, cmd->create_bus.channel_bytes, @@ -846,89 +747,138 @@ bus_create(struct controlvm_message *inmsg) cmd->create_bus.bus_data_type_uuid); if (!visorchannel) { - POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, - POSTCODE_SEVERITY_ERR); - rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; - kfree(bus_info); - bus_info = NULL; - goto out_bus_epilog; + POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no, + DIAG_SEVERITY_ERR); + err = -ENOMEM; + goto err_free_pending_msg; } bus_info->visorchannel = visorchannel; - if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) - save_crash_message(inmsg, CRASH_BUS); - POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO); + /* Response will be handled by chipset_bus_create */ + chipset_bus_create(bus_info); + + POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT); + return 0; + +err_free_pending_msg: + kfree(bus_info->pending_msg_hdr); + +err_free_bus_info: + kfree(bus_info); -out_bus_epilog: - bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr, - rc, inmsg->hdr.flags.response_expected == 1); +err_respond: + if (inmsg->hdr.flags.response_expected == 1) + bus_responder(inmsg->hdr.id, &inmsg->hdr, err); + return err; } -static void +static int bus_destroy(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; + struct controlvm_message_header *pmsg_hdr = NULL; u32 bus_no = cmd->destroy_bus.bus_no; struct visor_device *bus_info; - int rc = CONTROLVM_RESP_SUCCESS; + int err; bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); - if (!bus_info) - rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; - else if (bus_info->state.created == 0) - rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; + if (!bus_info) { + err = -ENODEV; + goto err_respond; + } + if (bus_info->state.created == 0) { + err = -ENOENT; + goto err_respond; + } + if (bus_info->pending_msg_hdr) { + /* only non-NULL if dev is still waiting on a response */ + err = -EEXIST; + goto err_respond; + } + if (inmsg->hdr.flags.response_expected == 1) { + pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); + if (!pmsg_hdr) { + POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd, + bus_info->chipset_bus_no, + DIAG_SEVERITY_ERR); + err = -ENOMEM; + goto err_respond; + } - bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr, - rc, inmsg->hdr.flags.response_expected == 1); + memcpy(pmsg_hdr, &inmsg->hdr, + sizeof(struct controlvm_message_header)); + bus_info->pending_msg_hdr = pmsg_hdr; + } - /* bus_info is freed as part of the busdevice_release 
function */ + /* Response will be handled by chipset_bus_destroy */ + chipset_bus_destroy(bus_info); + return 0; + +err_respond: + if (inmsg->hdr.flags.response_expected == 1) + bus_responder(inmsg->hdr.id, &inmsg->hdr, err); + return err; } -static void +static int bus_configure(struct controlvm_message *inmsg, struct parser_context *parser_ctx) { struct controlvm_message_packet *cmd = &inmsg->cmd; u32 bus_no; struct visor_device *bus_info; - int rc = CONTROLVM_RESP_SUCCESS; + int err = 0; bus_no = cmd->configure_bus.bus_no; - POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no, - POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no, + DIAG_SEVERITY_PRINT); bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); if (!bus_info) { - POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, - POSTCODE_SEVERITY_ERR); - rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; + POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no, + DIAG_SEVERITY_ERR); + err = -EINVAL; + goto err_respond; } else if (bus_info->state.created == 0) { - POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, - POSTCODE_SEVERITY_ERR); - rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; + POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no, + DIAG_SEVERITY_ERR); + err = -EINVAL; + goto err_respond; } else if (bus_info->pending_msg_hdr) { - POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, - POSTCODE_SEVERITY_ERR); - rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; - } else { - visorchannel_set_clientpartition - (bus_info->visorchannel, - cmd->configure_bus.guest_handle); - bus_info->partition_uuid = parser_id_get(parser_ctx); - parser_param_start(parser_ctx, PARSERSTRING_NAME); - bus_info->name = parser_string_get(parser_ctx); - - POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no, - POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no, + DIAG_SEVERITY_ERR); + err = -EIO; + goto err_respond; } - bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr, - rc, inmsg->hdr.flags.response_expected == 1); + + err = visorchannel_set_clientpartition + (bus_info->visorchannel, + cmd->configure_bus.guest_handle); + if (err) + goto err_respond; + + bus_info->partition_uuid = parser_id_get(parser_ctx); + parser_param_start(parser_ctx, PARSERSTRING_NAME); + bus_info->name = parser_string_get(parser_ctx); + + POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no, + DIAG_SEVERITY_PRINT); + + if (inmsg->hdr.flags.response_expected == 1) + bus_responder(inmsg->hdr.id, &inmsg->hdr, err); + return 0; + +err_respond: + if (inmsg->hdr.flags.response_expected == 1) + bus_responder(inmsg->hdr.id, &inmsg->hdr, err); + return err; } static void my_device_create(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; + struct controlvm_message_header *pmsg_hdr = NULL; u32 bus_no = cmd->create_device.bus_no; u32 dev_no = cmd->create_device.dev_no; struct visor_device *dev_info = NULL; @@ -938,31 +888,31 @@ my_device_create(struct controlvm_message *inmsg) bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); if (!bus_info) { - POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + DIAG_SEVERITY_ERR); rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; goto out_respond; } if (bus_info->state.created == 0) { - POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + DIAG_SEVERITY_ERR); rc = 
-CONTROLVM_RESP_ERROR_BUS_INVALID; goto out_respond; } dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); if (dev_info && (dev_info->state.created == 1)) { - POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + DIAG_SEVERITY_ERR); rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; goto out_respond; } dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); if (!dev_info) { - POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + DIAG_SEVERITY_ERR); rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; goto out_respond; } @@ -974,8 +924,8 @@ my_device_create(struct controlvm_message *inmsg) /* not sure where the best place to set the 'parent' */ dev_info->device.parent = &bus_info->device; - POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no, - POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no, + DIAG_SEVERITY_PRINT); visorchannel = visorchannel_create_with_lock(cmd->create_device.channel_addr, @@ -984,12 +934,10 @@ my_device_create(struct controlvm_message *inmsg) cmd->create_device.data_type_uuid); if (!visorchannel) { - POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + DIAG_SEVERITY_ERR); rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; - kfree(dev_info); - dev_info = NULL; - goto out_respond; + goto out_free_dev_info; } dev_info->visorchannel = visorchannel; dev_info->channel_type_guid = cmd->create_device.data_type_uuid; @@ -997,18 +945,36 @@ my_device_create(struct controlvm_message *inmsg) spar_vhba_channel_protocol_uuid) == 0) save_crash_message(inmsg, CRASH_DEV); - POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no, - POSTCODE_SEVERITY_INFO); + if (inmsg->hdr.flags.response_expected == 1) { + pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); + if (!pmsg_hdr) { + rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; + goto out_free_dev_info; + } + + memcpy(pmsg_hdr, &inmsg->hdr, + sizeof(struct controlvm_message_header)); + dev_info->pending_msg_hdr = pmsg_hdr; + } + /* Chipset_device_create will send response */ + chipset_device_create(dev_info); + POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no, + DIAG_SEVERITY_PRINT); + return; + +out_free_dev_info: + kfree(dev_info); + out_respond: - device_epilog(dev_info, segment_state_running, - CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc, - inmsg->hdr.flags.response_expected == 1, 1); + if (inmsg->hdr.flags.response_expected == 1) + device_responder(inmsg->hdr.id, &inmsg->hdr, rc); } static void my_device_changestate(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; + struct controlvm_message_header *pmsg_hdr = NULL; u32 bus_no = cmd->device_change_state.bus_no; u32 dev_no = cmd->device_change_state.dev_no; struct spar_segment_state state = cmd->device_change_state.state; @@ -1017,39 +983,97 @@ my_device_changestate(struct controlvm_message *inmsg) dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); if (!dev_info) { - POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no, + DIAG_SEVERITY_ERR); rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; - } else if (dev_info->state.created == 0) { - POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no, - POSTCODE_SEVERITY_ERR); + goto err_respond; + } + if 
(dev_info->state.created == 0) { + POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no, + DIAG_SEVERITY_ERR); rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; + goto err_respond; + } + if (dev_info->pending_msg_hdr) { + /* only non-NULL if dev is still waiting on a response */ + rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; + goto err_respond; } - if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info) - device_epilog(dev_info, state, - CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc, - inmsg->hdr.flags.response_expected == 1, 1); + if (inmsg->hdr.flags.response_expected == 1) { + pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); + if (!pmsg_hdr) { + rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; + goto err_respond; + } + + memcpy(pmsg_hdr, &inmsg->hdr, + sizeof(struct controlvm_message_header)); + dev_info->pending_msg_hdr = pmsg_hdr; + } + + if (state.alive == segment_state_running.alive && + state.operating == segment_state_running.operating) + /* Response will be sent from chipset_device_resume */ + chipset_device_resume(dev_info); + /* ServerNotReady / ServerLost / SegmentStateStandby */ + else if (state.alive == segment_state_standby.alive && + state.operating == segment_state_standby.operating) + /* + * technically this is standby case where server is lost. + * Response will be sent from chipset_device_pause. + */ + chipset_device_pause(dev_info); + + return; + +err_respond: + if (inmsg->hdr.flags.response_expected == 1) + device_responder(inmsg->hdr.id, &inmsg->hdr, rc); } static void my_device_destroy(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; + struct controlvm_message_header *pmsg_hdr = NULL; u32 bus_no = cmd->destroy_device.bus_no; u32 dev_no = cmd->destroy_device.dev_no; struct visor_device *dev_info; int rc = CONTROLVM_RESP_SUCCESS; dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); - if (!dev_info) + if (!dev_info) { rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; - else if (dev_info->state.created == 0) + goto err_respond; + } + if (dev_info->state.created == 0) { rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; + goto err_respond; + } - if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info) - device_epilog(dev_info, segment_state_running, - CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc, - inmsg->hdr.flags.response_expected == 1, 1); + if (dev_info->pending_msg_hdr) { + /* only non-NULL if dev is still waiting on a response */ + rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; + goto err_respond; + } + if (inmsg->hdr.flags.response_expected == 1) { + pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); + if (!pmsg_hdr) { + rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; + goto err_respond; + } + + memcpy(pmsg_hdr, &inmsg->hdr, + sizeof(struct controlvm_message_header)); + dev_info->pending_msg_hdr = pmsg_hdr; + } + + chipset_device_destroy(dev_info); + return; + +err_respond: + if (inmsg->hdr.flags.response_expected == 1) + device_responder(inmsg->hdr.id, &inmsg->hdr, rc); } /** @@ -1075,7 +1099,6 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes, if (!info) return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; - memset(info, 0, sizeof(struct visor_controlvm_payload_info)); if ((offset == 0) || (bytes == 0)) return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; @@ -1083,6 +1106,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes, if (!payload) return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED; + memset(info, 0, sizeof(struct visor_controlvm_payload_info)); info->offset = offset; info->bytes = bytes; info->ptr = 
payload; @@ -1111,16 +1135,16 @@ initialize_controlvm_payload(void) offsetof(struct spar_controlvm_channel_protocol, request_payload_offset), &payload_offset, sizeof(payload_offset)) < 0) { - POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); return; } if (visorchannel_read(controlvm_channel, offsetof(struct spar_controlvm_channel_protocol, request_payload_bytes), &payload_bytes, sizeof(payload_bytes)) < 0) { - POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); return; } initialize_controlvm_payload_info(phys_addr, @@ -1317,7 +1341,7 @@ static struct attribute *visorchipset_install_attrs[] = { NULL }; -static struct attribute_group visorchipset_install_group = { +static const struct attribute_group visorchipset_install_group = { .name = "install", .attrs = visorchipset_install_attrs }; @@ -1540,7 +1564,7 @@ setup_crash_devices_work_queue(struct work_struct *work) u32 local_crash_msg_offset; u16 local_crash_msg_count; - POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT); /* send init chipset msg */ msg.hdr.id = CONTROLVM_CHIPSET_INIT; @@ -1554,15 +1578,15 @@ setup_crash_devices_work_queue(struct work_struct *work) offsetof(struct spar_controlvm_channel_protocol, saved_crash_message_count), &local_crash_msg_count, sizeof(u16)) < 0) { - POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); return; } if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { - POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC, - local_crash_msg_count, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0, + local_crash_msg_count, + DIAG_SEVERITY_ERR); return; } @@ -1571,8 +1595,8 @@ setup_crash_devices_work_queue(struct work_struct *work) offsetof(struct spar_controlvm_channel_protocol, saved_crash_message_offset), &local_crash_msg_offset, sizeof(u32)) < 0) { - POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); return; } @@ -1581,8 +1605,8 @@ setup_crash_devices_work_queue(struct work_struct *work) local_crash_msg_offset, &local_crash_bus_msg, sizeof(struct controlvm_message)) < 0) { - POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); return; } @@ -1592,8 +1616,8 @@ setup_crash_devices_work_queue(struct work_struct *work) sizeof(struct controlvm_message), &local_crash_dev_msg, sizeof(struct controlvm_message)) < 0) { - POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); return; } @@ -1601,8 +1625,8 @@ setup_crash_devices_work_queue(struct work_struct *work) if (local_crash_bus_msg.cmd.create_bus.channel_addr) { bus_create(&local_crash_bus_msg); } else { - POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC, - POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); return; } @@ -1610,11 +1634,11 @@ setup_crash_devices_work_queue(struct work_struct *work) if (local_crash_dev_msg.cmd.create_device.channel_addr) { my_device_create(&local_crash_dev_msg); } else { - POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC, - POSTCODE_SEVERITY_ERR); + 
POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); return; } - POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT); } void @@ -2119,8 +2143,6 @@ visorchipset_init(struct acpi_device *acpi_device) if (!addr) goto error; - memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info)); - controlvm_channel = visorchannel_create_with_lock(addr, 0, GFP_KERNEL, uuid); if (!controlvm_channel) @@ -2152,11 +2174,12 @@ visorchipset_init(struct acpi_device *acpi_device) visorchipset_platform_device.dev.devt = major_dev; if (platform_device_register(&visorchipset_platform_device) < 0) { - POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR); + POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0, + DIAG_SEVERITY_ERR); err = -ENODEV; goto error_cancel_work; } - POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT); err = visorbus_init(); if (err < 0) @@ -2178,14 +2201,14 @@ visorchipset_init(struct acpi_device *acpi_device) visorchannel_destroy(controlvm_channel); error: - POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR); + POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR); return err; } static int visorchipset_exit(struct acpi_device *acpi_device) { - POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT); visorbus_exit(); @@ -2196,7 +2219,7 @@ visorchipset_exit(struct acpi_device *acpi_device) visorchipset_file_cleanup(visorchipset_platform_device.dev.devt); platform_device_unregister(&visorchipset_platform_device); - POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO); + POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT); return 0; } diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h index 86e695d5a4414a9772d9c8a9458f05b842bfba91..674a88b657d3059f36eaaec9dd4a8e9c91d6b1bc 100644 --- a/drivers/staging/unisys/visorbus/vmcallinterface.h +++ b/drivers/staging/unisys/visorbus/vmcallinterface.h @@ -92,15 +92,6 @@ enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */ #define ISSUE_IO_VMCALL(method, param, result) \ (result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \ (param) >> 32)) -#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, param3) \ - unisys_extended_vmcall(method, param1, param2, param3) - - /* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently - * not used much - */ -#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \ - ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \ - MDS_APPOS, postcode) /* Structures for IO VMCALLs */ @@ -117,118 +108,53 @@ struct vmcall_io_controlvm_addr_params { /******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/ enum driver_pc { /* POSTCODE driver identifier tuples */ - /* visorchipset driver files */ - VISOR_CHIPSET_PC = 0xA0, - VISOR_CHIPSET_PC_controlvm_c = 0xA1, - VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2, - VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3, - VISOR_CHIPSET_PC_file_c = 0xA4, - VISOR_CHIPSET_PC_parser_c = 0xA5, - VISOR_CHIPSET_PC_testing_c = 0xA6, - VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7, - VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8, /* visorbus driver files */ - VISOR_BUS_PC = 0xB0, - VISOR_BUS_PC_businst_attr_c = 0xB1, - VISOR_BUS_PC_channel_attr_c = 0xB2, - VISOR_BUS_PC_devmajorminor_attr_c = 0xB3, - 
VISOR_BUS_PC_visorbus_main_c = 0xB4, - /* visorclientbus driver files */ - VISOR_CLIENT_BUS_PC = 0xC0, - VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1, - /* virt hba driver files */ - VIRT_HBA_PC = 0xC2, - VIRT_HBA_PC_virthba_c = 0xC3, - /* virtpci driver files */ - VIRT_PCI_PC = 0xC4, - VIRT_PCI_PC_virtpci_c = 0xC5, - /* virtnic driver files */ - VIRT_NIC_PC = 0xC6, - VIRT_NIC_P_virtnic_c = 0xC7, - /* uislib driver files */ - UISLIB_PC = 0xD0, - UISLIB_PC_uislib_c = 0xD1, - UISLIB_PC_uisqueue_c = 0xD2, - /* 0xD3 RESERVED */ - UISLIB_PC_uisutils_c = 0xD4, + VISOR_BUS_PC = 0xF0, + VISOR_BUS_PC_visorbus_main_c = 0xFF, + VISOR_BUS_PC_visorchipset_c = 0xFE, }; enum event_pc { /* POSTCODE event identifier tuples */ - ATTACH_PORT_ENTRY_PC = 0x001, - ATTACH_PORT_FAILURE_PC = 0x002, - ATTACH_PORT_SUCCESS_PC = 0x003, - BUS_FAILURE_PC = 0x004, - BUS_CREATE_ENTRY_PC = 0x005, - BUS_CREATE_FAILURE_PC = 0x006, - BUS_CREATE_EXIT_PC = 0x007, - BUS_CONFIGURE_ENTRY_PC = 0x008, - BUS_CONFIGURE_FAILURE_PC = 0x009, - BUS_CONFIGURE_EXIT_PC = 0x00A, - CHIPSET_INIT_ENTRY_PC = 0x00B, - CHIPSET_INIT_SUCCESS_PC = 0x00C, - CHIPSET_INIT_FAILURE_PC = 0x00D, - CHIPSET_INIT_EXIT_PC = 0x00E, - CREATE_WORKQUEUE_PC = 0x00F, - CREATE_WORKQUEUE_FAILED_PC = 0x0A0, - CONTROLVM_INIT_FAILURE_PC = 0x0A1, - DEVICE_CREATE_ENTRY_PC = 0x0A2, - DEVICE_CREATE_FAILURE_PC = 0x0A3, - DEVICE_CREATE_SUCCESS_PC = 0x0A4, - DEVICE_CREATE_EXIT_PC = 0x0A5, - DEVICE_ADD_PC = 0x0A6, - DEVICE_REGISTER_FAILURE_PC = 0x0A7, - DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8, - DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9, - DEVICE_CHANGESTATE_EXIT_PC = 0x0AA, - DRIVER_ENTRY_PC = 0x0AB, - DRIVER_EXIT_PC = 0x0AC, - MALLOC_FAILURE_PC = 0x0AD, - QUEUE_DELAYED_WORK_PC = 0x0AE, - /* 0x0B7 RESERVED */ - VBUS_CHANNEL_ENTRY_PC = 0x0B8, - VBUS_CHANNEL_FAILURE_PC = 0x0B9, - VBUS_CHANNEL_EXIT_PC = 0x0BA, - VHBA_CREATE_ENTRY_PC = 0x0BB, - VHBA_CREATE_FAILURE_PC = 0x0BC, - VHBA_CREATE_EXIT_PC = 0x0BD, - VHBA_CREATE_SUCCESS_PC = 0x0BE, - VHBA_COMMAND_HANDLER_PC = 0x0BF, - VHBA_PROBE_ENTRY_PC = 0x0C0, - VHBA_PROBE_FAILURE_PC = 0x0C1, - VHBA_PROBE_EXIT_PC = 0x0C2, - VNIC_CREATE_ENTRY_PC = 0x0C3, - VNIC_CREATE_FAILURE_PC = 0x0C4, - VNIC_CREATE_SUCCESS_PC = 0x0C5, - VNIC_PROBE_ENTRY_PC = 0x0C6, - VNIC_PROBE_FAILURE_PC = 0x0C7, - VNIC_PROBE_EXIT_PC = 0x0C8, - VPCI_CREATE_ENTRY_PC = 0x0C9, - VPCI_CREATE_FAILURE_PC = 0x0CA, - VPCI_CREATE_EXIT_PC = 0x0CB, - VPCI_PROBE_ENTRY_PC = 0x0CC, - VPCI_PROBE_FAILURE_PC = 0x0CD, - VPCI_PROBE_EXIT_PC = 0x0CE, - CRASH_DEV_ENTRY_PC = 0x0CF, - CRASH_DEV_EXIT_PC = 0x0D0, - CRASH_DEV_HADDR_NULL = 0x0D1, - CRASH_DEV_CONTROLVM_NULL = 0x0D2, - CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3, - CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4, - CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5, - CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6, - CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7, - CRASH_DEV_COUNT_FAILURE_PC = 0x0D8, - SAVE_MSG_BUS_FAILURE_PC = 0x0D9, - SAVE_MSG_DEV_FAILURE_PC = 0x0DA, - CALLHOME_INIT_FAILURE_PC = 0x0DB + BUS_CREATE_ENTRY_PC = 0x001, + BUS_CREATE_FAILURE_PC = 0x002, + BUS_CREATE_EXIT_PC = 0x003, + BUS_CONFIGURE_ENTRY_PC = 0x004, + BUS_CONFIGURE_FAILURE_PC = 0x005, + BUS_CONFIGURE_EXIT_PC = 0x006, + CHIPSET_INIT_ENTRY_PC = 0x007, + CHIPSET_INIT_SUCCESS_PC = 0x008, + CHIPSET_INIT_FAILURE_PC = 0x009, + CHIPSET_INIT_EXIT_PC = 0x00A, + CONTROLVM_INIT_FAILURE_PC = 0x00B, + DEVICE_CREATE_ENTRY_PC = 0x00C, + DEVICE_CREATE_FAILURE_PC = 0x00D, + DEVICE_CREATE_SUCCESS_PC = 0x00E, + DEVICE_CREATE_EXIT_PC = 0x00F, + DEVICE_ADD_PC = 0x010, + DEVICE_REGISTER_FAILURE_PC = 0x011, + 
DEVICE_CHANGESTATE_FAILURE_PC = 0x012, + DRIVER_ENTRY_PC = 0x013, + DRIVER_EXIT_PC = 0x014, + MALLOC_FAILURE_PC = 0x015, + CRASH_DEV_ENTRY_PC = 0x016, + CRASH_DEV_EXIT_PC = 0x017, + CRASH_DEV_RD_BUS_FAILURE_PC = 0x018, + CRASH_DEV_RD_DEV_FAILURE_PC = 0x019, + CRASH_DEV_BUS_NULL_FAILURE_PC = 0x01A, + CRASH_DEV_DEV_NULL_FAILURE_PC = 0x01B, + CRASH_DEV_CTRL_RD_FAILURE_PC = 0x01C, + CRASH_DEV_COUNT_FAILURE_PC = 0x01D, + SAVE_MSG_BUS_FAILURE_PC = 0x01E, + SAVE_MSG_DEV_FAILURE_PC = 0x01F, }; -#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR -#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING -/* TODO-> Info currently doesn't show, so we set info=warning */ -#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT - -/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR); +/* Write a 64-bit value to the hypervisor's log file + * POSTCODE_LINUX generates a value in the form 0xAABBBCCCDDDDEEEE where + * A is an identifier for the file logging the postcode + * B is an identifier for the event logging the postcode + * C is the line logging the postcode + * D is additional information the caller wants to log + * E is additional information the caller wants to log * Please also note that the resulting postcode is in hex, so if you are * searching for the __LINE__ number, convert it first to decimal. The line * number combined with driver and type of call, will allow you to track down @@ -236,35 +162,16 @@ enum event_pc { /* POSTCODE event identifier tuples */ * entered/exited from. */ -/* BASE FUNCTIONS */ -#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \ -do { \ - unsigned long long post_code_temp; \ - post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \ - ((((u64)__LINE__) & 0xFFF) << 32) | \ - (((u64)pc32bit) & 0xFFFFFFFF); \ - ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \ -} while (0) - -#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \ +#define POSTCODE_LINUX(EVENT_PC, pc16bit1, pc16bit2, severity) \ do { \ unsigned long long post_code_temp; \ - post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \ + post_code_temp = (((u64)CURRENT_FILE_PC) << 56) | \ + (((u64)EVENT_PC) << 44) | \ ((((u64)__LINE__) & 0xFFF) << 32) | \ ((((u64)pc16bit1) & 0xFFFF) << 16) | \ (((u64)pc16bit2) & 0xFFFF); \ - ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \ + unisys_extended_vmcall(VMCALL_POST_CODE_LOGEVENT, severity, \ + MDS_APPOS, post_code_temp); \ } while (0) -/* MOST COMMON */ -#define POSTCODE_LINUX_2(EVENT_PC, severity) \ - POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity) - -#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \ - POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity) - -#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \ - POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \ - pc16bit2, severity) - #endif /* __IOMONINTF_H__ */ diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c index 6f94b646f7c58c45a5ca44bd3e68278d4e87f653..949cce680b29f800b1f243f7fb461dd884703196 100644 --- a/drivers/staging/unisys/visorinput/visorinput.c +++ b/drivers/staging/unisys/visorinput/visorinput.c @@ -409,6 +409,9 @@ devdata_create(struct visor_device *dev, enum visorinput_device_type devtype) if (!devdata->visorinput_dev) goto cleanups_register; break; + default: + /* No other input devices supported */ + break; } dev_set_drvdata(&dev->device, devdata); @@ -653,6 +656,9 @@ 
visorinput_channel_interrupt(struct visor_device *dev) input_report_rel(visorinput_dev, REL_WHEEL, -1); input_sync(visorinput_dev); break; + default: + /* Unsupported input action */ + break; } } } diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 136700756485ffb4dc372ede3c8a8ddee870e895..c1f674f5268c2421c7f925d45f150a139264980f 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -791,7 +791,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev) * pointing to */ firstfraglen = skb->len - skb->data_len; - if (firstfraglen < ETH_HEADER_SIZE) { + if (firstfraglen < ETH_HLEN) { spin_unlock_irqrestore(&devdata->priv_lock, flags); devdata->busy_cnt++; dev_err(&netdev->dev, @@ -864,7 +864,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev) /* copy ethernet header from first frag into ocmdrsp * - everything else will be pass in frags & DMA'ed */ - memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HEADER_SIZE); + memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN); /* copy frags info - from skb->data we need to only provide access * beyond eth header */ @@ -1371,7 +1371,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf, " num_rcv_bufs = %d\n", devdata->num_rcv_bufs); str_pos += scnprintf(vbuf + str_pos, len - str_pos, - " max_oustanding_next_xmits = %lu\n", + " max_outstanding_next_xmits = %lu\n", devdata->max_outstanding_net_xmits); str_pos += scnprintf(vbuf + str_pos, len - str_pos, " upper_threshold_net_xmits = %lu\n", diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig index 9676fb29075a457109e4d4235f086987aec74868..e61e4ca064a8ab43b4dc4954b22fa3a52310379f 100644 --- a/drivers/staging/vc04_services/Kconfig +++ b/drivers/staging/vc04_services/Kconfig @@ -1,9 +1,10 @@ -config BCM2708_VCHIQ +config BCM2835_VCHIQ tristate "Videocore VCHIQ" - depends on RASPBERRYPI_FIRMWARE && BROKEN + depends on HAS_DMA + depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) default y help Kernel to VideoCore communication interface for the - BCM2708 family of products. + BCM2835 family of products. Defaults to Y when the Broadcom Videocore services are included in the build, N otherwise. diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile index 90ab4781df2cdffcd7dd8f3b6ae81ba41209cfd0..1a9e742ee40d2ef152817999ad1c931357e793df 100644 --- a/drivers/staging/vc04_services/Makefile +++ b/drivers/staging/vc04_services/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o +obj-$(CONFIG_BCM2835_VCHIQ) += vchiq.o vchiq-objs := \ interface/vchiq_arm/vchiq_core.o \ diff --git a/drivers/staging/vc04_services/interface/vchi/TODO b/drivers/staging/vc04_services/interface/vchi/TODO new file mode 100644 index 0000000000000000000000000000000000000000..03aa65183b25764c9662ddd57aded1ba74601d7e --- /dev/null +++ b/drivers/staging/vc04_services/interface/vchi/TODO @@ -0,0 +1,50 @@ +1) Port to aarch64 + +This driver won't be very useful unless we also have it working on +Raspberry Pi 3. This requires, at least: + + - Figure out an alternative to the dmac_map_area() hack. + + - Decide what to use instead of dsb(). + + - Do something about (int) cast of bulk->data in + vchiq_bulk_transfer(). + + bulk->data is a bus address going across to the firmware. We know + our bus addresses are <32bit. 
+ +2) Write a DT binding doc and get the corresponding DT node merged to + bcm2835. + +This will let the driver probe when enabled. + +3) Import drivers using VCHI. + +VCHI is just a tool to let drivers talk to the firmware. Here are +some of the ones we want: + + - vc_mem (https://github.com/raspberrypi/linux/blob/rpi-4.4.y/drivers/char/broadcom/vc_mem.c) + + This driver is what the vcdbg userspace program uses to set up its + requests to the firmware, which are transmitted across VCHIQ. vcdbg + is really useful for debugging firmware interactions. + + - bcm2835-camera (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/media/platform/bcm2835) + + This driver will let us get images from the camera using the MMAL + protocol over VCHI. + + - VCSM (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/char/broadcom/vc_sm) + + This driver is used for talking about regions of VC memory across + firmware protocols including VCHI. We'll want to extend this driver + to manage these buffers as dmabufs so that we can zero-copy import + camera images into vc4 for rendering/display. + +4) Garbage-collect unused code + +One of the reasons this driver wasn't upstreamed previously was that +there's a lot code that got built that's probably unnecessary these +days. Once we have the set of VCHI-using drivers we want in tree, we +should be able to do a sweep of the code to see what's left that's +unused. diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h index 1b17e98f737979eef19906609bc19ecd19455618..d6937288210cef102e115f4265471ad9e9da7007 100644 --- a/drivers/staging/vc04_services/interface/vchi/vchi.h +++ b/drivers/staging/vc04_services/interface/vchi/vchi.h @@ -226,25 +226,12 @@ extern int32_t vchi_service_set_option( const VCHI_SERVICE_HANDLE_T handle, int value); // Routine to send a message across a service -extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle, - const void *data, - uint32_t data_size, - VCHI_FLAGS_T flags, - void *msg_handle ); - -// scatter-gather (vector) and send message -int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle, - VCHI_MSG_VECTOR_EX_T *vector, - uint32_t count, - VCHI_FLAGS_T flags, - void *msg_handle ); - -// legacy scatter-gather (vector) and send message, only handles pointers -int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle, - VCHI_MSG_VECTOR_T *vector, - uint32_t count, - VCHI_FLAGS_T flags, - void *msg_handle ); +extern int32_t + vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + uint32_t data_size); // Routine to receive a msg from a service // Dequeue is equivalent to hold, copy into client buffer, release diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h index ad398bae6ee4c3d64fc1a37808398397a9a0fefe..21adf89a9065eb311c2fcdf4782062b82cd75669 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h @@ -37,4 +37,15 @@ #include "vchiq_if.h" #include "vchiq_util.h" +/* Do this so that we can test-build the code on non-rpi systems */ +#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) + +#else + +#ifndef dsb +#define dsb(a) +#endif + +#endif /* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */ + #endif diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 
b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index 1091b9f1dd070e3d27c269402b43b0a09d96bcdc..2b500d85cebc768373a2dd48c779063262b52570 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -45,16 +45,8 @@ #include #include -#define dmac_map_area __glue(_CACHE,_dma_map_area) -#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) - -extern void dmac_map_area(const void *, size_t, int); -extern void dmac_unmap_area(const void *, size_t, int); - #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32) -#define VCHIQ_ARM_ADDRESS(x) ((void *)((char *)x + g_virt_to_bus_offset)) - #include "vchiq_arm.h" #include "vchiq_2835.h" #include "vchiq_connected.h" @@ -70,13 +62,25 @@ typedef struct vchiq_2835_state_struct { VCHIQ_ARM_STATE_T arm_state; } VCHIQ_2835_ARM_STATE_T; +struct vchiq_pagelist_info { + PAGELIST_T *pagelist; + size_t pagelist_buffer_size; + dma_addr_t dma_addr; + enum dma_data_direction dma_dir; + unsigned int num_pages; + unsigned int pages_need_release; + struct page **pages; + struct scatterlist *scatterlist; + unsigned int scatterlist_mapped; +}; + static void __iomem *g_regs; static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE); static unsigned int g_fragments_size; static char *g_fragments_base; static char *g_free_fragments; static struct semaphore g_free_fragments_sema; -static unsigned long g_virt_to_bus_offset; +static struct device *g_dev; extern int vchiq_arm_log_level; @@ -85,12 +89,13 @@ static DEFINE_SEMAPHORE(g_free_fragments_mutex); static irqreturn_t vchiq_doorbell_irq(int irq, void *dev_id); -static int +static struct vchiq_pagelist_info * create_pagelist(char __user *buf, size_t count, unsigned short type, - struct task_struct *task, PAGELIST_T ** ppagelist); + struct task_struct *task); static void -free_pagelist(PAGELIST_T *pagelist, int actual); +free_pagelist(struct vchiq_pagelist_info *pagelistinfo, + int actual); int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state) { @@ -104,7 +109,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state) int slot_mem_size, frag_mem_size; int err, irq, i; - g_virt_to_bus_offset = virt_to_dma(dev, (void *)0); + /* + * VCHI messages between the CPU and firmware use + * 32-bit bus addresses. + */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + if (err < 0) + return err; (void)of_property_read_u32(dev->of_node, "cache-line-size", &g_cache_line_size); @@ -121,7 +133,7 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state) return -ENOMEM; } - WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0); + WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0); vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size); if (!vchiq_slot_zero) @@ -173,9 +185,10 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state) return err ? 
: -ENXIO; } + g_dev = dev; vchiq_log_info(vchiq_arm_log_level, - "vchiq_init - done (slots %x, phys %pad)", - (unsigned int)vchiq_slot_zero, &slot_phys); + "vchiq_init - done (slots %pK, phys %pad)", + vchiq_slot_zero, &slot_phys); vchiq_call_connected_callbacks(); @@ -213,47 +226,37 @@ remote_event_signal(REMOTE_EVENT_T *event) event->fired = 1; - dsb(); /* data barrier operation */ + dsb(sy); /* data barrier operation */ if (event->armed) writel(0, g_regs + BELL2); /* trigger vc interrupt */ } -int -vchiq_copy_from_user(void *dst, const void *src, int size) -{ - if ((uint32_t)src < TASK_SIZE) { - return copy_from_user(dst, src, size); - } else { - memcpy(dst, src, size); - return 0; - } -} - VCHIQ_STATUS_T vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir) { - PAGELIST_T *pagelist; - int ret; + struct vchiq_pagelist_info *pagelistinfo; WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID); - ret = create_pagelist((char __user *)offset, size, - (dir == VCHIQ_BULK_RECEIVE) - ? PAGELIST_READ - : PAGELIST_WRITE, - current, - &pagelist); - if (ret != 0) + pagelistinfo = create_pagelist((char __user *)offset, size, + (dir == VCHIQ_BULK_RECEIVE) + ? PAGELIST_READ + : PAGELIST_WRITE, + current); + + if (!pagelistinfo) return VCHIQ_ERROR; bulk->handle = memhandle; - bulk->data = VCHIQ_ARM_ADDRESS(pagelist); + bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr; - /* Store the pagelist address in remote_data, which isn't used by the - slave. */ - bulk->remote_data = pagelist; + /* + * Store the pagelistinfo address in remote_data, + * which isn't used by the slave. + */ + bulk->remote_data = pagelistinfo; return VCHIQ_SUCCESS; } @@ -262,7 +265,8 @@ void vchiq_complete_bulk(VCHIQ_BULK_T *bulk) { if (bulk && bulk->remote_data && bulk->actual) - free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual); + free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data, + bulk->actual); } void @@ -350,57 +354,93 @@ vchiq_doorbell_irq(int irq, void *dev_id) return ret; } +static void +cleaup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo) +{ + if (pagelistinfo->scatterlist_mapped) { + dma_unmap_sg(g_dev, pagelistinfo->scatterlist, + pagelistinfo->num_pages, pagelistinfo->dma_dir); + } + + if (pagelistinfo->pages_need_release) { + unsigned int i; + + for (i = 0; i < pagelistinfo->num_pages; i++) + put_page(pagelistinfo->pages[i]); + } + + dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size, + pagelistinfo->pagelist, pagelistinfo->dma_addr); +} + /* There is a potential problem with partial cache lines (pages?) ** at the ends of the block when reading. If the CPU accessed anything in ** the same line (page?) then it may have pulled old data into the cache, ** obscuring the new data underneath. We can solve this by transferring the ** partial cache lines separately, and allowing the ARM to copy into the ** cached area. - -** N.B. This implementation plays slightly fast and loose with the Linux -** driver programming rules, e.g. its use of dmac_map_area instead of -** dma_map_single, but it isn't a multi-platform driver and it benefits -** from increased speed as a result. 
*/ -static int +static struct vchiq_pagelist_info * create_pagelist(char __user *buf, size_t count, unsigned short type, - struct task_struct *task, PAGELIST_T ** ppagelist) + struct task_struct *task) { PAGELIST_T *pagelist; + struct vchiq_pagelist_info *pagelistinfo; struct page **pages; - unsigned long *addrs; - unsigned int num_pages, offset, i; - char *addr, *base_addr, *next_addr; - int run, addridx, actual_pages; - unsigned long *need_release; - - offset = (unsigned int)buf & (PAGE_SIZE - 1); + u32 *addrs; + unsigned int num_pages, offset, i, k; + int actual_pages; + size_t pagelist_size; + struct scatterlist *scatterlist, *sg; + int dma_buffers; + dma_addr_t dma_addr; + + offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1)); num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE; - *ppagelist = NULL; + pagelist_size = sizeof(PAGELIST_T) + + (num_pages * sizeof(u32)) + + (num_pages * sizeof(pages[0]) + + (num_pages * sizeof(struct scatterlist))) + + sizeof(struct vchiq_pagelist_info); /* Allocate enough storage to hold the page pointers and the page ** list */ - pagelist = kmalloc(sizeof(PAGELIST_T) + - (num_pages * sizeof(unsigned long)) + - sizeof(unsigned long) + - (num_pages * sizeof(pages[0])), - GFP_KERNEL); - - vchiq_log_trace(vchiq_arm_log_level, - "create_pagelist - %x", (unsigned int)pagelist); + pagelist = dma_zalloc_coherent(g_dev, + pagelist_size, + &dma_addr, + GFP_KERNEL); + + vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK", + pagelist); if (!pagelist) - return -ENOMEM; + return NULL; - addrs = pagelist->addrs; - need_release = (unsigned long *)(addrs + num_pages); - pages = (struct page **)(addrs + num_pages + 1); + addrs = pagelist->addrs; + pages = (struct page **)(addrs + num_pages); + scatterlist = (struct scatterlist *)(pages + num_pages); + pagelistinfo = (struct vchiq_pagelist_info *) + (scatterlist + num_pages); + + pagelist->length = count; + pagelist->type = type; + pagelist->offset = offset; + + /* Populate the fields of the pagelistinfo structure */ + pagelistinfo->pagelist = pagelist; + pagelistinfo->pagelist_buffer_size = pagelist_size; + pagelistinfo->dma_addr = dma_addr; + pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE; + pagelistinfo->num_pages = num_pages; + pagelistinfo->pages_need_release = 0; + pagelistinfo->pages = pages; + pagelistinfo->scatterlist = scatterlist; + pagelistinfo->scatterlist_mapped = 0; if (is_vmalloc_addr(buf)) { - int dir = (type == PAGELIST_WRITE) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE; unsigned long length = count; unsigned int off = offset; @@ -413,14 +453,13 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, if (bytes > length) bytes = length; pages[actual_pages] = pg; - dmac_map_area(page_address(pg) + off, bytes, dir); length -= bytes; off = 0; } - *need_release = 0; /* do not try and release vmalloc pages */ + /* do not try and release vmalloc pages */ } else { down_read(&task->mm->mmap_sem); - actual_pages = get_user_pages(task, task->mm, + actual_pages = get_user_pages( (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages, (type == PAGELIST_READ) ? 
FOLL_WRITE : 0, @@ -438,44 +477,59 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, while (actual_pages > 0) { actual_pages--; - page_cache_release(pages[actual_pages]); + put_page(pages[actual_pages]); } - kfree(pagelist); - if (actual_pages == 0) - actual_pages = -ENOMEM; - return actual_pages; + cleaup_pagelistinfo(pagelistinfo); + return NULL; } - *need_release = 1; /* release user pages */ + /* release user pages */ + pagelistinfo->pages_need_release = 1; } - pagelist->length = count; - pagelist->type = type; - pagelist->offset = offset; - - /* Group the pages into runs of contiguous pages */ - - base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0])); - next_addr = base_addr + PAGE_SIZE; - addridx = 0; - run = 0; + /* + * Initialize the scatterlist so that the magic cookie + * is filled if debugging is enabled + */ + sg_init_table(scatterlist, num_pages); + /* Now set the pages for each scatterlist */ + for (i = 0; i < num_pages; i++) + sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0); + + dma_buffers = dma_map_sg(g_dev, + scatterlist, + num_pages, + pagelistinfo->dma_dir); + + if (dma_buffers == 0) { + cleaup_pagelistinfo(pagelistinfo); + return NULL; + } - for (i = 1; i < num_pages; i++) { - addr = VCHIQ_ARM_ADDRESS(page_address(pages[i])); - if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) { - next_addr += PAGE_SIZE; - run++; + pagelistinfo->scatterlist_mapped = 1; + + /* Combine adjacent blocks for performance */ + k = 0; + for_each_sg(scatterlist, sg, dma_buffers, i) { + u32 len = sg_dma_len(sg); + u32 addr = sg_dma_address(sg); + + /* Note: addrs is the address + page_count - 1 + * The firmware expects the block to be page + * aligned and a multiple of the page size + */ + WARN_ON(len == 0); + WARN_ON(len & ~PAGE_MASK); + WARN_ON(addr & ~PAGE_MASK); + if (k > 0 && + ((addrs[k - 1] & PAGE_MASK) | + ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT) + == addr) { + addrs[k - 1] += (len >> PAGE_SHIFT); } else { - addrs[addridx] = (unsigned long)base_addr + run; - addridx++; - base_addr = addr; - next_addr = addr + PAGE_SIZE; - run = 0; + addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1); } } - addrs[addridx] = (unsigned long)base_addr + run; - addridx++; - /* Partial cache lines (fragments) require special measures */ if ((type == PAGELIST_READ) && ((pagelist->offset & (g_cache_line_size - 1)) || @@ -484,8 +538,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, char *fragments; if (down_interruptible(&g_free_fragments_sema) != 0) { - kfree(pagelist); - return -EINTR; + cleaup_pagelistinfo(pagelistinfo); + return NULL; } WARN_ON(g_free_fragments == NULL); @@ -499,29 +553,28 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, (fragments - g_fragments_base) / g_fragments_size; } - dmac_flush_range(pagelist, addrs + num_pages); - - *ppagelist = pagelist; - - return 0; + return pagelistinfo; } static void -free_pagelist(PAGELIST_T *pagelist, int actual) +free_pagelist(struct vchiq_pagelist_info *pagelistinfo, + int actual) { - unsigned long *need_release; - struct page **pages; - unsigned int num_pages, i; - - vchiq_log_trace(vchiq_arm_log_level, - "free_pagelist - %x, %d", (unsigned int)pagelist, actual); + unsigned int i; + PAGELIST_T *pagelist = pagelistinfo->pagelist; + struct page **pages = pagelistinfo->pages; + unsigned int num_pages = pagelistinfo->num_pages; - num_pages = - (pagelist->length + pagelist->offset + PAGE_SIZE - 1) / - PAGE_SIZE; + vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d", + 
pagelistinfo->pagelist, actual); - need_release = (unsigned long *)(pagelist->addrs + num_pages); - pages = (struct page **)(pagelist->addrs + num_pages + 1); + /* + * NOTE: dma_unmap_sg must be called before the + * cpu can touch any of the data/pages. + */ + dma_unmap_sg(g_dev, pagelistinfo->scatterlist, + pagelistinfo->num_pages, pagelistinfo->dma_dir); + pagelistinfo->scatterlist_mapped = 0; /* Deal with any partial cache lines (fragments) */ if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) { @@ -559,27 +612,12 @@ free_pagelist(PAGELIST_T *pagelist, int actual) up(&g_free_fragments_sema); } - if (*need_release) { - unsigned int length = pagelist->length; - unsigned int offset = pagelist->offset; - - for (i = 0; i < num_pages; i++) { - struct page *pg = pages[i]; - - if (pagelist->type != PAGELIST_WRITE) { - unsigned int bytes = PAGE_SIZE - offset; - - if (bytes > length) - bytes = length; - dmac_unmap_area(page_address(pg) + offset, - bytes, DMA_FROM_DEVICE); - length -= bytes; - offset = 0; - set_page_dirty(pg); - } - page_cache_release(pg); - } + /* Need to mark all the pages dirty. */ + if (pagelist->type != PAGELIST_WRITE && + pagelistinfo->pages_need_release) { + for (i = 0; i < num_pages; i++) + set_page_dirty(pages[i]); } - kfree(pagelist); + cleaup_pagelistinfo(pagelistinfo); } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 7b6cd4d80621e38ff6d47fcd87b45fbe9cd4259b..0d987898b4f86abbd7d3f73046ef4ae5ffff5452 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -190,8 +190,8 @@ static const char *const ioctl_names[] = { "CLOSE_DELIVERED" }; -vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) == - (VCHIQ_IOC_MAX + 1)); +vchiq_static_assert(ARRAY_SIZE(ioctl_names) == + (VCHIQ_IOC_MAX + 1)); static void dump_phys_mem(void *virt_addr, uint32_t num_bytes); @@ -402,6 +402,107 @@ static void close_delivered(USER_SERVICE_T *user_service) } } +struct vchiq_io_copy_callback_context { + VCHIQ_ELEMENT_T *current_element; + size_t current_element_offset; + unsigned long elements_to_go; + size_t current_offset; +}; + +static ssize_t +vchiq_ioc_copy_element_data( + void *context, + void *dest, + size_t offset, + size_t maxsize) +{ + long res; + size_t bytes_this_round; + struct vchiq_io_copy_callback_context *copy_context = + (struct vchiq_io_copy_callback_context *)context; + + if (offset != copy_context->current_offset) + return 0; + + if (!copy_context->elements_to_go) + return 0; + + /* + * Complex logic here to handle the case of 0 size elements + * in the middle of the array of elements. + * + * Need to skip over these 0 size elements. + */ + while (1) { + bytes_this_round = min(copy_context->current_element->size - + copy_context->current_element_offset, + maxsize); + + if (bytes_this_round) + break; + + copy_context->elements_to_go--; + copy_context->current_element++; + copy_context->current_element_offset = 0; + + if (!copy_context->elements_to_go) + return 0; + } + + res = copy_from_user(dest, + copy_context->current_element->data + + copy_context->current_element_offset, + bytes_this_round); + + if (res != 0) + return -EFAULT; + + copy_context->current_element_offset += bytes_this_round; + copy_context->current_offset += bytes_this_round; + + /* + * Check if done with current element, and if so advance to the next. 
+ */ + if (copy_context->current_element_offset == + copy_context->current_element->size) { + copy_context->elements_to_go--; + copy_context->current_element++; + copy_context->current_element_offset = 0; + } + + return bytes_this_round; +} + +/************************************************************************** + * + * vchiq_ioc_queue_message + * + **************************************************************************/ +static VCHIQ_STATUS_T +vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle, + VCHIQ_ELEMENT_T *elements, + unsigned long count) +{ + struct vchiq_io_copy_callback_context context; + unsigned long i; + size_t total_size = 0; + + context.current_element = elements; + context.current_element_offset = 0; + context.elements_to_go = count; + context.current_offset = 0; + + for (i = 0; i < count; i++) { + if (!elements[i].data && elements[i].size != 0) + return -EFAULT; + + total_size += elements[i].size; + } + + return vchiq_queue_message(handle, vchiq_ioc_copy_element_data, + &context, total_size); +} + /**************************************************************************** * * vchiq_ioctl @@ -418,8 +519,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) DEBUG_INITIALISE(g_state.local) vchiq_log_trace(vchiq_arm_log_level, - "vchiq_ioctl - instance %x, cmd %s, arg %lx", - (unsigned int)instance, + "vchiq_ioctl - instance %pK, cmd %s, arg %lx", + instance, ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ? ioctl_names[_IOC_NR(cmd)] : "", arg); @@ -453,7 +554,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ret = -EINVAL; break; } - rc = mutex_lock_interruptible(&instance->state->mutex); + rc = mutex_lock_killable(&instance->state->mutex); if (rc != 0) { vchiq_log_error(vchiq_arm_log_level, "vchiq: connect: could not lock mutex for " @@ -651,7 +752,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) VCHIQ_ELEMENT_T elements[MAX_ELEMENTS]; if (copy_from_user(elements, args.elements, args.count * sizeof(VCHIQ_ELEMENT_T)) == 0) - status = vchiq_queue_message + status = vchiq_ioc_queue_message (args.handle, elements, args.count); else @@ -713,8 +814,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } vchiq_log_info(vchiq_arm_log_level, - "found bulk_waiter %x for pid %d", - (unsigned int)waiter, current->pid); + "found bulk_waiter %pK for pid %d", waiter, + current->pid); args.userdata = &waiter->bulk_waiter; } status = vchiq_bulk_transfer @@ -743,8 +844,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) list_add(&waiter->list, &instance->bulk_waiter_list); mutex_unlock(&instance->bulk_waiter_list_mutex); vchiq_log_info(vchiq_arm_log_level, - "saved bulk_waiter %x for pid %d", - (unsigned int)waiter, current->pid); + "saved bulk_waiter %pK for pid %d", + waiter, current->pid); if (copy_to_user((void __user *) &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *) @@ -826,10 +927,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (args.msgbufsize < msglen) { vchiq_log_error( vchiq_arm_log_level, - "header %x: msgbufsize" - " %x < msglen %x", - (unsigned int)header, - args.msgbufsize, + "header %pK: msgbufsize %x < msglen %x", + header, args.msgbufsize, msglen); WARN(1, "invalid message " "size\n"); @@ -980,9 +1079,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ret = -EFAULT; } else { vchiq_log_error(vchiq_arm_log_level, - "header %x: bufsize %x < size %x", - (unsigned int)header, 
args.bufsize, - header->size); + "header %pK: bufsize %x < size %x", + header, args.bufsize, header->size); WARN(1, "invalid size\n"); ret = -EMSGSIZE; } @@ -1284,9 +1382,8 @@ vchiq_release(struct inode *inode, struct file *file) list); list_del(pos); vchiq_log_info(vchiq_arm_log_level, - "bulk_waiter - cleaned up %x " - "for pid %d", - (unsigned int)waiter, waiter->pid); + "bulk_waiter - cleaned up %pK for pid %d", + waiter, waiter->pid); kfree(waiter); } } @@ -1385,9 +1482,8 @@ vchiq_dump_platform_instances(void *dump_context) instance = service->instance; if (instance && !instance->mark) { len = snprintf(buf, sizeof(buf), - "Instance %x: pid %d,%s completions " - "%d/%d", - (unsigned int)instance, instance->pid, + "Instance %pK: pid %d,%s completions %d/%d", + instance, instance->pid, instance->connected ? " connected, " : "", instance->completion_insert - @@ -1415,8 +1511,7 @@ vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service) char buf[80]; int len; - len = snprintf(buf, sizeof(buf), " instance %x", - (unsigned int)service->instance); + len = snprintf(buf, sizeof(buf), " instance %pK", service->instance); if ((service->base.callback == service_callback) && user_service->is_vchi) { @@ -1473,8 +1568,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes) } down_read(¤t->mm->mmap_sem); - rc = get_user_pages(current, /* task */ - current->mm, /* mm */ + rc = get_user_pages( (unsigned long)virt_addr, /* start */ num_pages, /* len */ 0, /* gup_flags */ @@ -1485,6 +1579,12 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes) prev_idx = -1; page = NULL; + if (rc < 0) { + vchiq_log_error(vchiq_arm_log_level, + "Failed to get user pages: %d\n", rc); + goto out; + } + while (offset < end_offset) { int page_offset = offset % PAGE_SIZE; @@ -1508,11 +1608,13 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes) offset += 16; } + +out: if (page != NULL) kunmap(page); for (page_idx = 0; page_idx < num_pages; page_idx++) - page_cache_release(pages[page_idx]); + put_page(pages[page_idx]); kfree(pages); } @@ -1683,8 +1785,6 @@ vchiq_keepalive_thread_func(void *v) VCHIQ_STATUS_T vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state) { - VCHIQ_STATUS_T status = VCHIQ_SUCCESS; - if (arm_state) { rwlock_init(&arm_state->susp_res_lock); @@ -1712,14 +1812,13 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state) arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS; arm_state->suspend_timer_running = 0; - init_timer(&arm_state->suspend_timer); - arm_state->suspend_timer.data = (unsigned long)(state); - arm_state->suspend_timer.function = suspend_timer_callback; + setup_timer(&arm_state->suspend_timer, suspend_timer_callback, + (unsigned long)(state)); arm_state->first_connect = 0; } - return status; + return VCHIQ_SUCCESS; } /* @@ -2032,20 +2131,20 @@ static void output_timeout_error(VCHIQ_STATE_T *state) { VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); - char service_err[50] = ""; + char err[50] = ""; int vc_use_count = arm_state->videocore_use_count; int active_services = state->unused_service; int i; if (!arm_state->videocore_use_count) { - snprintf(service_err, 50, " Videocore usecount is 0"); + snprintf(err, sizeof(err), " Videocore usecount is 0"); goto output_msg; } for (i = 0; i < active_services; i++) { VCHIQ_SERVICE_T *service_ptr = state->services[i]; if (service_ptr && service_ptr->service_use_count && (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) { - snprintf(service_err, 50, " %c%c%c%c(%d) service 
has " + snprintf(err, sizeof(err), " %c%c%c%c(%d) service has " "use count %d%s", VCHIQ_FOURCC_AS_4CHARS( service_ptr->base.fourcc), service_ptr->client_id, @@ -2059,7 +2158,7 @@ output_timeout_error(VCHIQ_STATE_T *state) output_msg: vchiq_log_error(vchiq_susp_log_level, "timed out waiting for vc suspend (%d).%s", - arm_state->autosuspend_override, service_err); + arm_state->autosuspend_override, err); } @@ -2780,7 +2879,7 @@ void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state, &vchiq_keepalive_thread_func, (void *)state, threadname); - if (arm_state->ka_thread == NULL) { + if (IS_ERR(arm_state->ka_thread)) { vchiq_log_error(vchiq_susp_log_level, "vchiq: FATAL: couldn't create thread %s", threadname); @@ -2800,28 +2899,27 @@ static int vchiq_probe(struct platform_device *pdev) void *ptr_err; fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0); -/* Remove comment when booting without Device Tree is no longer supported if (!fw_node) { dev_err(&pdev->dev, "Missing firmware node\n"); return -ENOENT; } -*/ + fw = rpi_firmware_get(fw_node); + of_node_put(fw_node); if (!fw) return -EPROBE_DEFER; platform_set_drvdata(pdev, fw); - /* create debugfs entries */ - err = vchiq_debugfs_init(); + err = vchiq_platform_init(pdev, &g_state); if (err != 0) - goto failed_debugfs_init; + goto failed_platform_init; err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME); if (err != 0) { vchiq_log_error(vchiq_arm_log_level, "Unable to allocate device number"); - goto failed_alloc_chrdev; + goto failed_platform_init; } cdev_init(&vchiq_cdev, &vchiq_fops); vchiq_cdev.owner = THIS_MODULE; @@ -2844,9 +2942,10 @@ static int vchiq_probe(struct platform_device *pdev) if (IS_ERR(ptr_err)) goto failed_device_create; - err = vchiq_platform_init(pdev, &g_state); + /* create debugfs entries */ + err = vchiq_debugfs_init(); if (err != 0) - goto failed_platform_init; + goto failed_debugfs_init; vchiq_log_info(vchiq_arm_log_level, "vchiq: initialised - version %d (min %d), device %d.%d", @@ -2855,7 +2954,7 @@ static int vchiq_probe(struct platform_device *pdev) return 0; -failed_platform_init: +failed_debugfs_init: device_destroy(vchiq_class, vchiq_devid); failed_device_create: class_destroy(vchiq_class); @@ -2864,15 +2963,14 @@ static int vchiq_probe(struct platform_device *pdev) err = PTR_ERR(ptr_err); failed_cdev_add: unregister_chrdev_region(vchiq_devid, 1); -failed_alloc_chrdev: - vchiq_debugfs_deinit(); -failed_debugfs_init: +failed_platform_init: vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq"); return err; } static int vchiq_remove(struct platform_device *pdev) { + vchiq_debugfs_deinit(); device_destroy(vchiq_class, vchiq_devid); class_destroy(vchiq_class); cdev_del(&vchiq_cdev); @@ -2890,7 +2988,6 @@ MODULE_DEVICE_TABLE(of, vchiq_of_match); static struct platform_driver vchiq_driver = { .driver = { .name = "bcm2835_vchiq", - .owner = THIS_MODULE, .of_match_table = vchiq_of_match, }, .probe = vchiq_probe, @@ -2899,4 +2996,5 @@ static struct platform_driver vchiq_driver = { module_platform_driver(vchiq_driver); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Videocore VCHIQ driver"); MODULE_AUTHOR("Broadcom Corporation"); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c index 5efc62ffb2f5e441cb307d0a575565338b407249..7ea29665bd0c1da25b4cc2e407b3ded8a13f65c9 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c +++ 
b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c @@ -72,7 +72,7 @@ void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback) { connected_init(); - if (mutex_lock_interruptible(&g_connected_mutex) != 0) + if (mutex_lock_killable(&g_connected_mutex) != 0) return; if (g_connected) @@ -107,7 +107,7 @@ void vchiq_call_connected_callbacks(void) connected_init(); - if (mutex_lock_interruptible(&g_connected_mutex) != 0) + if (mutex_lock_killable(&g_connected_mutex) != 0) return; for (i = 0; i < g_num_deferred_callbacks; i++) diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 2c98da4307dff994a00dc246574ef0aaee05d5da..028e90bc1cdcc3f305ce5b97b88dd5c6c4613a50 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -296,12 +296,13 @@ lock_service(VCHIQ_SERVICE_T *service) void unlock_service(VCHIQ_SERVICE_T *service) { - VCHIQ_STATE_T *state = service->state; spin_lock(&service_spinlock); BUG_ON(!service || (service->ref_count == 0)); if (service && service->ref_count) { service->ref_count--; if (!service->ref_count) { + VCHIQ_STATE_T *state = service->state; + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); state->services[service->localport] = NULL; } else @@ -380,9 +381,9 @@ make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header, void *bulk_userdata) { VCHIQ_STATUS_T status; - vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)", + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)", service->state->id, service->localport, reason_names[reason], - (unsigned int)header, (unsigned int)bulk_userdata); + header, bulk_userdata); status = service->base.callback(reason, header, service->handle, bulk_userdata); if (status == VCHIQ_ERROR) { @@ -406,28 +407,24 @@ vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate) } static inline void -remote_event_create(REMOTE_EVENT_T *event) +remote_event_create(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event) { event->armed = 0; /* Don't clear the 'fired' flag because it may already have been set ** by the other side. 
*/ - sema_init(event->event, 0); -} - -static inline void -remote_event_destroy(REMOTE_EVENT_T *event) -{ - (void)event; + sema_init((struct semaphore *)((char *)state + event->event), 0); } static inline int -remote_event_wait(REMOTE_EVENT_T *event) +remote_event_wait(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event) { if (!event->fired) { event->armed = 1; - dsb(); + dsb(sy); if (!event->fired) { - if (down_interruptible(event->event) != 0) { + if (down_interruptible( + (struct semaphore *) + ((char *)state + event->event)) != 0) { event->armed = 0; return 0; } @@ -441,34 +438,34 @@ remote_event_wait(REMOTE_EVENT_T *event) } static inline void -remote_event_signal_local(REMOTE_EVENT_T *event) +remote_event_signal_local(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event) { event->armed = 0; - up(event->event); + up((struct semaphore *)((char *)state + event->event)); } static inline void -remote_event_poll(REMOTE_EVENT_T *event) +remote_event_poll(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event) { if (event->fired && event->armed) - remote_event_signal_local(event); + remote_event_signal_local(state, event); } void remote_event_pollall(VCHIQ_STATE_T *state) { - remote_event_poll(&state->local->sync_trigger); - remote_event_poll(&state->local->sync_release); - remote_event_poll(&state->local->trigger); - remote_event_poll(&state->local->recycle); + remote_event_poll(state, &state->local->sync_trigger); + remote_event_poll(state, &state->local->sync_release); + remote_event_poll(state, &state->local->trigger); + remote_event_poll(state, &state->local->recycle); } /* Round up message sizes so that any space at the end of a slot is always big ** enough for a header. This relies on header size being a power of two, which ** has been verified earlier by a static assertion. */ -static inline unsigned int -calc_stride(unsigned int size) +static inline size_t +calc_stride(size_t size) { /* Allow room for the header */ size += sizeof(VCHIQ_HEADER_T); @@ -541,13 +538,13 @@ request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type) wmb(); /* ... and ensure the slot handler runs. 
*/ - remote_event_signal_local(&state->local->trigger); + remote_event_signal_local(state, &state->local->trigger); } /* Called from queue_message, by the slot handler and application threads, ** with slot_mutex held */ static VCHIQ_HEADER_T * -reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking) +reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking) { VCHIQ_SHARED_STATE_T *local = state->local; int tx_pos = state->local_tx_pos; @@ -626,8 +623,8 @@ process_free_queue(VCHIQ_STATE_T *state) char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index); int data_found = 0; - vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x", - state->id, slot_index, (unsigned int)data, + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x", + state->id, slot_index, data, local->slot_queue_recycle, slot_queue_available); /* Initialise the bitmask for services which have used this @@ -659,16 +656,10 @@ process_free_queue(VCHIQ_STATE_T *state) up(&service_quota->quota_event); else if (count == 0) { vchiq_log_error(vchiq_core_log_level, - "service %d " - "message_use_count=%d " - "(header %x, msgid %x, " - "header->msgid %x, " - "header->size %x)", + "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)", port, - service_quota-> - message_use_count, - (unsigned int)header, msgid, - header->msgid, + service_quota->message_use_count, + header, msgid, header->msgid, header->size); WARN(1, "invalid message use count\n"); } @@ -690,26 +681,16 @@ process_free_queue(VCHIQ_STATE_T *state) up(&service_quota->quota_event); vchiq_log_trace( vchiq_core_log_level, - "%d: pfq:%d %x@%x - " - "slot_use->%d", + "%d: pfq:%d %x@%pK - slot_use->%d", state->id, port, - header->size, - (unsigned int)header, + header->size, header, count - 1); } else { vchiq_log_error( vchiq_core_log_level, - "service %d " - "slot_use_count" - "=%d (header %x" - ", msgid %x, " - "header->msgid" - " %x, header->" - "size %x)", - port, count, - (unsigned int)header, - msgid, - header->msgid, + "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)", + port, count, header, + msgid, header->msgid, header->size); WARN(1, "bad slot use count\n"); } @@ -721,10 +702,9 @@ process_free_queue(VCHIQ_STATE_T *state) pos += calc_stride(header->size); if (pos > VCHIQ_SLOT_SIZE) { vchiq_log_error(vchiq_core_log_level, - "pfq - pos %x: header %x, msgid %x, " - "header->msgid %x, header->size %x", - pos, (unsigned int)header, msgid, - header->msgid, header->size); + "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x", + pos, header, msgid, header->msgid, + header->size); WARN(1, "invalid slot position\n"); } } @@ -746,18 +726,66 @@ process_free_queue(VCHIQ_STATE_T *state) } } +static ssize_t +memcpy_copy_callback( + void *context, void *dest, + size_t offset, size_t maxsize) +{ + void *src = context; + + memcpy(dest + offset, src + offset, maxsize); + return maxsize; +} + +static ssize_t +copy_message_data( + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + void *dest, + size_t size) +{ + size_t pos = 0; + + while (pos < size) { + ssize_t callback_result; + size_t max_bytes = size - pos; + + callback_result = + copy_callback(context, dest + pos, + pos, max_bytes); + + if (callback_result < 0) + return callback_result; + + if (!callback_result) + return -EIO; + + if (callback_result > max_bytes) + return -EIO; + + pos += callback_result; + } + + return size; +} + /* Called by the slot 
handler and application threads */ static VCHIQ_STATUS_T queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, - int msgid, const VCHIQ_ELEMENT_T *elements, - int count, int size, int flags) + int msgid, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + size_t size, + int flags) { VCHIQ_SHARED_STATE_T *local; VCHIQ_SERVICE_QUOTA_T *service_quota = NULL; VCHIQ_HEADER_T *header; int type = VCHIQ_MSG_TYPE(msgid); - unsigned int stride; + size_t stride; local = state->local; @@ -766,7 +794,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, WARN_ON(!(stride <= VCHIQ_SLOT_SIZE)); if (!(flags & QMFLAGS_NO_MUTEX_LOCK) && - (mutex_lock_interruptible(&state->slot_mutex) != 0)) + (mutex_lock_killable(&state->slot_mutex) != 0)) return VCHIQ_RETRY; if (type == VCHIQ_MSG_DATA) { @@ -822,7 +850,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, service_quota->slot_quota))) { spin_unlock("a_spinlock); vchiq_log_trace(vchiq_core_log_level, - "%d: qm:%d %s,%x - quota stall " + "%d: qm:%d %s,%zx - quota stall " "(msg %d, slot %d)", state->id, service->localport, msg_type_str(type), size, @@ -835,7 +863,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, return VCHIQ_RETRY; if (service->closing) return VCHIQ_ERROR; - if (mutex_lock_interruptible(&state->slot_mutex) != 0) + if (mutex_lock_killable(&state->slot_mutex) != 0) return VCHIQ_RETRY; if (service->srvstate != VCHIQ_SRVSTATE_OPEN) { /* The service has been closed */ @@ -863,43 +891,37 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, } if (type == VCHIQ_MSG_DATA) { - int i, pos; + ssize_t callback_result; int tx_end_index; int slot_use_count; vchiq_log_info(vchiq_core_log_level, - "%d: qm %s@%x,%x (%d->%d)", - state->id, - msg_type_str(VCHIQ_MSG_TYPE(msgid)), - (unsigned int)header, size, - VCHIQ_MSG_SRCPORT(msgid), + "%d: qm %s@%pK,%zx (%d->%d)", + state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), + header, size, VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid)); BUG_ON(!service); BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK | QMFLAGS_NO_MUTEX_UNLOCK)) != 0); - for (i = 0, pos = 0; i < (unsigned int)count; - pos += elements[i++].size) - if (elements[i].size) { - if (vchiq_copy_from_user - (header->data + pos, elements[i].data, - (size_t) elements[i].size) != - VCHIQ_SUCCESS) { - mutex_unlock(&state->slot_mutex); - VCHIQ_SERVICE_STATS_INC(service, + callback_result = + copy_message_data(copy_callback, context, + header->data, size); + + if (callback_result < 0) { + mutex_unlock(&state->slot_mutex); + VCHIQ_SERVICE_STATS_INC(service, error_count); - return VCHIQ_ERROR; - } - if (i == 0) { - if (SRVTRACE_ENABLED(service, - VCHIQ_LOG_INFO)) - vchiq_log_dump_mem("Sent", 0, - header->data + pos, - min(64u, - elements[0].size)); - } - } + return VCHIQ_ERROR; + } + + if (SRVTRACE_ENABLED(service, + VCHIQ_LOG_INFO)) + vchiq_log_dump_mem("Sent", 0, + header->data, + min((size_t)64, + (size_t)callback_result)); spin_lock("a_spinlock); service_quota->message_use_count++; @@ -927,7 +949,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, if (slot_use_count) vchiq_log_trace(vchiq_core_log_level, - "%d: qm:%d %s,%x - slot_use->%d (hdr %p)", + "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)), size, slot_use_count, header); @@ -936,15 +958,22 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); } else { 
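/*
 * Illustrative sketch only -- not part of this patch.  It shows how a caller
 * can drive the copy_callback interface introduced above (copy_message_data()
 * and the new queue_message()/vchiq_queue_message() signatures) instead of
 * the old VCHIQ_ELEMENT_T array: the callback gathers a fixed header plus a
 * payload straight into the slot reserved by queue_message().  The
 * example_* names are invented for the example; vchiq_queue_message() and
 * memcpy_copy_callback() come from this patch.
 */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include "vchiq_if.h"

struct example_hdr {
	u32 opcode;
	u32 arg;
};

struct example_msg {
	struct example_hdr hdr;
	const void *payload;
	size_t payload_len;
};

/* Gather header + payload in a single pass, as memcpy_copy_callback() does. */
static ssize_t example_msg_copy(void *context, void *dest,
				size_t offset, size_t maxsize)
{
	struct example_msg *msg = context;
	size_t total = sizeof(msg->hdr) + msg->payload_len;

	if (offset != 0 || maxsize < total)
		return -EIO;	/* single full-size pass only */

	memcpy(dest, &msg->hdr, sizeof(msg->hdr));
	memcpy((char *)dest + sizeof(msg->hdr), msg->payload, msg->payload_len);
	return total;
}

static VCHIQ_STATUS_T example_send(VCHIQ_SERVICE_HANDLE_T handle,
				   struct example_msg *msg)
{
	/* For a single contiguous kernel buffer, passing memcpy_copy_callback
	 * with the buffer itself as context is the equivalent call. */
	return vchiq_queue_message(handle, example_msg_copy, msg,
				   sizeof(msg->hdr) + msg->payload_len);
}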
vchiq_log_info(vchiq_core_log_level, - "%d: qm %s@%x,%x (%d->%d)", state->id, + "%d: qm %s@%pK,%zx (%d->%d)", state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), - (unsigned int)header, size, - VCHIQ_MSG_SRCPORT(msgid), + header, size, VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid)); if (size != 0) { - WARN_ON(!((count == 1) && (size == elements[0].size))); - memcpy(header->data, elements[0].data, - elements[0].size); + /* It is assumed for now that this code path + * only happens from calls inside this file. + * + * External callers are through the vchiq_queue_message + * path which always sets the type to be VCHIQ_MSG_DATA + * + * At first glance this appears to be correct but + * more review is needed. + */ + copy_message_data(copy_callback, context, + header->data, size); } VCHIQ_STATS_INC(state, ctrl_tx_count); } @@ -960,7 +989,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); vchiq_log_info(SRVTRACE_LEVEL(service), - "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d", + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu", msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid), VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), @@ -990,19 +1019,24 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, /* Called by the slot handler and application threads */ static VCHIQ_STATUS_T queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, - int msgid, const VCHIQ_ELEMENT_T *elements, - int count, int size, int is_blocking) + int msgid, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + int size, + int is_blocking) { VCHIQ_SHARED_STATE_T *local; VCHIQ_HEADER_T *header; + ssize_t callback_result; local = state->local; if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) && - (mutex_lock_interruptible(&state->sync_mutex) != 0)) + (mutex_lock_killable(&state->sync_mutex) != 0)) return VCHIQ_RETRY; - remote_event_wait(&local->sync_release); + remote_event_wait(state, &local->sync_release); rmb(); @@ -1017,52 +1051,34 @@ queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, state->id, oldmsgid); } - if (service) { - int i, pos; + vchiq_log_info(vchiq_sync_log_level, + "%d: qms %s@%pK,%x (%d->%d)", state->id, + msg_type_str(VCHIQ_MSG_TYPE(msgid)), + header, size, VCHIQ_MSG_SRCPORT(msgid), + VCHIQ_MSG_DSTPORT(msgid)); - vchiq_log_info(vchiq_sync_log_level, - "%d: qms %s@%x,%x (%d->%d)", state->id, - msg_type_str(VCHIQ_MSG_TYPE(msgid)), - (unsigned int)header, size, - VCHIQ_MSG_SRCPORT(msgid), - VCHIQ_MSG_DSTPORT(msgid)); + callback_result = + copy_message_data(copy_callback, context, + header->data, size); - for (i = 0, pos = 0; i < (unsigned int)count; - pos += elements[i++].size) - if (elements[i].size) { - if (vchiq_copy_from_user - (header->data + pos, elements[i].data, - (size_t) elements[i].size) != - VCHIQ_SUCCESS) { - mutex_unlock(&state->sync_mutex); - VCHIQ_SERVICE_STATS_INC(service, - error_count); - return VCHIQ_ERROR; - } - if (i == 0) { - if (vchiq_sync_log_level >= - VCHIQ_LOG_TRACE) - vchiq_log_dump_mem("Sent Sync", - 0, header->data + pos, - min(64u, - elements[0].size)); - } - } + if (callback_result < 0) { + mutex_unlock(&state->slot_mutex); + VCHIQ_SERVICE_STATS_INC(service, + error_count); + return VCHIQ_ERROR; + } + + if (service) { + if (SRVTRACE_ENABLED(service, + VCHIQ_LOG_INFO)) + vchiq_log_dump_mem("Sent", 0, + header->data, + min((size_t)64, + (size_t)callback_result)); VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); 
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); } else { - vchiq_log_info(vchiq_sync_log_level, - "%d: qms %s@%x,%x (%d->%d)", state->id, - msg_type_str(VCHIQ_MSG_TYPE(msgid)), - (unsigned int)header, size, - VCHIQ_MSG_SRCPORT(msgid), - VCHIQ_MSG_DSTPORT(msgid)); - if (size != 0) { - WARN_ON(!((count == 1) && (size == elements[0].size))); - memcpy(header->data, elements[0].data, - elements[0].size); - } VCHIQ_STATS_INC(state, ctrl_tx_count); } @@ -1175,11 +1191,16 @@ notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue, VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE; int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport, service->remoteport); - VCHIQ_ELEMENT_T element = { &bulk->actual, 4 }; /* Only reply to non-dummy bulk requests */ if (bulk->remote_data) { - status = queue_message(service->state, NULL, - msgid, &element, 1, 4, 0); + status = queue_message( + service->state, + NULL, + msgid, + memcpy_copy_callback, + &bulk->actual, + 4, + 0); if (status != VCHIQ_SUCCESS) break; } @@ -1344,7 +1365,7 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue) WARN_ON(!((int)(queue->local_insert - queue->process) > 0)); WARN_ON(!((int)(queue->remote_insert - queue->process) > 0)); - rc = mutex_lock_interruptible(&state->bulk_transfer_mutex); + rc = mutex_lock_killable(&state->bulk_transfer_mutex); if (rc != 0) break; @@ -1356,26 +1377,22 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue) "Send Bulk to" : "Recv Bulk from"; if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) vchiq_log_info(SRVTRACE_LEVEL(service), - "%s %c%c%c%c d:%d len:%d %x<->%x", + "%s %c%c%c%c d:%d len:%d %pK<->%pK", header, VCHIQ_FOURCC_AS_4CHARS( service->base.fourcc), - service->remoteport, - bulk->size, - (unsigned int)bulk->data, - (unsigned int)bulk->remote_data); + service->remoteport, bulk->size, + bulk->data, bulk->remote_data); else vchiq_log_info(SRVTRACE_LEVEL(service), "%s %c%c%c%c d:%d ABORTED - tx len:%d," - " rx len:%d %x<->%x", + " rx len:%d %pK<->%pK", header, VCHIQ_FOURCC_AS_4CHARS( service->base.fourcc), service->remoteport, - bulk->size, - bulk->remote_size, - (unsigned int)bulk->data, - (unsigned int)bulk->remote_data); + bulk->size, bulk->remote_size, + bulk->data, bulk->remote_data); } vchiq_complete_bulk(bulk); @@ -1511,9 +1528,8 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header) fourcc = payload->fourcc; vchiq_log_info(vchiq_core_log_level, - "%d: prs OPEN@%x (%d->'%c%c%c%c')", - state->id, (unsigned int)header, - localport, + "%d: prs OPEN@%pK (%d->'%c%c%c%c')", + state->id, header, localport, VCHIQ_FOURCC_AS_4CHARS(fourcc)); service = get_listening_service(state, fourcc); @@ -1544,10 +1560,6 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header) struct vchiq_openack_payload ack_payload = { service->version }; - VCHIQ_ELEMENT_T body = { - &ack_payload, - sizeof(ack_payload) - }; if (state->version_common < VCHIQ_VERSION_SYNCHRONOUS_MODE) @@ -1557,21 +1569,28 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header) if (service->sync && (state->version_common >= VCHIQ_VERSION_SYNCHRONOUS_MODE)) { - if (queue_message_sync(state, NULL, + if (queue_message_sync( + state, + NULL, VCHIQ_MAKE_MSG( VCHIQ_MSG_OPENACK, service->localport, remoteport), - &body, 1, sizeof(ack_payload), + memcpy_copy_callback, + &ack_payload, + sizeof(ack_payload), 0) == VCHIQ_RETRY) goto bail_not_ready; } else { - if (queue_message(state, NULL, - VCHIQ_MAKE_MSG( + if (queue_message(state, + NULL, + VCHIQ_MAKE_MSG( VCHIQ_MSG_OPENACK, service->localport, 
remoteport), - &body, 1, sizeof(ack_payload), + memcpy_copy_callback, + &ack_payload, + sizeof(ack_payload), 0) == VCHIQ_RETRY) goto bail_not_ready; } @@ -1650,7 +1669,7 @@ parse_rx_slots(VCHIQ_STATE_T *state) header = (VCHIQ_HEADER_T *)(state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK)); - DEBUG_VALUE(PARSE_HEADER, (int)header); + DEBUG_VALUE(PARSE_HEADER, (int)(long)header); msgid = header->msgid; DEBUG_VALUE(PARSE_MSGID, msgid); size = header->size; @@ -1684,21 +1703,18 @@ parse_rx_slots(VCHIQ_STATE_T *state) remoteport); if (service) vchiq_log_warning(vchiq_core_log_level, - "%d: prs %s@%x (%d->%d) - " - "found connected service %d", + "%d: prs %s@%pK (%d->%d) - found connected service %d", state->id, msg_type_str(type), - (unsigned int)header, - remoteport, localport, + header, remoteport, localport, service->localport); } if (!service) { vchiq_log_error(vchiq_core_log_level, - "%d: prs %s@%x (%d->%d) - " - "invalid/closed service %d", + "%d: prs %s@%pK (%d->%d) - invalid/closed service %d", state->id, msg_type_str(type), - (unsigned int)header, - remoteport, localport, localport); + header, remoteport, localport, + localport); goto skip_message; } break; @@ -1723,12 +1739,11 @@ parse_rx_slots(VCHIQ_STATE_T *state) min(64, size)); } - if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size) - > VCHIQ_SLOT_SIZE) { + if (((unsigned long)header & VCHIQ_SLOT_MASK) + + calc_stride(size) > VCHIQ_SLOT_SIZE) { vchiq_log_error(vchiq_core_log_level, - "header %x (msgid %x) - size %x too big for " - "slot", - (unsigned int)header, (unsigned int)msgid, + "header %pK (msgid %x) - size %x too big for slot", + header, (unsigned int)msgid, (unsigned int)size); WARN(1, "oversized for slot\n"); } @@ -1747,9 +1762,9 @@ parse_rx_slots(VCHIQ_STATE_T *state) service->peer_version = payload->version; } vchiq_log_info(vchiq_core_log_level, - "%d: prs OPENACK@%x,%x (%d->%d) v:%d", - state->id, (unsigned int)header, size, - remoteport, localport, service->peer_version); + "%d: prs OPENACK@%pK,%x (%d->%d) v:%d", + state->id, header, size, remoteport, localport, + service->peer_version); if (service->srvstate == VCHIQ_SRVSTATE_OPENING) { service->remoteport = remoteport; @@ -1765,9 +1780,8 @@ parse_rx_slots(VCHIQ_STATE_T *state) WARN_ON(size != 0); /* There should be no data */ vchiq_log_info(vchiq_core_log_level, - "%d: prs CLOSE@%x (%d->%d)", - state->id, (unsigned int)header, - remoteport, localport); + "%d: prs CLOSE@%pK (%d->%d)", + state->id, header, remoteport, localport); mark_service_closing_internal(service, 1); @@ -1783,9 +1797,8 @@ parse_rx_slots(VCHIQ_STATE_T *state) break; case VCHIQ_MSG_DATA: vchiq_log_info(vchiq_core_log_level, - "%d: prs DATA@%x,%x (%d->%d)", - state->id, (unsigned int)header, size, - remoteport, localport); + "%d: prs DATA@%pK,%x (%d->%d)", + state->id, header, size, remoteport, localport); if ((service->remoteport == remoteport) && (service->srvstate == @@ -1808,8 +1821,7 @@ parse_rx_slots(VCHIQ_STATE_T *state) break; case VCHIQ_MSG_CONNECT: vchiq_log_info(vchiq_core_log_level, - "%d: prs CONNECT@%x", - state->id, (unsigned int)header); + "%d: prs CONNECT@%pK", state->id, header); state->version_common = ((VCHIQ_SLOT_ZERO_T *) state->slot_data)->version; up(&state->connect); @@ -1827,7 +1839,7 @@ parse_rx_slots(VCHIQ_STATE_T *state) int resolved = 0; DEBUG_TRACE(PARSE_LINE); - if (mutex_lock_interruptible( + if (mutex_lock_killable( &service->bulk_mutex) != 0) { DEBUG_TRACE(PARSE_LINE); goto bail_not_ready; @@ -1838,17 +1850,15 @@ parse_rx_slots(VCHIQ_STATE_T *state) 
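/*
 * Illustrative note only -- not part of this patch.  The logging changes in
 * these hunks replace "(unsigned int)ptr" printed with %x by the %pK pointer
 * format, e.g.:
 *
 *	vchiq_log_info(vchiq_core_log_level, "hdr %x",  (unsigned int)header);
 *	vchiq_log_info(vchiq_core_log_level, "hdr %pK", header);
 *
 * The old cast silently dropped the upper 32 bits of a 64-bit pointer, while
 * %pK prints the full width and lets the kernel suppress the real address for
 * unprivileged readers according to kptr_restrict.
 */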
bulk = &queue->bulks[ BULK_INDEX(queue->remote_insert)]; bulk->remote_data = - (void *)((int *)header->data)[0]; + (void *)(long)((int *)header->data)[0]; bulk->remote_size = ((int *)header->data)[1]; wmb(); vchiq_log_info(vchiq_core_log_level, - "%d: prs %s@%x (%d->%d) %x@%x", + "%d: prs %s@%pK (%d->%d) %x@%pK", state->id, msg_type_str(type), - (unsigned int)header, - remoteport, localport, - bulk->remote_size, - (unsigned int)bulk->remote_data); + header, remoteport, localport, + bulk->remote_size, bulk->remote_data); queue->remote_insert++; @@ -1893,7 +1903,7 @@ parse_rx_slots(VCHIQ_STATE_T *state) &service->bulk_rx : &service->bulk_tx; DEBUG_TRACE(PARSE_LINE); - if (mutex_lock_interruptible( + if (mutex_lock_killable( &service->bulk_mutex) != 0) { DEBUG_TRACE(PARSE_LINE); goto bail_not_ready; @@ -1901,11 +1911,10 @@ parse_rx_slots(VCHIQ_STATE_T *state) if ((int)(queue->remote_insert - queue->local_insert) >= 0) { vchiq_log_error(vchiq_core_log_level, - "%d: prs %s@%x (%d->%d) " + "%d: prs %s@%pK (%d->%d) " "unexpected (ri=%d,li=%d)", state->id, msg_type_str(type), - (unsigned int)header, - remoteport, localport, + header, remoteport, localport, queue->remote_insert, queue->local_insert); mutex_unlock(&service->bulk_mutex); @@ -1921,11 +1930,10 @@ parse_rx_slots(VCHIQ_STATE_T *state) queue->remote_insert++; vchiq_log_info(vchiq_core_log_level, - "%d: prs %s@%x (%d->%d) %x@%x", + "%d: prs %s@%pK (%d->%d) %x@%pK", state->id, msg_type_str(type), - (unsigned int)header, - remoteport, localport, - bulk->actual, (unsigned int)bulk->data); + header, remoteport, localport, + bulk->actual, bulk->data); vchiq_log_trace(vchiq_core_log_level, "%d: prs:%d %cx li=%x ri=%x p=%x", @@ -1947,14 +1955,14 @@ parse_rx_slots(VCHIQ_STATE_T *state) break; case VCHIQ_MSG_PADDING: vchiq_log_trace(vchiq_core_log_level, - "%d: prs PADDING@%x,%x", - state->id, (unsigned int)header, size); + "%d: prs PADDING@%pK,%x", + state->id, header, size); break; case VCHIQ_MSG_PAUSE: /* If initiated, signal the application thread */ vchiq_log_trace(vchiq_core_log_level, - "%d: prs PAUSE@%x,%x", - state->id, (unsigned int)header, size); + "%d: prs PAUSE@%pK,%x", + state->id, header, size); if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) { vchiq_log_error(vchiq_core_log_level, "%d: PAUSE received in state PAUSED", @@ -1977,8 +1985,8 @@ parse_rx_slots(VCHIQ_STATE_T *state) break; case VCHIQ_MSG_RESUME: vchiq_log_trace(vchiq_core_log_level, - "%d: prs RESUME@%x,%x", - state->id, (unsigned int)header, size); + "%d: prs RESUME@%pK,%x", + state->id, header, size); /* Release the slot mutex */ mutex_unlock(&state->slot_mutex); if (state->is_master) @@ -1999,8 +2007,8 @@ parse_rx_slots(VCHIQ_STATE_T *state) default: vchiq_log_error(vchiq_core_log_level, - "%d: prs invalid msgid %x@%x,%x", - state->id, msgid, (unsigned int)header, size); + "%d: prs invalid msgid %x@%pK,%x", + state->id, msgid, header, size); WARN(1, "invalid message\n"); break; } @@ -2039,7 +2047,7 @@ slot_handler_func(void *v) while (1) { DEBUG_COUNT(SLOT_HANDLER_COUNT); DEBUG_TRACE(SLOT_HANDLER_LINE); - remote_event_wait(&local->trigger); + remote_event_wait(state, &local->trigger); rmb(); @@ -2128,7 +2136,7 @@ recycle_func(void *v) VCHIQ_SHARED_STATE_T *local = state->local; while (1) { - remote_event_wait(&local->recycle); + remote_event_wait(state, &local->recycle); process_free_queue(state); } @@ -2151,7 +2159,7 @@ sync_func(void *v) int type; unsigned int localport, remoteport; - remote_event_wait(&local->sync_trigger); + remote_event_wait(state, 
&local->sync_trigger); rmb(); @@ -2165,11 +2173,9 @@ sync_func(void *v) if (!service) { vchiq_log_error(vchiq_sync_log_level, - "%d: sf %s@%x (%d->%d) - " - "invalid/closed service %d", + "%d: sf %s@%pK (%d->%d) - invalid/closed service %d", state->id, msg_type_str(type), - (unsigned int)header, - remoteport, localport, localport); + header, remoteport, localport, localport); release_message_sync(state, header); continue; } @@ -2199,9 +2205,9 @@ sync_func(void *v) service->peer_version = payload->version; } vchiq_log_info(vchiq_sync_log_level, - "%d: sf OPENACK@%x,%x (%d->%d) v:%d", - state->id, (unsigned int)header, size, - remoteport, localport, service->peer_version); + "%d: sf OPENACK@%pK,%x (%d->%d) v:%d", + state->id, header, size, remoteport, localport, + service->peer_version); if (service->srvstate == VCHIQ_SRVSTATE_OPENING) { service->remoteport = remoteport; vchiq_set_service_state(service, @@ -2214,9 +2220,8 @@ sync_func(void *v) case VCHIQ_MSG_DATA: vchiq_log_trace(vchiq_sync_log_level, - "%d: sf DATA@%x,%x (%d->%d)", - state->id, (unsigned int)header, size, - remoteport, localport); + "%d: sf DATA@%pK,%x (%d->%d)", + state->id, header, size, remoteport, localport); if ((service->remoteport == remoteport) && (service->srvstate == @@ -2234,8 +2239,8 @@ sync_func(void *v) default: vchiq_log_error(vchiq_sync_log_level, - "%d: sf unexpected msgid %x@%x,%x", - state->id, msgid, (unsigned int)header, size); + "%d: sf unexpected msgid %x@%pK,%x", + state->id, msgid, header, size); release_message_sync(state, header); break; } @@ -2268,7 +2273,8 @@ get_conn_state_name(VCHIQ_CONNSTATE_T conn_state) VCHIQ_SLOT_ZERO_T * vchiq_init_slots(void *mem_base, int mem_size) { - int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK; + int mem_align = + (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK); VCHIQ_SLOT_ZERO_T *slot_zero = (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align); int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE; @@ -2316,16 +2322,16 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, int i; vchiq_log_warning(vchiq_core_log_level, - "%s: slot_zero = 0x%08lx, is_master = %d", - __func__, (unsigned long)slot_zero, is_master); + "%s: slot_zero = %pK, is_master = %d", + __func__, slot_zero, is_master); /* Check the input configuration */ if (slot_zero->magic != VCHIQ_MAGIC) { vchiq_loud_error_header(); vchiq_loud_error("Invalid VCHIQ magic value found."); - vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)", - (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC); + vchiq_loud_error("slot_zero=%pK: magic=%x (expected %x)", + slot_zero, slot_zero->magic, VCHIQ_MAGIC); vchiq_loud_error_footer(); return VCHIQ_ERROR; } @@ -2333,10 +2339,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, if (slot_zero->version < VCHIQ_VERSION_MIN) { vchiq_loud_error_header(); vchiq_loud_error("Incompatible VCHIQ versions found."); - vchiq_loud_error("slot_zero=%x: VideoCore version=%d " - "(minimum %d)", - (unsigned int)slot_zero, slot_zero->version, - VCHIQ_VERSION_MIN); + vchiq_loud_error("slot_zero=%pK: VideoCore version=%d (minimum %d)", + slot_zero, slot_zero->version, VCHIQ_VERSION_MIN); vchiq_loud_error("Restart with a newer VideoCore image."); vchiq_loud_error_footer(); return VCHIQ_ERROR; @@ -2345,10 +2349,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, if (VCHIQ_VERSION < slot_zero->version_min) { vchiq_loud_error_header(); vchiq_loud_error("Incompatible VCHIQ versions found."); - 
vchiq_loud_error("slot_zero=%x: version=%d (VideoCore " - "minimum %d)", - (unsigned int)slot_zero, VCHIQ_VERSION, - slot_zero->version_min); + vchiq_loud_error("slot_zero=%pK: version=%d (VideoCore minimum %d)", + slot_zero, VCHIQ_VERSION, slot_zero->version_min); vchiq_loud_error("Restart with a newer kernel."); vchiq_loud_error_footer(); return VCHIQ_ERROR; @@ -2360,26 +2362,20 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) { vchiq_loud_error_header(); if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) - vchiq_loud_error("slot_zero=%x: slot_zero_size=%x " - "(expected %x)", - (unsigned int)slot_zero, - slot_zero->slot_zero_size, - sizeof(VCHIQ_SLOT_ZERO_T)); + vchiq_loud_error("slot_zero=%pK: slot_zero_size=%d (expected %d)", + slot_zero, slot_zero->slot_zero_size, + (int)sizeof(VCHIQ_SLOT_ZERO_T)); if (slot_zero->slot_size != VCHIQ_SLOT_SIZE) - vchiq_loud_error("slot_zero=%x: slot_size=%d " - "(expected %d", - (unsigned int)slot_zero, slot_zero->slot_size, + vchiq_loud_error("slot_zero=%pK: slot_size=%d (expected %d)", + slot_zero, slot_zero->slot_size, VCHIQ_SLOT_SIZE); if (slot_zero->max_slots != VCHIQ_MAX_SLOTS) - vchiq_loud_error("slot_zero=%x: max_slots=%d " - "(expected %d)", - (unsigned int)slot_zero, slot_zero->max_slots, + vchiq_loud_error("slot_zero=%pK: max_slots=%d (expected %d)", + slot_zero, slot_zero->max_slots, VCHIQ_MAX_SLOTS); if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE) - vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d " - "(expected %d)", - (unsigned int)slot_zero, - slot_zero->max_slots_per_side, + vchiq_loud_error("slot_zero=%pK: max_slots_per_side=%d (expected %d)", + slot_zero, slot_zero->max_slots_per_side, VCHIQ_MAX_SLOTS_PER_SIDE); vchiq_loud_error_footer(); return VCHIQ_ERROR; @@ -2463,24 +2459,24 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, state->data_use_count = 0; state->data_quota = state->slot_queue_available - 1; - local->trigger.event = &state->trigger_event; - remote_event_create(&local->trigger); + local->trigger.event = offsetof(VCHIQ_STATE_T, trigger_event); + remote_event_create(state, &local->trigger); local->tx_pos = 0; - local->recycle.event = &state->recycle_event; - remote_event_create(&local->recycle); + local->recycle.event = offsetof(VCHIQ_STATE_T, recycle_event); + remote_event_create(state, &local->recycle); local->slot_queue_recycle = state->slot_queue_available; - local->sync_trigger.event = &state->sync_trigger_event; - remote_event_create(&local->sync_trigger); + local->sync_trigger.event = offsetof(VCHIQ_STATE_T, sync_trigger_event); + remote_event_create(state, &local->sync_trigger); - local->sync_release.event = &state->sync_release_event; - remote_event_create(&local->sync_release); + local->sync_release.event = offsetof(VCHIQ_STATE_T, sync_release_event); + remote_event_create(state, &local->sync_release); /* At start-of-day, the slot is empty and available */ ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid = VCHIQ_MSGID_PADDING; - remote_event_signal_local(&local->sync_release); + remote_event_signal_local(state, &local->sync_release); local->debug[DEBUG_ENTRIES] = DEBUG_MAX; @@ -2494,7 +2490,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, (void *)state, threadname); - if (state->slot_handler_thread == NULL) { + if (IS_ERR(state->slot_handler_thread)) { vchiq_loud_error_header(); vchiq_loud_error("couldn't create thread %s", 
threadname); vchiq_loud_error_footer(); @@ -2507,7 +2503,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname); - if (state->recycle_thread == NULL) { + if (IS_ERR(state->recycle_thread)) { vchiq_loud_error_header(); vchiq_loud_error("couldn't create thread %s", threadname); vchiq_loud_error_footer(); @@ -2520,7 +2516,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, state->sync_thread = kthread_create(&sync_func, (void *)state, threadname); - if (state->sync_thread == NULL) { + if (IS_ERR(state->sync_thread)) { vchiq_loud_error_header(); vchiq_loud_error("couldn't create thread %s", threadname); vchiq_loud_error_footer(); @@ -2684,14 +2680,19 @@ vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id) service->version, service->version_min }; - VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) }; VCHIQ_STATUS_T status = VCHIQ_SUCCESS; service->client_id = client_id; vchiq_use_service_internal(service); - status = queue_message(service->state, NULL, - VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0), - &body, 1, sizeof(payload), QMFLAGS_IS_BLOCKING); + status = queue_message(service->state, + NULL, + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, + service->localport, + 0), + memcpy_copy_callback, + &payload, + sizeof(payload), + QMFLAGS_IS_BLOCKING); if (status == VCHIQ_SUCCESS) { /* Wait for the ACK/NAK */ if (down_interruptible(&service->remove_event) != 0) { @@ -2756,20 +2757,16 @@ release_service_messages(VCHIQ_SERVICE_T *service) if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) { vchiq_log_info(vchiq_core_log_level, - " fsi - hdr %x", - (unsigned int)header); + " fsi - hdr %pK", header); release_slot(state, slot_info, header, NULL); } pos += calc_stride(header->size); if (pos > VCHIQ_SLOT_SIZE) { vchiq_log_error(vchiq_core_log_level, - "fsi - pos %x: header %x, " - "msgid %x, header->msgid %x, " - "header->size %x", - pos, (unsigned int)header, - msgid, header->msgid, - header->size); + "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x", + pos, header, msgid, + header->msgid, header->size); WARN(1, "invalid slot position\n"); } } @@ -2783,7 +2780,7 @@ do_abort_bulks(VCHIQ_SERVICE_T *service) VCHIQ_STATUS_T status; /* Abort any outstanding bulk transfers */ - if (mutex_lock_interruptible(&service->bulk_mutex) != 0) + if (mutex_lock_killable(&service->bulk_mutex) != 0) return 0; abort_outstanding_bulks(service, &service->bulk_tx); abort_outstanding_bulks(service, &service->bulk_rx); @@ -3303,7 +3300,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, queue = (dir == VCHIQ_BULK_TRANSMIT) ? 
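/*
 * Illustrative note only -- not part of this patch.  kthread_create() never
 * returns NULL; on failure it returns an ERR_PTR() encoded error, which is
 * why the checks above (slot handler, recycle and sync threads) move from
 * "== NULL" to IS_ERR().  The usual pattern, with invented names, is:
 *
 *	task = kthread_create(threadfn, state, "VCHIQ-%d", state->id);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);	// e.g. -ENOMEM
 *	wake_up_process(task);
 */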
&service->bulk_tx : &service->bulk_rx; - if (mutex_lock_interruptible(&service->bulk_mutex) != 0) { + if (mutex_lock_killable(&service->bulk_mutex) != 0) { status = VCHIQ_RETRY; goto error_exit; } @@ -3317,7 +3314,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, status = VCHIQ_RETRY; goto error_exit; } - if (mutex_lock_interruptible(&service->bulk_mutex) + if (mutex_lock_killable(&service->bulk_mutex) != 0) { status = VCHIQ_RETRY; goto error_exit; @@ -3341,14 +3338,13 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, wmb(); vchiq_log_info(vchiq_core_log_level, - "%d: bt (%d->%d) %cx %x@%x %x", - state->id, - service->localport, service->remoteport, dir_char, - size, (unsigned int)bulk->data, (unsigned int)userdata); + "%d: bt (%d->%d) %cx %x@%pK %pK", + state->id, service->localport, service->remoteport, dir_char, + size, bulk->data, userdata); /* The slot mutex must be held when the service is being closed, so claim it here to ensure that isn't happening */ - if (mutex_lock_interruptible(&state->slot_mutex) != 0) { + if (mutex_lock_killable(&state->slot_mutex) != 0) { status = VCHIQ_RETRY; goto cancel_bulk_error_exit; } @@ -3363,16 +3359,19 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, (dir == VCHIQ_BULK_TRANSMIT) ? VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY); } else { - int payload[2] = { (int)bulk->data, bulk->size }; - VCHIQ_ELEMENT_T element = { payload, sizeof(payload) }; - - status = queue_message(state, NULL, - VCHIQ_MAKE_MSG(dir_msgtype, - service->localport, service->remoteport), - &element, 1, sizeof(payload), - QMFLAGS_IS_BLOCKING | - QMFLAGS_NO_MUTEX_LOCK | - QMFLAGS_NO_MUTEX_UNLOCK); + int payload[2] = { (int)(long)bulk->data, bulk->size }; + + status = queue_message(state, + NULL, + VCHIQ_MAKE_MSG(dir_msgtype, + service->localport, + service->remoteport), + memcpy_copy_callback, + &payload, + sizeof(payload), + QMFLAGS_IS_BLOCKING | + QMFLAGS_NO_MUTEX_LOCK | + QMFLAGS_NO_MUTEX_UNLOCK); if (status != VCHIQ_SUCCESS) { goto unlock_both_error_exit; } @@ -3418,26 +3417,22 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle, - const VCHIQ_ELEMENT_T *elements, unsigned int count) + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + size_t size) { VCHIQ_SERVICE_T *service = find_service_by_handle(handle); VCHIQ_STATUS_T status = VCHIQ_ERROR; - unsigned int size = 0; - unsigned int i; - if (!service || (vchiq_check_service(service) != VCHIQ_SUCCESS)) goto error_exit; - for (i = 0; i < (unsigned int)count; i++) { - if (elements[i].size) { - if (elements[i].data == NULL) { - VCHIQ_SERVICE_STATS_INC(service, error_count); - goto error_exit; - } - size += elements[i].size; - } + if (!size) { + VCHIQ_SERVICE_STATS_INC(service, error_count); + goto error_exit; + } if (size > VCHIQ_MAX_MSG_SIZE) { @@ -3451,14 +3446,14 @@ vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, service->localport, service->remoteport), - elements, count, size, 1); + copy_callback, context, size, 1); break; case VCHIQ_SRVSTATE_OPENSYNC: status = queue_message_sync(service->state, service, VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, service->localport, service->remoteport), - elements, count, size, 1); + copy_callback, context, size, 1); break; default: status = VCHIQ_ERROR; @@ -3691,13 +3686,11 @@ vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state) vchiq_dump(dump_context, buf, len + 1); len = snprintf(buf, sizeof(buf), - " tx_pos=%x(@%x), 
rx_pos=%x(@%x)", + " tx_pos=%x(@%pK), rx_pos=%x(@%pK)", state->local->tx_pos, - (uint32_t)state->tx_data + - (state->local_tx_pos & VCHIQ_SLOT_MASK), + state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK), state->rx_pos, - (uint32_t)state->rx_data + - (state->rx_pos & VCHIQ_SLOT_MASK)); + state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK)); vchiq_dump(dump_context, buf, len + 1); len = snprintf(buf, sizeof(buf), @@ -3747,7 +3740,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service) char buf[80]; int len; - len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)", + len = snprintf(buf, sizeof(buf), "Service %u: %s (ref %u)", service->localport, srvstate_names[service->srvstate], service->ref_count - 1); /*Don't include the lock just taken*/ @@ -3759,7 +3752,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service) int tx_pending, rx_pending; if (service->remoteport != VCHIQ_PORT_FREE) { int len2 = snprintf(remoteport, sizeof(remoteport), - "%d", service->remoteport); + "%u", service->remoteport); if (service->public_fourcc != VCHIQ_FOURCC_INVALID) snprintf(remoteport + len2, sizeof(remoteport) - len2, @@ -3888,26 +3881,26 @@ VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state) return status; } -void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem, - size_t numBytes) +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *void_mem, + size_t num_bytes) { - const uint8_t *mem = (const uint8_t *)voidMem; + const uint8_t *mem = (const uint8_t *)void_mem; size_t offset; - char lineBuf[100]; + char line_buf[100]; char *s; - while (numBytes > 0) { - s = lineBuf; + while (num_bytes > 0) { + s = line_buf; for (offset = 0; offset < 16; offset++) { - if (offset < numBytes) + if (offset < num_bytes) s += snprintf(s, 4, "%02x ", mem[offset]); else s += snprintf(s, 4, " "); } for (offset = 0; offset < 16; offset++) { - if (offset < numBytes) { + if (offset < num_bytes) { uint8_t ch = mem[offset]; if ((ch < ' ') || (ch > '~')) @@ -3919,16 +3912,16 @@ void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem, if ((label != NULL) && (*label != '\0')) vchiq_log_trace(VCHIQ_LOG_TRACE, - "%s: %08x: %s", label, addr, lineBuf); + "%s: %08x: %s", label, addr, line_buf); else vchiq_log_trace(VCHIQ_LOG_TRACE, - "%08x: %s", addr, lineBuf); + "%08x: %s", addr, line_buf); addr += 16; mem += 16; - if (numBytes > 16) - numBytes -= 16; + if (num_bytes > 16) + num_bytes -= 16; else - numBytes = 0; + num_bytes = 0; } } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h index 9be484c776d044a51bb3904c6a1e2cd11a852e35..9e164652548a42bac2646166e16961b88bc80594 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h @@ -184,11 +184,11 @@ enum { #define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug; #define DEBUG_TRACE(d) \ - do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0) + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0) #define DEBUG_VALUE(d, v) \ - do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0) + do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0) #define DEBUG_COUNT(d) \ - do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0) + do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0) #else /* VCHIQ_ENABLE_DEBUG */ @@ -264,7 +264,8 @@ typedef struct vchiq_bulk_queue_struct { typedef struct 
remote_event_struct { int armed; int fired; - struct semaphore *event; + /* Contains offset from the beginning of the VCHIQ_STATE_T structure */ + u32 event; } REMOTE_EVENT_T; typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T; @@ -633,9 +634,6 @@ vchiq_transfer_bulk(VCHIQ_BULK_T *bulk); extern void vchiq_complete_bulk(VCHIQ_BULK_T *bulk); -extern VCHIQ_STATUS_T -vchiq_copy_from_user(void *dst, const void *src, int size); - extern void remote_event_signal(REMOTE_EVENT_T *event); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c index 7e032130d967a69384926ddbe614e9af4be8f22e..f07cd4448ddfc9189ce9caa66ef958c640499e1e 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c @@ -120,7 +120,7 @@ static int debugfs_log_open(struct inode *inode, struct file *file) return single_open(file, debugfs_log_show, inode->i_private); } -static int debugfs_log_write(struct file *file, +static ssize_t debugfs_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { @@ -229,7 +229,7 @@ static int debugfs_trace_open(struct inode *inode, struct file *file) return single_open(file, debugfs_trace_show, inode->i_private); } -static int debugfs_trace_write(struct file *file, +static ssize_t debugfs_trace_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h index 8067bbe7ce8d70c41b2e6e0466d20a4612e39d93..377e8e48bb540465b34f2d4697504cb188d83e3b 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h @@ -141,9 +141,12 @@ extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service); extern VCHIQ_STATUS_T vchiq_use_service_no_resume( VCHIQ_SERVICE_HANDLE_T service); extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service); - -extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service, - const VCHIQ_ELEMENT_T *elements, unsigned int count); +extern VCHIQ_STATUS_T +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + size_t size); extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service, VCHIQ_HEADER_T *header); extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service, diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c index 25e7011edc5094372919d7e3be482f3bbd7b6155..e93922a872636eb34e4d106ef5c67d0cbde054ce 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c @@ -70,7 +70,7 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data, * ***************************************************************************/ #define VCHIQ_INIT_RETRIES 10 -VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut) +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out) { VCHIQ_STATUS_T status = VCHIQ_ERROR; VCHIQ_STATE_T *state; @@ -108,7 +108,7 @@ VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut) mutex_init(&instance->bulk_waiter_list_mutex); 
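/*
 * Illustrative sketch only -- not part of this patch.  The REMOTE_EVENT_T
 * change above stores a u32 offset into VCHIQ_STATE_T instead of a kernel
 * pointer, so the slot memory shared with the VideoCore stays independent of
 * the ARM pointer size (and usable with a 64-bit kernel).  The helper name
 * below is invented; the offsetof()/cast idiom is the one used by
 * remote_event_create() and remote_event_wait() in this patch.
 */
static inline struct semaphore *example_event_to_sem(VCHIQ_STATE_T *state,
						     REMOTE_EVENT_T *event)
{
	/* event->event == offsetof(VCHIQ_STATE_T, <some struct semaphore>) */
	return (struct semaphore *)((char *)state + event->event);
}

/* Set-up side, as done in vchiq_init_state():
 *	local->trigger.event = offsetof(VCHIQ_STATE_T, trigger_event);
 *	remote_event_create(state, &local->trigger);
 * The wait side then reduces to
 *	down_interruptible(example_event_to_sem(state, event));
 */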
INIT_LIST_HEAD(&instance->bulk_waiter_list); - *instanceOut = instance; + *instance_out = instance; status = VCHIQ_SUCCESS; @@ -134,7 +134,7 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance) vchiq_log_trace(vchiq_core_log_level, "%s(%p) called", __func__, instance); - if (mutex_lock_interruptible(&state->mutex) != 0) + if (mutex_lock_killable(&state->mutex) != 0) return VCHIQ_RETRY; /* Remove all services */ @@ -155,9 +155,8 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance) list); list_del(pos); vchiq_log_info(vchiq_arm_log_level, - "bulk_waiter - cleaned up %x " - "for pid %d", - (unsigned int)waiter, waiter->pid); + "bulk_waiter - cleaned up %pK for pid %d", + waiter, waiter->pid); kfree(waiter); } kfree(instance); @@ -192,7 +191,7 @@ VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance) vchiq_log_trace(vchiq_core_log_level, "%s(%p) called", __func__, instance); - if (mutex_lock_interruptible(&state->mutex) != 0) { + if (mutex_lock_killable(&state->mutex) != 0) { vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__); status = VCHIQ_RETRY; @@ -450,8 +449,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data, list_add(&waiter->list, &instance->bulk_waiter_list); mutex_unlock(&instance->bulk_waiter_list_mutex); vchiq_log_info(vchiq_arm_log_level, - "saved bulk_waiter %x for pid %d", - (unsigned int)waiter, current->pid); + "saved bulk_waiter %pK for pid %d", + waiter, current->pid); } return status; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h index 335446e05476c3dbb905a69e45b37a2702566bf6..778063ba312af34b291735cbecc43e975962aea9 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h @@ -52,18 +52,4 @@ static inline int __must_check down_interruptible_killable(struct semaphore *sem } #define down_interruptible down_interruptible_killable - -static inline int __must_check mutex_lock_interruptible_killable(struct mutex *lock) -{ - /* Allow interception of killable signals only. 
We don't want to be interrupted by harmless signals like SIGALRM */ - int ret; - sigset_t blocked, oldset; - siginitsetinv(&blocked, SHUTDOWN_SIGS); - sigprocmask(SIG_SETMASK, &blocked, &oldset); - ret = mutex_lock_interruptible(lock); - sigprocmask(SIG_SETMASK, &oldset, NULL); - return ret; -} -#define mutex_lock_interruptible mutex_lock_interruptible_killable - #endif diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h index d02e7764bd0d8721453d2e0a4900c26c12634c8b..dd43458f306fe6ba6f583abf9eeadffd5b2cfedb 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h @@ -42,13 +42,13 @@ /* ---- Constants and Types ---------------------------------------------- */ typedef struct { - void *armSharedMemVirt; - dma_addr_t armSharedMemPhys; - size_t armSharedMemSize; + void *arm_shared_mem_virt; + dma_addr_t arm_shared_mem_phys; + size_t arm_shared_mem_size; - void *vcSharedMemVirt; - dma_addr_t vcSharedMemPhys; - size_t vcSharedMemSize; + void *vc_shared_mem_virt; + dma_addr_t vc_shared_mem_phys; + size_t vc_shared_mem_size; } VCHIQ_SHARED_MEM_INFO_T; /* ---- Variable Externs ------------------------------------------------- */ diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h index 54a3ecec69ef6702c382a7af339d21b87ec8a346..12c304ceb952442a9b5040916bcde96da5a0e0c2 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h @@ -43,11 +43,13 @@ #define PAGELIST_READ_WITH_FRAGMENTS 2 typedef struct pagelist_struct { - unsigned long length; - unsigned short type; - unsigned short offset; - unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following - pages at consecutive addresses. */ + u32 length; + u16 type; + u16 offset; + u32 addrs[1]; /* N.B. 12 LSBs hold the number + * of following pages at consecutive + * addresses. 
+ */ } PAGELIST_T; typedef struct fragments_struct { diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c index 8072ff613636121d50293693d0bd2aaf0b6e07b5..d9771394a041bbb55daacd7744ac57758ae198d1 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c @@ -148,10 +148,10 @@ EXPORT_SYMBOL(vchi_msg_remove); * Name: vchi_msg_queue * * Arguments: VCHI_SERVICE_HANDLE_T handle, - * const void *data, - * uint32_t data_size, - * VCHI_FLAGS_T flags, - * void *msg_handle, + * ssize_t (*copy_callback)(void *context, void *dest, + * size_t offset, size_t maxsize), + * void *context, + * uint32_t data_size * * Description: Thin wrapper to queue a message onto a connection * @@ -159,28 +159,29 @@ EXPORT_SYMBOL(vchi_msg_remove); * ***********************************************************/ int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle, - const void *data, - uint32_t data_size, - VCHI_FLAGS_T flags, - void *msg_handle) + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + uint32_t data_size) { SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle; - VCHIQ_ELEMENT_T element = {data, data_size}; VCHIQ_STATUS_T status; - (void)msg_handle; - - WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED); + while (1) { + status = vchiq_queue_message(service->handle, + copy_callback, + context, + data_size); - status = vchiq_queue_message(service->handle, &element, 1); + /* + * vchiq_queue_message() may return VCHIQ_RETRY, so we need to + * implement a retry mechanism since this function is supposed + * to block until queued + */ + if (status != VCHIQ_RETRY) + break; - /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to - ** implement a retry mechanism since this function is supposed - ** to block until queued - */ - while (status == VCHIQ_RETRY) { msleep(1); - status = vchiq_queue_message(service->handle, &element, 1); } return vchiq_status_to_vchi(status); @@ -229,17 +230,18 @@ int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle, return vchiq_status_to_vchi(VCHIQ_ERROR); } - status = vchiq_bulk_receive(service->handle, data_dst, data_size, - bulk_handle, mode); - - /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to - ** implement a retry mechanism since this function is supposed - ** to block until queued - */ - while (status == VCHIQ_RETRY) { - msleep(1); + while (1) { status = vchiq_bulk_receive(service->handle, data_dst, data_size, bulk_handle, mode); + /* + * vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to + * implement a retry mechanism since this function is supposed + * to block until queued + */ + if (status != VCHIQ_RETRY) + break; + + msleep(1); } return vchiq_status_to_vchi(status); @@ -289,17 +291,19 @@ int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle, return vchiq_status_to_vchi(VCHIQ_ERROR); } - status = vchiq_bulk_transmit(service->handle, data_src, data_size, - bulk_handle, mode); - - /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to - ** implement a retry mechanism since this function is supposed - ** to block until queued - */ - while (status == VCHIQ_RETRY) { - msleep(1); + while (1) { status = vchiq_bulk_transmit(service->handle, data_src, data_size, bulk_handle, mode); + + /* + * vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to + * implement a retry mechanism since this function is supposed + * 
to block until queued + */ + if (status != VCHIQ_RETRY) + break; + + msleep(1); } return vchiq_status_to_vchi(status); @@ -349,44 +353,6 @@ int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle, } EXPORT_SYMBOL(vchi_msg_dequeue); -/*********************************************************** - * Name: vchi_msg_queuev - * - * Arguments: VCHI_SERVICE_HANDLE_T handle, - * VCHI_MSG_VECTOR_T *vector, - * uint32_t count, - * VCHI_FLAGS_T flags, - * void *msg_handle - * - * Description: Thin wrapper to queue a message onto a connection - * - * Returns: int32_t - success == 0 - * - ***********************************************************/ - -vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T)); -vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) == - offsetof(VCHIQ_ELEMENT_T, data)); -vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) == - offsetof(VCHIQ_ELEMENT_T, size)); - -int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle, - VCHI_MSG_VECTOR_T *vector, - uint32_t count, - VCHI_FLAGS_T flags, - void *msg_handle) -{ - SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle; - - (void)msg_handle; - - WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED); - - return vchiq_status_to_vchi(vchiq_queue_message(service->handle, - (const VCHIQ_ELEMENT_T *)vector, count)); -} -EXPORT_SYMBOL(vchi_msg_queuev); - /*********************************************************** * Name: vchi_held_msg_release * @@ -400,8 +366,16 @@ EXPORT_SYMBOL(vchi_msg_queuev); ***********************************************************/ int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message) { - vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service, - (VCHIQ_HEADER_T *)message->message); + /* + * Convert the service field pointer back to an + * VCHIQ_SERVICE_HANDLE_T which is an int. + * This pointer is opaque to everything except + * vchi_msg_hold which simply upcasted the int + * to a pointer. + */ + + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)(long)message->service, + (VCHIQ_HEADER_T *)message->message); return 0; } @@ -445,8 +419,16 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle, *data = header->data; *msg_size = header->size; + /* + * upcast the VCHIQ_SERVICE_HANDLE_T which is an int + * to a pointer and stuff it in the held message. + * This pointer is opaque to everything except + * vchi_held_msg_release which simply downcasts it back + * to an int. 
+ */ + message_handle->service = - (struct opaque_vchi_service_t *)service->handle; + (struct opaque_vchi_service_t *)(long)service->handle; message_handle->message = header; return 0; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c index 384acb8d2eae12eca8aa955c274467f120e8824f..f76f4d79053228b636d96a1e6571c937408ce291 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c @@ -61,8 +61,7 @@ int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size) void vchiu_queue_delete(VCHIU_QUEUE_T *queue) { - if (queue->storage != NULL) - kfree(queue->storage); + kfree(queue->storage); } int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue) diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h index d5d94c43c07409953da19e961b48fdc80c1d3fed..5577df3199e75868ad601bcc9df42e5cabf4b4be 100644 --- a/drivers/staging/vme/devices/vme_pio2.h +++ b/drivers/staging/vme/devices/vme_pio2.h @@ -48,8 +48,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0, PIO2_REGS_INT_MASK6, PIO2_REGS_INT_MASK7 }; - - #define PIO2_REGS_CTRL 0x18 #define PIO2_REGS_VME_VECTOR 0x19 #define PIO2_REGS_CNTR0 0x20 @@ -63,7 +61,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0, #define PIO2_REGS_ID 0x30 - /* PIO2_REGS_DATAx (0x0 - 0x3) */ static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0, @@ -204,8 +201,6 @@ static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1, #define PIO2_CNTR_BCD 1 - - enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH }; enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 }; @@ -240,10 +235,10 @@ struct pio2_card { struct pio2_cntr cntr[6]; }; -int pio2_cntr_reset(struct pio2_card *); +int pio2_cntr_reset(struct pio2_card *card); -int pio2_gpio_reset(struct pio2_card *); -int pio2_gpio_init(struct pio2_card *); -void pio2_gpio_exit(struct pio2_card *); +int pio2_gpio_reset(struct pio2_card *card); +int pio2_gpio_init(struct pio2_card *card); +void pio2_gpio_exit(struct pio2_card *card); #endif /* _VME_PIO2_H_ */ diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c index 8e66a520266c8d94961c75e73ab2fcb7cd029d4e..20a2d835fdaa37e02ffd3e9ede670154bd7595fd 100644 --- a/drivers/staging/vme/devices/vme_pio2_core.c +++ b/drivers/staging/vme/devices/vme_pio2_core.c @@ -365,7 +365,7 @@ static int pio2_probe(struct vme_dev *vdev) vec = card->irq_vector | PIO2_VECTOR_CNTR[i]; retval = vme_irq_request(vdev, card->irq_level, vec, - &pio2_int, card); + &pio2_int, card); if (retval < 0) { dev_err(&card->vdev->dev, "Unable to attach VME interrupt vector0x%x, level 0x%x\n", diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c index d84dffb894f432ce97a9118d185a838ef69997e9..87aa5174df22f69965d49997920651e104e82700 100644 --- a/drivers/staging/vme/devices/vme_user.c +++ b/drivers/staging/vme/devices/vme_user.c @@ -661,7 +661,7 @@ static int vme_user_probe(struct vme_dev *vdev) } class_destroy(vme_user_sysfs_class); - /* Ensure counter set correcty to unalloc all master windows */ + /* Ensure counter set correctly to unalloc all master windows */ i = MASTER_MAX + 1; err_master: while (i > MASTER_MINOR) { @@ -671,7 +671,7 @@ static int vme_user_probe(struct vme_dev *vdev) } /* - * Ensure counter set correcty to unalloc all slave windows and buffers + * 
Ensure counter set correctly to unalloc all slave windows and buffers */ i = SLAVE_MAX + 1; err_slave: @@ -716,7 +716,7 @@ static int vme_user_remove(struct vme_dev *dev) /* Unregister device driver */ cdev_del(vme_user_cdev); - /* Unregiser the major and minor device numbers */ + /* Unregister the major and minor device numbers */ unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS); return 0; diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c index de503a316e713019a99fbc36b55b2c9113341e32..44dfa54213740446e85c289da5f2299198362b91 100644 --- a/drivers/staging/vt6655/baseband.c +++ b/drivers/staging/vt6655/baseband.c @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * * File: baseband.c * * Purpose: Implement functions to access baseband @@ -1916,7 +1911,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length, * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * byBBAddr - address of register in Baseband * Out: * pbyData - data read @@ -1927,24 +1922,24 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length, bool BBbReadEmbedded(struct vnt_private *priv, unsigned char byBBAddr, unsigned char *pbyData) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; unsigned short ww; unsigned char byValue; /* BB reg offset */ - VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr); + VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr); /* turn on REGR */ - MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGR); + MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR); /* W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue); + VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue); if (byValue & BBREGCTL_DONE) break; } /* get BB data */ - VNSvInPortB(dwIoBase + MAC_REG_BBREGDATA, pbyData); + VNSvInPortB(iobase + MAC_REG_BBREGDATA, pbyData); if (ww == W_MAX_TIMEOUT) { pr_debug(" DBG_PORT80(0x30)\n"); @@ -1958,7 +1953,7 @@ bool BBbReadEmbedded(struct vnt_private *priv, * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * byBBAddr - address of register in Baseband * byData - data to write * Out: @@ -1970,20 +1965,20 @@ bool BBbReadEmbedded(struct vnt_private *priv, bool BBbWriteEmbedded(struct vnt_private *priv, unsigned char byBBAddr, unsigned char byData) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; unsigned short ww; unsigned char byValue; /* BB reg offset */ - VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr); + VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr); /* set BB data */ - VNSvOutPortB(dwIoBase + MAC_REG_BBREGDATA, byData); + VNSvOutPortB(iobase + MAC_REG_BBREGDATA, byData); /* turn on BBREGCTL_REGW */ - MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGW); + MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW); /* W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue); + VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue); if (byValue & BBREGCTL_DONE) break; } @@ -2000,7 +1995,7 @@ bool BBbWriteEmbedded(struct vnt_private *priv, * * 
Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * byRevId - Revision ID * byRFType - RF type * Out: @@ -2014,7 +2009,7 @@ bool BBbVT3253Init(struct vnt_private *priv) { bool bResult = true; int ii; - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; unsigned char byRFType = priv->byRFType; unsigned char byLocalID = priv->byLocalID; @@ -2036,8 +2031,8 @@ bool BBbVT3253Init(struct vnt_private *priv) byVT3253B0_AGC4_RFMD2959[ii][0], byVT3253B0_AGC4_RFMD2959[ii][1]); - VNSvOutPortD(dwIoBase + MAC_REG_ITRTMSET, 0x23); - MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0)); + VNSvOutPortD(iobase + MAC_REG_ITRTMSET, 0x23); + MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0)); } priv->abyBBVGA[0] = 0x18; priv->abyBBVGA[1] = 0x0A; @@ -2076,8 +2071,8 @@ bool BBbVT3253Init(struct vnt_private *priv) byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]); - VNSvOutPortB(dwIoBase + MAC_REG_ITRTMSET, 0x23); - MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0)); + VNSvOutPortB(iobase + MAC_REG_ITRTMSET, 0x23); + MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0)); priv->abyBBVGA[0] = 0x14; priv->abyBBVGA[1] = 0x0A; @@ -2098,7 +2093,7 @@ bool BBbVT3253Init(struct vnt_private *priv) * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) */ - /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/ + /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/ /* Init ANT B select, * RX Config CR10 = 0x28->0x2A, @@ -2106,7 +2101,7 @@ bool BBbVT3253Init(struct vnt_private *priv) * make the ANT_A, ANT_B inverted) */ - /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/ + /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/ /* Select VC1/VC2, CR215 = 0x02->0x06 */ bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06); @@ -2154,7 +2149,7 @@ bool BBbVT3253Init(struct vnt_private *priv) priv->ldBmThreshold[2] = 0; priv->ldBmThreshold[3] = 0; /* Fix VT3226 DFC system timing issue */ - MACvSetRFLE_LatchBase(dwIoBase); + MACvSetRFLE_LatchBase(iobase); /* {{ RobertYu: 20050104 */ } else if (byRFType == RF_AIROHA7230) { for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++) @@ -2162,16 +2157,15 @@ bool BBbVT3253Init(struct vnt_private *priv) byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]); - /* {{ RobertYu:20050223, request by JerryChung */ /* Init ANT B select,TX Config CR09 = 0x61->0x45, * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) */ - /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/ + /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/ /* Init ANT B select,RX Config CR10 = 0x28->0x2A, * 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */ - /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/ + /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/ /* Select VC1/VC2, CR215 = 0x02->0x06 */ bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06); /* }} */ @@ -2259,7 +2253,7 @@ void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData) * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * Out: * none * @@ -2280,7 +2274,7 @@ BBvSoftwareReset(struct vnt_private *priv) * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * Out: * none * @@ -2302,7 +2296,7 @@ BBvPowerSaveModeON(struct vnt_private *priv) * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * Out: * none * diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h index b4e8c43180ecc174a9e2c9243e5c5f43b8263c24..8a567c9155b4e49d161a3b425ebea0fe57f3ba82 100644 --- 
a/drivers/staging/vt6655/baseband.h +++ b/drivers/staging/vt6655/baseband.h @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * * File: baseband.h * * Purpose: Implement functions to access baseband @@ -60,12 +55,6 @@ #define TOP_RATE_2M 0x00200000 #define TOP_RATE_1M 0x00100000 -#define BBvClearFOE(dwIoBase) \ - BBbWriteEmbedded(dwIoBase, 0xB1, 0) - -#define BBvSetFOE(dwIoBase) \ - BBbWriteEmbedded(dwIoBase, 0xB1, 0x0C) - unsigned int BBuGetFrameTime( unsigned char byPreambleType, diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c index dbcea4434725f32731bd1bb5babb2e0aa3e1fdd1..e0c92818ed707fd1644340d12007a85ebe1d2939 100644 --- a/drivers/staging/vt6655/card.c +++ b/drivers/staging/vt6655/card.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: card.c * Purpose: Provide functions to setup NIC operation mode * Functions: @@ -36,7 +32,7 @@ * * Revision History: * 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec. - * 08-26-2003 Kyle Hsu: Modify the defination type of dwIoBase. + * 08-26-2003 Kyle Hsu: Modify the defination type of iobase. * 09-01-2003 Bryan YC Fan: Add vUpdateIFS(). * */ @@ -261,7 +257,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type) BBbWriteEmbedded(priv, 0x88, 0x02); bySlot = C_SLOT_LONG; bySIFS = C_SIFS_BG; - byDIFS = C_SIFS_BG + 2*C_SLOT_LONG; + byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG; byCWMaxMin = 0xA5; } else { /* PK_TYPE_11GA & PK_TYPE_11GB */ MACvSetBBType(priv->PortOffset, BB_TYPE_11G); @@ -289,7 +285,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type) byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT; } else { bySlot = C_SLOT_LONG; - byDIFS = C_SIFS_BG + 2*C_SLOT_LONG; + byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG; } byCWMaxMin = 0xa4; @@ -528,8 +524,11 @@ CARDvSafeResetTx( struct vnt_tx_desc *pCurrTD; /* initialize TD index */ - priv->apTailTD[0] = priv->apCurrTD[0] = &(priv->apTD0Rings[0]); - priv->apTailTD[1] = priv->apCurrTD[1] = &(priv->apTD1Rings[0]); + priv->apTailTD[0] = &(priv->apTD0Rings[0]); + priv->apCurrTD[0] = &(priv->apTD0Rings[0]); + + priv->apTailTD[1] = &(priv->apTD1Rings[0]); + priv->apCurrTD[1] = &(priv->apTD1Rings[0]); for (uu = 0; uu < TYPE_MAXTD; uu++) priv->iTDUsed[uu] = 0; @@ -938,20 +937,20 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2) */ bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; unsigned short ww; unsigned char byData; - MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD); + MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_TFTCTL, &byData); + VNSvInPortB(iobase + MAC_REG_TFTCTL, &byData); if (!(byData & TFTCTL_TSFCNTRRD)) break; } if (ww == W_MAX_TIMEOUT) return false; - VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF); - VNSvInPortD(dwIoBase + 
MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1); + VNSvInPortD(iobase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF); + VNSvInPortD(iobase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1); return true; } @@ -989,7 +988,7 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval) * * Parameters: * In: - * dwIoBase - IO Base + * iobase - IO Base * wBeaconInterval - Beacon Interval * Out: * none @@ -999,16 +998,16 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval) void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInterval) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; u64 qwNextTBTT = 0; CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */ qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval); /* Set NextTBTT */ - VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT); - VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32)); - MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); + VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT); + VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32)); + MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); } /* @@ -1028,12 +1027,12 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv, void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, unsigned short wBeaconInterval) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval); /* Set NextTBTT */ - VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwTSF); - VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32)); - MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); + VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwTSF); + VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32)); + MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF); } diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h index 0203c7fd91a2d2ce504787e981d7f8126324dc81..44420b5a445f5db19470751e26ee317c9f23d754 100644 --- a/drivers/staging/vt6655/card.h +++ b/drivers/staging/vt6655/card.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: card.h * * Purpose: Provide functions to setup NIC operation mode @@ -50,7 +46,7 @@ #define CB_MAX_CHANNEL_24G 14 #define CB_MAX_CHANNEL_5G 42 -#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G) +#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G + CB_MAX_CHANNEL_5G) typedef enum _CARD_PKT_TYPE { PKT_TYPE_802_11_BCN, diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c index 029a8df4ca1cf7225dccfcc740f6184f2e7bf160..ab89956511a00a5c3378d7ad68dc051ed97237e7 100644 --- a/drivers/staging/vt6655/channel.c +++ b/drivers/staging/vt6655/channel.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * * File: channel.c * */ diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h index 2d613e7f169ca4ab208c080eccc3121ef2b6b24d..2621dfabff068fdf8aee908c5a5117a9ff9aabd6 100644 --- a/drivers/staging/vt6655/channel.h +++ b/drivers/staging/vt6655/channel.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: channel.h * */ diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h index 2d7f6ae89164d5333855edf33e7a20733aeeca3a..2fee6e759ad84aecdc2f3ccaac17bfa9a01a5317 100644 --- a/drivers/staging/vt6655/desc.h +++ b/drivers/staging/vt6655/desc.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: desc.h * * Purpose:The header file of descriptor diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h index 55405e058196f878821e851c50ce855f383e139b..3ae40d846a0927635caca95cf0be6c9e0a55480c 100644 --- a/drivers/staging/vt6655/device.h +++ b/drivers/staging/vt6655/device.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: device.h * * Purpose: MAC Data structure @@ -283,12 +279,12 @@ struct vnt_private { unsigned char byOFDMPwrG; unsigned char byCurPwr; char byCurPwrdBm; - unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G+1]; - unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL+1]; - char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G+1]; - char abyOFDMDefaultPwr[CB_MAX_CHANNEL+1]; - char abyRegPwr[CB_MAX_CHANNEL+1]; - char abyLocalPwr[CB_MAX_CHANNEL+1]; + unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G + 1]; + unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL + 1]; + char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G + 1]; + char abyOFDMDefaultPwr[CB_MAX_CHANNEL + 1]; + char abyRegPwr[CB_MAX_CHANNEL + 1]; + char abyLocalPwr[CB_MAX_CHANNEL + 1]; /* BaseBand Loopback Use */ unsigned char byBBCR4d; diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h index b4c9547d3138b49e7b5550b1a95410c7aa7f538a..0298ea923f972b3326b40cf5d877870691bbb914 100644 --- a/drivers/staging/vt6655/device_cfg.h +++ b/drivers/staging/vt6655/device_cfg.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * * File: device_cfg.h * * Purpose: Driver configuration header diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index f109eeac358d86954f5ee8caefc4a0b310725dce..da0f711910091a49edfc55ca7ef21701d15e26f1 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: device_main.c * * Purpose: driver entry for initial, open, close, tx and rx. @@ -314,7 +310,7 @@ static void device_init_registers(struct vnt_private *priv) SROMbyReadEmbedded(priv->PortOffset, (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL)); if (priv->abyCCKPwrTbl[ii + 1] == 0) - priv->abyCCKPwrTbl[ii+1] = priv->byCCKPwr; + priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr; priv->abyOFDMPwrTbl[ii + 1] = SROMbyReadEmbedded(priv->PortOffset, @@ -556,7 +552,7 @@ static void device_init_rd0_ring(struct vnt_private *priv) if (!device_alloc_rx_buf(priv, desc)) dev_err(&priv->pcid->dev, "can not alloc rx bufs\n"); - desc->next = &(priv->aRD0Ring[(i+1) % priv->opts.rx_descs0]); + desc->next = &(priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0]); desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc)); } @@ -1272,7 +1268,6 @@ static void vnt_remove_interface(struct ieee80211_hw *hw, priv->op_mode = NL80211_IFTYPE_UNSPECIFIED; } - static int vnt_config(struct ieee80211_hw *hw, u32 changed) { struct vnt_private *priv = hw->priv; diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c index 700032e9c477e8c0584815f4cfc6bf8365d886ff..9b3fa779258a672b0196b600d73832596d5538bb 100644 --- a/drivers/staging/vt6655/dpc.c +++ b/drivers/staging/vt6655/dpc.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: dpc.c * * Purpose: handle dpc rx functions diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h index e80b30816968652b60865a1829e1bd9e7eb01fcb..6e75fa9c5618c2fc164455de43d686e702ba2204 100644 --- a/drivers/staging/vt6655/dpc.h +++ b/drivers/staging/vt6655/dpc.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: dpc.h * * Purpose: diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c index e161d5d9aebb1240b40090b9616d7e2bedb069bd..dad9e292d4da7f16179b718c852725aa77619119 100644 --- a/drivers/staging/vt6655/key.c +++ b/drivers/staging/vt6655/key.c @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * * File: key.c * * Purpose: Implement functions for 802.11i Key management diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h index d72719741a565c5d06a707729869f9b23c265b7b..a5024611af60ded3c9b48ec1cdfe3d2dee3a99ec 100644 --- a/drivers/staging/vt6655/key.h +++ b/drivers/staging/vt6655/key.h @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * * File: key.h * * Purpose: Implement functions for 802.11i Key management diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c index 8e13f7f414151b032c918033186099d7eba36b4c..4aaa99bafcda4d96ab10733245c2dda4752df202 100644 --- a/drivers/staging/vt6655/mac.c +++ b/drivers/staging/vt6655/mac.c @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * * File: mac.c * * Purpose: MAC routines @@ -147,7 +142,6 @@ void MACvSetShortRetryLimit(struct vnt_private *priv, iowrite8(byRetryLimit, io_base + MAC_REG_SRT); } - /* * Description: * Set 802.11 Long Retry Limit @@ -321,7 +315,7 @@ bool MACbSoftwareReset(struct vnt_private *priv) */ bool MACbSafeSoftwareReset(struct vnt_private *priv) { - unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1]; + unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1]; bool bRetVal; /* PATCH.... diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h index 030f529c339ba8393e9865ddb0fbba0aec60b1ea..33b758cb79d46254d66768068a2941f271612e03 100644 --- a/drivers/staging/vt6655/mac.h +++ b/drivers/staging/vt6655/mac.h @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * * File: mac.h * * Purpose: MAC routines @@ -554,341 +549,341 @@ /*--------------------- Export Macros ------------------------------*/ -#define MACvRegBitsOn(dwIoBase, byRegOfs, byBits) \ +#define MACvRegBitsOn(iobase, byRegOfs, byBits) \ do { \ unsigned char byData; \ - VNSvInPortB(dwIoBase + byRegOfs, &byData); \ - VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \ + VNSvInPortB(iobase + byRegOfs, &byData); \ + VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \ } while (0) -#define MACvWordRegBitsOn(dwIoBase, byRegOfs, wBits) \ +#define MACvWordRegBitsOn(iobase, byRegOfs, wBits) \ do { \ unsigned short wData; \ - VNSvInPortW(dwIoBase + byRegOfs, &wData); \ - VNSvOutPortW(dwIoBase + byRegOfs, wData | (wBits)); \ + VNSvInPortW(iobase + byRegOfs, &wData); \ + VNSvOutPortW(iobase + byRegOfs, wData | (wBits)); \ } while (0) -#define MACvDWordRegBitsOn(dwIoBase, byRegOfs, dwBits) \ +#define MACvDWordRegBitsOn(iobase, byRegOfs, dwBits) \ do { \ unsigned long dwData; \ - VNSvInPortD(dwIoBase + byRegOfs, &dwData); \ - VNSvOutPortD(dwIoBase + byRegOfs, dwData | (dwBits)); \ + VNSvInPortD(iobase + byRegOfs, &dwData); \ + VNSvOutPortD(iobase + byRegOfs, dwData | (dwBits)); \ } while (0) -#define MACvRegBitsOnEx(dwIoBase, byRegOfs, byMask, byBits) \ +#define MACvRegBitsOnEx(iobase, byRegOfs, byMask, byBits) \ do { \ unsigned char byData; \ - VNSvInPortB(dwIoBase + byRegOfs, &byData); \ + VNSvInPortB(iobase + byRegOfs, &byData); \ byData &= byMask; \ - VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \ + VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \ } while (0) -#define MACvRegBitsOff(dwIoBase, byRegOfs, byBits) \ +#define MACvRegBitsOff(iobase, byRegOfs, byBits) \ do { \ unsigned char byData; \ - VNSvInPortB(dwIoBase + byRegOfs, &byData); \ - VNSvOutPortB(dwIoBase + byRegOfs, byData & ~(byBits)); \ + VNSvInPortB(iobase + byRegOfs, &byData); \ + VNSvOutPortB(iobase + byRegOfs, byData & ~(byBits)); \ } while (0) -#define MACvWordRegBitsOff(dwIoBase, byRegOfs, wBits) \ +#define MACvWordRegBitsOff(iobase, byRegOfs, wBits) \ do { \ unsigned short wData; \ - VNSvInPortW(dwIoBase + byRegOfs, &wData); \ - VNSvOutPortW(dwIoBase + byRegOfs, wData & ~(wBits)); \ + VNSvInPortW(iobase + byRegOfs, &wData); \ + VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits)); \ } while (0) -#define MACvDWordRegBitsOff(dwIoBase, byRegOfs, dwBits) \ +#define MACvDWordRegBitsOff(iobase, byRegOfs, dwBits) \ do { \ unsigned long dwData; \ - VNSvInPortD(dwIoBase + byRegOfs, &dwData); \ - VNSvOutPortD(dwIoBase + byRegOfs, dwData & ~(dwBits)); \ + VNSvInPortD(iobase + byRegOfs, &dwData); \ + VNSvOutPortD(iobase + byRegOfs, dwData & ~(dwBits)); \ } while (0) -#define MACvGetCurrRx0DescAddr(dwIoBase, pdwCurrDescAddr) \ - VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, \ +#define MACvGetCurrRx0DescAddr(iobase, pdwCurrDescAddr) \ + VNSvInPortD(iobase + MAC_REG_RXDMAPTR0, \ (unsigned long *)pdwCurrDescAddr) -#define MACvGetCurrRx1DescAddr(dwIoBase, pdwCurrDescAddr) \ - VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, \ +#define MACvGetCurrRx1DescAddr(iobase, pdwCurrDescAddr) \ + VNSvInPortD(iobase + MAC_REG_RXDMAPTR1, \ (unsigned long *)pdwCurrDescAddr) -#define MACvGetCurrTx0DescAddr(dwIoBase, pdwCurrDescAddr) \ - VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, \ +#define MACvGetCurrTx0DescAddr(iobase, pdwCurrDescAddr) \ + VNSvInPortD(iobase + MAC_REG_TXDMAPTR0, \ (unsigned long *)pdwCurrDescAddr) -#define MACvGetCurrAC0DescAddr(dwIoBase, pdwCurrDescAddr) \ - VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, \ +#define 
MACvGetCurrAC0DescAddr(iobase, pdwCurrDescAddr) \ + VNSvInPortD(iobase + MAC_REG_AC0DMAPTR, \ (unsigned long *)pdwCurrDescAddr) -#define MACvGetCurrSyncDescAddr(dwIoBase, pdwCurrDescAddr) \ - VNSvInPortD(dwIoBase + MAC_REG_SYNCDMAPTR, \ +#define MACvGetCurrSyncDescAddr(iobase, pdwCurrDescAddr) \ + VNSvInPortD(iobase + MAC_REG_SYNCDMAPTR, \ (unsigned long *)pdwCurrDescAddr) -#define MACvGetCurrATIMDescAddr(dwIoBase, pdwCurrDescAddr) \ - VNSvInPortD(dwIoBase + MAC_REG_ATIMDMAPTR, \ +#define MACvGetCurrATIMDescAddr(iobase, pdwCurrDescAddr) \ + VNSvInPortD(iobase + MAC_REG_ATIMDMAPTR, \ (unsigned long *)pdwCurrDescAddr) /* set the chip with current BCN tx descriptor address */ -#define MACvSetCurrBCNTxDescAddr(dwIoBase, dwCurrDescAddr) \ - VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, \ +#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr) \ + VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR, \ dwCurrDescAddr) /* set the chip with current BCN length */ -#define MACvSetCurrBCNLength(dwIoBase, wCurrBCNLength) \ - VNSvOutPortW(dwIoBase + MAC_REG_BCNDMACTL+2, \ +#define MACvSetCurrBCNLength(iobase, wCurrBCNLength) \ + VNSvOutPortW(iobase + MAC_REG_BCNDMACTL+2, \ wCurrBCNLength) -#define MACvReadBSSIDAddress(dwIoBase, pbyEtherAddr) \ +#define MACvReadBSSIDAddress(iobase, pbyEtherAddr) \ do { \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ - VNSvInPortB(dwIoBase + MAC_REG_BSSID0, \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \ + VNSvInPortB(iobase + MAC_REG_BSSID0, \ (unsigned char *)pbyEtherAddr); \ - VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 1, \ + VNSvInPortB(iobase + MAC_REG_BSSID0 + 1, \ pbyEtherAddr + 1); \ - VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 2, \ + VNSvInPortB(iobase + MAC_REG_BSSID0 + 2, \ pbyEtherAddr + 2); \ - VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 3, \ + VNSvInPortB(iobase + MAC_REG_BSSID0 + 3, \ pbyEtherAddr + 3); \ - VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 4, \ + VNSvInPortB(iobase + MAC_REG_BSSID0 + 4, \ pbyEtherAddr + 4); \ - VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 5, \ + VNSvInPortB(iobase + MAC_REG_BSSID0 + 5, \ pbyEtherAddr + 5); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \ } while (0) -#define MACvWriteBSSIDAddress(dwIoBase, pbyEtherAddr) \ +#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr) \ do { \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ - VNSvOutPortB(dwIoBase + MAC_REG_BSSID0, \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \ + VNSvOutPortB(iobase + MAC_REG_BSSID0, \ *(pbyEtherAddr)); \ - VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 1, \ + VNSvOutPortB(iobase + MAC_REG_BSSID0 + 1, \ *(pbyEtherAddr + 1)); \ - VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 2, \ + VNSvOutPortB(iobase + MAC_REG_BSSID0 + 2, \ *(pbyEtherAddr + 2)); \ - VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 3, \ + VNSvOutPortB(iobase + MAC_REG_BSSID0 + 3, \ *(pbyEtherAddr + 3)); \ - VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 4, \ + VNSvOutPortB(iobase + MAC_REG_BSSID0 + 4, \ *(pbyEtherAddr + 4)); \ - VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 5, \ + VNSvOutPortB(iobase + MAC_REG_BSSID0 + 5, \ *(pbyEtherAddr + 5)); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \ } while (0) -#define MACvReadEtherAddress(dwIoBase, pbyEtherAddr) \ +#define MACvReadEtherAddress(iobase, pbyEtherAddr) \ do { \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ - VNSvInPortB(dwIoBase + MAC_REG_PAR0, \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \ + VNSvInPortB(iobase + MAC_REG_PAR0, \ (unsigned char *)pbyEtherAddr); \ - 
VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 1, \ + VNSvInPortB(iobase + MAC_REG_PAR0 + 1, \ pbyEtherAddr + 1); \ - VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 2, \ + VNSvInPortB(iobase + MAC_REG_PAR0 + 2, \ pbyEtherAddr + 2); \ - VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 3, \ + VNSvInPortB(iobase + MAC_REG_PAR0 + 3, \ pbyEtherAddr + 3); \ - VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 4, \ + VNSvInPortB(iobase + MAC_REG_PAR0 + 4, \ pbyEtherAddr + 4); \ - VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 5, \ + VNSvInPortB(iobase + MAC_REG_PAR0 + 5, \ pbyEtherAddr + 5); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \ } while (0) -#define MACvWriteEtherAddress(dwIoBase, pbyEtherAddr) \ +#define MACvWriteEtherAddress(iobase, pbyEtherAddr) \ do { \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAR0, \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \ + VNSvOutPortB(iobase + MAC_REG_PAR0, \ *pbyEtherAddr); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 1, \ + VNSvOutPortB(iobase + MAC_REG_PAR0 + 1, \ *(pbyEtherAddr + 1)); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 2, \ + VNSvOutPortB(iobase + MAC_REG_PAR0 + 2, \ *(pbyEtherAddr + 2)); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 3, \ + VNSvOutPortB(iobase + MAC_REG_PAR0 + 3, \ *(pbyEtherAddr + 3)); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 4, \ + VNSvOutPortB(iobase + MAC_REG_PAR0 + 4, \ *(pbyEtherAddr + 4)); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 5, \ + VNSvOutPortB(iobase + MAC_REG_PAR0 + 5, \ *(pbyEtherAddr + 5)); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \ } while (0) -#define MACvClearISR(dwIoBase) \ - VNSvOutPortD(dwIoBase + MAC_REG_ISR, IMR_MASK_VALUE) +#define MACvClearISR(iobase) \ + VNSvOutPortD(iobase + MAC_REG_ISR, IMR_MASK_VALUE) -#define MACvStart(dwIoBase) \ - VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR, \ +#define MACvStart(iobase) \ + VNSvOutPortB(iobase + MAC_REG_HOSTCR, \ (HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON)) -#define MACvRx0PerPktMode(dwIoBase) \ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKT) +#define MACvRx0PerPktMode(iobase) \ + VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT) -#define MACvRx0BufferFillMode(dwIoBase) \ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKTCLR) +#define MACvRx0BufferFillMode(iobase) \ + VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKTCLR) -#define MACvRx1PerPktMode(dwIoBase) \ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKT) +#define MACvRx1PerPktMode(iobase) \ + VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT) -#define MACvRx1BufferFillMode(dwIoBase) \ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKTCLR) +#define MACvRx1BufferFillMode(iobase) \ + VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKTCLR) -#define MACvRxOn(dwIoBase) \ - MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_RXON) +#define MACvRxOn(iobase) \ + MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_RXON) -#define MACvReceive0(dwIoBase) \ +#define MACvReceive0(iobase) \ do { \ unsigned long dwData; \ - VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData); \ + VNSvInPortD(iobase + MAC_REG_RXDMACTL0, &dwData); \ if (dwData & DMACTL_RUN) \ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \ + VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \ else \ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN); \ + VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_RUN); \ } while (0) -#define MACvReceive1(dwIoBase) \ +#define MACvReceive1(iobase) \ do { \ unsigned long dwData; \ - 
VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData); \ + VNSvInPortD(iobase + MAC_REG_RXDMACTL1, &dwData); \ if (dwData & DMACTL_RUN) \ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \ + VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \ else \ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_RUN); \ + VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \ } while (0) -#define MACvTxOn(dwIoBase) \ - MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_TXON) +#define MACvTxOn(iobase) \ + MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_TXON) -#define MACvTransmit0(dwIoBase) \ +#define MACvTransmit0(iobase) \ do { \ unsigned long dwData; \ - VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData); \ + VNSvInPortD(iobase + MAC_REG_TXDMACTL0, &dwData); \ if (dwData & DMACTL_RUN) \ - VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \ + VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \ else \ - VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_RUN); \ + VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_RUN); \ } while (0) -#define MACvTransmitAC0(dwIoBase) \ +#define MACvTransmitAC0(iobase) \ do { \ unsigned long dwData; \ - VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData); \ + VNSvInPortD(iobase + MAC_REG_AC0DMACTL, &dwData); \ if (dwData & DMACTL_RUN) \ - VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \ + VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \ else \ - VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN); \ + VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \ } while (0) -#define MACvTransmitSYNC(dwIoBase) \ +#define MACvTransmitSYNC(iobase) \ do { \ unsigned long dwData; \ - VNSvInPortD(dwIoBase + MAC_REG_SYNCDMACTL, &dwData); \ + VNSvInPortD(iobase + MAC_REG_SYNCDMACTL, &dwData); \ if (dwData & DMACTL_RUN) \ - VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \ + VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \ else \ - VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \ + VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \ } while (0) -#define MACvTransmitATIM(dwIoBase) \ +#define MACvTransmitATIM(iobase) \ do { \ unsigned long dwData; \ - VNSvInPortD(dwIoBase + MAC_REG_ATIMDMACTL, &dwData); \ + VNSvInPortD(iobase + MAC_REG_ATIMDMACTL, &dwData); \ if (dwData & DMACTL_RUN) \ - VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \ + VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \ else \ - VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \ + VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \ } while (0) -#define MACvTransmitBCN(dwIoBase) \ - VNSvOutPortB(dwIoBase + MAC_REG_BCNDMACTL, BEACON_READY) +#define MACvTransmitBCN(iobase) \ + VNSvOutPortB(iobase + MAC_REG_BCNDMACTL, BEACON_READY) -#define MACvClearStckDS(dwIoBase) \ +#define MACvClearStckDS(iobase) \ do { \ unsigned char byOrgValue; \ - VNSvInPortB(dwIoBase + MAC_REG_STICKHW, &byOrgValue); \ + VNSvInPortB(iobase + MAC_REG_STICKHW, &byOrgValue); \ byOrgValue = byOrgValue & 0xFC; \ - VNSvOutPortB(dwIoBase + MAC_REG_STICKHW, byOrgValue); \ + VNSvOutPortB(iobase + MAC_REG_STICKHW, byOrgValue); \ } while (0) -#define MACvReadISR(dwIoBase, pdwValue) \ - VNSvInPortD(dwIoBase + MAC_REG_ISR, pdwValue) +#define MACvReadISR(iobase, pdwValue) \ + VNSvInPortD(iobase + MAC_REG_ISR, pdwValue) -#define MACvWriteISR(dwIoBase, dwValue) \ - VNSvOutPortD(dwIoBase + MAC_REG_ISR, dwValue) +#define MACvWriteISR(iobase, dwValue) \ + VNSvOutPortD(iobase + MAC_REG_ISR, dwValue) -#define MACvIntEnable(dwIoBase, dwMask) \ - VNSvOutPortD(dwIoBase + 
MAC_REG_IMR, dwMask) +#define MACvIntEnable(iobase, dwMask) \ + VNSvOutPortD(iobase + MAC_REG_IMR, dwMask) -#define MACvIntDisable(dwIoBase) \ - VNSvOutPortD(dwIoBase + MAC_REG_IMR, 0) +#define MACvIntDisable(iobase) \ + VNSvOutPortD(iobase + MAC_REG_IMR, 0) -#define MACvSelectPage0(dwIoBase) \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0) +#define MACvSelectPage0(iobase) \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0) -#define MACvSelectPage1(dwIoBase) \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1) +#define MACvSelectPage1(iobase) \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1) -#define MACvReadMIBCounter(dwIoBase, pdwCounter) \ - VNSvInPortD(dwIoBase + MAC_REG_MIBCNTR, pdwCounter) +#define MACvReadMIBCounter(iobase, pdwCounter) \ + VNSvInPortD(iobase + MAC_REG_MIBCNTR, pdwCounter) -#define MACvPwrEvntDisable(dwIoBase) \ - VNSvOutPortW(dwIoBase + MAC_REG_WAKEUPEN0, 0x0000) +#define MACvPwrEvntDisable(iobase) \ + VNSvOutPortW(iobase + MAC_REG_WAKEUPEN0, 0x0000) -#define MACvEnableProtectMD(dwIoBase) \ +#define MACvEnableProtectMD(iobase) \ do { \ unsigned long dwOrgValue; \ - VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ + VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \ dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \ - VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ + VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \ } while (0) -#define MACvDisableProtectMD(dwIoBase) \ +#define MACvDisableProtectMD(iobase) \ do { \ unsigned long dwOrgValue; \ - VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ + VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \ dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \ - VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ + VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \ } while (0) -#define MACvEnableBarkerPreambleMd(dwIoBase) \ +#define MACvEnableBarkerPreambleMd(iobase) \ do { \ unsigned long dwOrgValue; \ - VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ + VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \ dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \ - VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ + VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \ } while (0) -#define MACvDisableBarkerPreambleMd(dwIoBase) \ +#define MACvDisableBarkerPreambleMd(iobase) \ do { \ unsigned long dwOrgValue; \ - VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ + VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \ dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \ - VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ + VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \ } while (0) -#define MACvSetBBType(dwIoBase, byTyp) \ +#define MACvSetBBType(iobase, byTyp) \ do { \ unsigned long dwOrgValue; \ - VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \ + VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \ dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \ dwOrgValue = dwOrgValue | (unsigned long)byTyp; \ - VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \ + VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \ } while (0) -#define MACvReadATIMW(dwIoBase, pwCounter) \ - VNSvInPortW(dwIoBase + MAC_REG_AIDATIM, pwCounter) +#define MACvReadATIMW(iobase, pwCounter) \ + VNSvInPortW(iobase + MAC_REG_AIDATIM, pwCounter) -#define MACvWriteATIMW(dwIoBase, wCounter) \ - VNSvOutPortW(dwIoBase + MAC_REG_AIDATIM, wCounter) +#define MACvWriteATIMW(iobase, wCounter) \ + VNSvOutPortW(iobase + MAC_REG_AIDATIM, wCounter) -#define MACvWriteCRC16_128(dwIoBase, byRegOfs, wCRC) \ +#define MACvWriteCRC16_128(iobase, byRegOfs, wCRC) \ do { \ - 
VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \ - VNSvOutPortW(dwIoBase + byRegOfs, wCRC); \ - VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \ + VNSvOutPortW(iobase + byRegOfs, wCRC); \ + VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \ } while (0) -#define MACvGPIOIn(dwIoBase, pbyValue) \ - VNSvInPortB(dwIoBase + MAC_REG_GPIOCTL1, pbyValue) +#define MACvGPIOIn(iobase, pbyValue) \ + VNSvInPortB(iobase + MAC_REG_GPIOCTL1, pbyValue) -#define MACvSetRFLE_LatchBase(dwIoBase) \ - MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT) +#define MACvSetRFLE_LatchBase(iobase) \ + MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT) bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs, unsigned char byTestBits); diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c index 7d6e7464ae51a1c34de5ec686451996817ead67d..716d2a80f8400c0c11f615484e384f7920f6b5b2 100644 --- a/drivers/staging/vt6655/power.c +++ b/drivers/staging/vt6655/power.c @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * * File: power.c * * Purpose: Handles 802.11 power management functions @@ -133,7 +128,6 @@ PSvDisablePowerSaving( priv->bPWBitOn = false; } - /* * * Routine Description: diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h index d82dd8d6d68ba67d703a317b83f45bee5cd9b524..dfcb0ca8b448defe55ecfb93d42e0b7b98063564 100644 --- a/drivers/staging/vt6655/power.h +++ b/drivers/staging/vt6655/power.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: power.h * * Purpose: Handles 802.11 power management functions @@ -46,7 +42,6 @@ PSvEnablePowerSaving( unsigned short wListenInterval ); - bool PSbIsNextTBTTWakeUp( struct vnt_private * diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c index 447882c7a6beb50aef9c86dcb1e2323bac30830d..edf7db9d53b32c2a6b54a40dbb66e19c5c2a25be 100644 --- a/drivers/staging/vt6655/rf.c +++ b/drivers/staging/vt6655/rf.c @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * * File: rf.c * * Purpose: rf function code @@ -50,359 +45,362 @@ #define AL7230_PWR_IDX_LEN 64 static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = { - 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW + 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x01A00200 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x00FFF300 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0F4DC500 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0805B600 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0146C700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x00068800 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0403B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x00DBBA00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0BDFFC00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x00000D00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x00580F00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW }; static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = { - 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ - 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ - 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ - 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ - 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ - 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ - 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ - 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ - 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ - 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ - 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ - 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ - 0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ - 0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */ + 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ + 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ + 0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ + 0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ + 0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ + 0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 
2437MHz */ + 0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ + 0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ + 0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ + 0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ + 0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ + 0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ + 0x03F7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ + 0x03E7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 14, Tf = 2412M */ }; static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = { - 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ - 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ - 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ - 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ - 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ - 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ - 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ - 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ - 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ - 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ - 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ - 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ - 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ - 0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */ + 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ + 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ + 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ + 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ + 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ + 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ + 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ + 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ + 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ + 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ + 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ + 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ + 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ + 0x06666100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 14, Tf = 2412M */ }; static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = { - 0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04043900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04044900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 
0x04045900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04046900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04047900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04048900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04049900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0404A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0404B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0404C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0404D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0404E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0404F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04050900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04051900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04052900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04053900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04054900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04055900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04056900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04057900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04058900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04059900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0405A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0405B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0405C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0405D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0405E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0405F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04060900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04061900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04062900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04063900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04064900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04065900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04066900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04067900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04068900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04069900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0406A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0406B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0406C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0406D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0406E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0406F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04070900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04071900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04072900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04073900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04074900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04075900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04076900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04077900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04078900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x04079900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0407A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0407B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0407C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0407D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0407E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, - 0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW + 0x04040900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04041900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04042900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04043900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04044900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04045900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04046900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04047900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04048900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04049900 + (BY_AL2230_REG_LEN 
<< 3) + IFREGCTL_REGW, + 0x0404A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0404B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0404C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0404D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0404E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0404F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04050900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04051900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04052900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04053900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04054900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04055900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04056900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04057900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04058900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04059900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0405A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0405B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0405C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0405D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0405E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0405F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04060900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04061900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04062900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04063900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04064900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04065900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04066900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04067900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04068900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04069900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0406A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0406B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0406C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0406D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0406E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0406F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04070900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04071900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04072900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04073900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04074900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04075900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04076900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04077900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04078900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x04079900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0407A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0407B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0407C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0407D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0407E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, + 0x0407F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW }; /* 40MHz reference frequency * Need to Pull PLLON(PE3) low when writing channel registers through 3-wire. 
*/ static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = { - 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */ - 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */ - 0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */ - 0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11b/g // Need modify for 11a */ + 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */ + 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */ + 0x841FF200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */ + 0x3FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11b/g // Need modify for 11a */ /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */ - 0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */ - 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 860207 */ - 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: E0600A */ - 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */ + 0x802B5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */ + 0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 860207 */ + 0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0xE0000A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: E0600A */ + 0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */ /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */ - 0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 00143C */ - 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11a: 12BACF */ + 0x000A3C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 00143C */ + 0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0x1ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11a: 12BACF */ }; static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = { - 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */ - 0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ - 0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ - 0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11a // Need modify for 11b/g */ - 0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */ - 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ - 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 
0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ - 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */ - 0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */ - 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, - 0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11b/g */ + 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */ + 0x451FE200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */ + 0x5FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */ + 0x67F78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11a // Need modify for 11b/g */ + 0x853F5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */ + 0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */ + 0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0xE0600A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */ + 0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */ + 0x00147C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */ + 0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, + 0x12BACF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11b/g */ }; static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = { - 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ - 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ - 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ - 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ - 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ - 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ - 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ - 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */ - 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */ - 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */ - 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */ - 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */ - 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */ - 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ + 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ + 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ + 0x00379000 + (BY_AL7230_REG_LEN << 3) + 
IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ + 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ + 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ + 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ + 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ + 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */ + 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */ + 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */ + 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */ + 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */ + 0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */ + 0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */ - 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ - 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ - 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ - 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ - 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ - 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ - 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ - 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ + 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ + 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ + 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ + 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ + 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ + 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ + 0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ + 0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, - * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */ - - 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ - 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ - 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ - 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ - 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ - 0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 
16, Tf = 5080MHz (28) */ - 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ - 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ - 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */ - 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ - 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ - 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ - 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ - 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ - 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ - 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ - 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ - 0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ - - 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ - 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ - 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ - 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ - 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ - 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ - 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ - 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ - 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ - 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ - 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ - 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ - 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ - 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ - 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ - 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ + * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) + */ + + 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ + 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ + 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ + 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ + 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ + 0x0FF55000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */ + 0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ + 0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ + 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 
20050218, update for APNode 0.49 */ + 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ + 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ + 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ + 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ + 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ + 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ + 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ + 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ + 0x0FF59000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ + + 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ + 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ + 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ + 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ + 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ + 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ + 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ + 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ + 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ + 0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ + 0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ + 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ + 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ + 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ + 0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ + 0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ }; static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = { - 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ - 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ - 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ - 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ - 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ - 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ - 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ - 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ - 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ - 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ - 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ - 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ - 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 
channel = 13, Tf = 2472MHz */ - 0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ + 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ + 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ + 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ + 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ + 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ + 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ + 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ + 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ + 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ + 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ + 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ + 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ + 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ + 0x06666100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */ - 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ - 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ - 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ - 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ - 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ - 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ + 0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ + 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ + 0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ + 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ + 0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ + 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, - * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */ - 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ - 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ - 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ - 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ - 
0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */ - 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ - 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ - 0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */ - 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ - 0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ - 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ - 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ - 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ - 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ - 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ - 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ - 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ - 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ - 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ - 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ - 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ - 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ - 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ - 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ - 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ - 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ - 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ - 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ + * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) + */ + 0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ + 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ + 0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ + 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ + 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */ + 0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ + 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ + 0x10000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */ + 
0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ + 0x1AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ + 0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ + 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ + 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ + 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ + 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ + 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ + 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ + 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ + 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ + 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ + 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ + 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ + 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ + 0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ + 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ + 0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ + 0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ + 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ }; static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = { - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ - 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ - 
0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */ + 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */ /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */ /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, - * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ - 
0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ - 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ - 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ + * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) + */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */ + 
0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */ + 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */ + 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */ }; /* @@ -410,7 +408,7 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = { * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * Out: * none * @@ -419,16 +417,16 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = { */ static bool s_bAL7230Init(struct vnt_private *priv) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; int ii; bool ret; ret = true; /* 3-wire control for normal mode */ - VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0); + VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0); - MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI | + MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV)); BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */ @@ -436,20 +434,20 @@ static bool s_bAL7230Init(struct vnt_private *priv) ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]); /* PLL On */ - MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); + MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); /* Calibration */ MACvTimer0MicroSDelay(priv, 
150);/* 150us */ /* TXDCOC:active, RCK:disable */ - ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); + ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW)); MACvTimer0MicroSDelay(priv, 30);/* 30us */ /* TXDCOC:disable, RCK:active */ - ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); + ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW)); MACvTimer0MicroSDelay(priv, 30);/* 30us */ /* TXDCOC:disable, RCK:disable */ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]); - MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | + MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV)); @@ -458,7 +456,7 @@ static bool s_bAL7230Init(struct vnt_private *priv) /* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */ /* 3-wire control for power saving mode */ - VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */ + VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */ return ret; } @@ -468,26 +466,26 @@ static bool s_bAL7230Init(struct vnt_private *priv) */ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; bool ret; ret = true; /* PLLON Off */ - MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); + MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]); ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]); ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]); /* PLLOn On */ - MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); + MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); /* Set Channel[7] = 0 to tell H/W channel is changing now. */ - VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F)); + VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F)); MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230); /* Set Channel[7] = 1 to tell H/W channel change is done. 
*/ - VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80)); + VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80)); return ret; } @@ -497,7 +495,7 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * dwData - data to write * Out: * none @@ -507,15 +505,15 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha */ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; unsigned short ww; unsigned long dwValue; - VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData); + VNSvOutPortD(iobase + MAC_REG_IFREGCTL, dwData); /* W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue); + VNSvInPortD(iobase + MAC_REG_IFREGCTL, &dwValue); if (dwValue & IFREGCTL_DONE) break; } @@ -531,7 +529,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData) * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * Out: * none * @@ -540,51 +538,51 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData) */ static bool RFbAL2230Init(struct vnt_private *priv) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; int ii; bool ret; ret = true; /* 3-wire control for normal mode */ - VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0); + VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0); - MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI | + MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV)); /* PLL Off */ - MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); + MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); /* patch abnormal AL2230 frequency output */ - IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); + IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW)); for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++) ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]); MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */ /* PLL On */ - MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); + MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); MACvTimer0MicroSDelay(priv, 150);/* 150us */ - ret &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); + ret &= IFRFbWriteEmbedded(priv, (0x00d80f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW)); MACvTimer0MicroSDelay(priv, 30);/* 30us */ - ret &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); + ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW)); MACvTimer0MicroSDelay(priv, 30);/* 30us */ ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]); - MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | + MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPECTI | SOFTPWRCTL_TXPEINV)); /* 3-wire control for power saving mode */ - VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */ + VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */ return ret; } static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel) { - void __iomem *dwIoBase = priv->PortOffset; 
+ void __iomem *iobase = priv->PortOffset; bool ret; ret = true; @@ -593,10 +591,10 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]); /* Set Channel[7] = 0 to tell H/W channel is changing now. */ - VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F)); + VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F)); MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230); /* Set Channel[7] = 1 to tell H/W channel change is done. */ - VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80)); + VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80)); return ret; } @@ -681,7 +679,7 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * uChannel - channel number * bySleepCnt - SleepProgSyn count * @@ -691,12 +689,12 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, u16 uChannel) { - void __iomem *dwIoBase = priv->PortOffset; + void __iomem *iobase = priv->PortOffset; int ii; unsigned char byInitCount = 0; unsigned char bySleepCount = 0; - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, 0); + VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0); switch (byRFType) { case RF_AIROHA: case RF_AL2230S: @@ -758,7 +756,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * dwRFPowerTable - RF Tx Power Setting * Out: * none @@ -830,7 +828,7 @@ bool RFbSetPower( * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * dwRFPowerTable - RF Tx Power Setting * Out: * none @@ -855,20 +853,20 @@ bool RFbRawSetPower( case RF_AIROHA: ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]); if (rate <= RATE_11M) - ret &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x0001B400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW); else - ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW); break; case RF_AL2230S: ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]); if (rate <= RATE_11M) { - ret &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); - ret &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x040C1400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x00299B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW); } else { - ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); - ret &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW); } break; diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h index e9c786995506cce2d8b05427f9714b32a1142735..b6e853784a26168c2ae3dd568cb8303c0648bd24 100644 --- a/drivers/staging/vt6655/rf.h +++ b/drivers/staging/vt6655/rf.h @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * * File: rf.h * * Purpose: diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c index 7e69bc99d60fd671a62f3a0c832a0893217812cf..3efe19a1b13f55cd4d1d895a32797951681432e1 100644 --- a/drivers/staging/vt6655/rxtx.c +++ b/drivers/staging/vt6655/rxtx.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: rxtx.c * * Purpose: handle WMAC/802.3/802.11 rx & tx functions @@ -1086,8 +1082,8 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType, } /* - * Use for AUTO FALL BACK - */ + * Use for AUTO FALL BACK + */ if (fifo_ctl & FIFOCTL_AUTO_FB_0) byFBOption = AUTO_FB_0; else if (fifo_ctl & FIFOCTL_AUTO_FB_1) diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h index 1e30ecb5c63c08b6a3ab1b06658203a32c5bdf8d..89de67115826052ecb9f978728bbd0efc2f539aa 100644 --- a/drivers/staging/vt6655/rxtx.h +++ b/drivers/staging/vt6655/rxtx.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: rxtx.h * * Purpose: diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c index ee992772066f8f522fa1b8c80ea885ec0d27f835..635f271595f680ebe4755d48ad5858c4dfe879f0 100644 --- a/drivers/staging/vt6655/srom.c +++ b/drivers/staging/vt6655/srom.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * * File: srom.c * * Purpose:Implement functions to access eeprom @@ -64,7 +60,7 @@ * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * byContntOffset - address of EEPROM * Out: * none @@ -72,7 +68,7 @@ * Return Value: data read * */ -unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, +unsigned char SROMbyReadEmbedded(void __iomem *iobase, unsigned char byContntOffset) { unsigned short wDelay, wNoACK; @@ -81,18 +77,18 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byOrg; byData = 0xFF; - VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg); + VNSvInPortB(iobase + MAC_REG_I2MCFG, &byOrg); /* turn off hardware retry for getting NACK */ - VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY))); + VNSvOutPortB(iobase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY))); for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) { - VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID); - VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset); + VNSvOutPortB(iobase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID); + VNSvOutPortB(iobase + MAC_REG_I2MTGAD, byContntOffset); /* issue read command */ - VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMR); + VNSvOutPortB(iobase + MAC_REG_I2MCSR, I2MCSR_EEMR); /* wait DONE be set */ for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) { - VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait); + VNSvInPortB(iobase + MAC_REG_I2MCSR, &byWait); if (byWait & (I2MCSR_DONE | I2MCSR_NACK)) break; PCAvDelayByIO(CB_DELAY_LOOP_WAIT); @@ -102,8 +98,8 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, break; } } - VNSvInPortB(dwIoBase + MAC_REG_I2MDIPT, &byData); - VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg); + VNSvInPortB(iobase + MAC_REG_I2MDIPT, &byData); + VNSvOutPortB(iobase + MAC_REG_I2MCFG, byOrg); return byData; } @@ -112,20 +108,20 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * Out: * pbyEepromRegs - EEPROM content Buffer * * Return Value: none * */ -void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs) +void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs) { int ii; /* ii = Rom Address */ for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) { - *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase, + *pbyEepromRegs = SROMbyReadEmbedded(iobase, (unsigned char)ii); pbyEepromRegs++; } @@ -136,21 +132,21 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs) * * Parameters: * In: - * dwIoBase - I/O base address + * iobase - I/O base address * Out: * pbyEtherAddress - Ethernet Address buffer * * Return Value: none * */ -void SROMvReadEtherAddress(void __iomem *dwIoBase, +void SROMvReadEtherAddress(void __iomem *iobase, unsigned char *pbyEtherAddress) { unsigned char ii; /* ii = Rom Address */ for (ii = 0; ii < ETH_ALEN; ii++) { - *pbyEtherAddress = SROMbyReadEmbedded(dwIoBase, ii); + *pbyEtherAddress = SROMbyReadEmbedded(iobase, ii); pbyEtherAddress++; } } diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h index 531bf0069373fa0c8a9a934de045b465e1ba0e09..6e03ab6dfa9d33800a5987c47b56140a3329cfa0 100644 --- a/drivers/staging/vt6655/srom.h +++ b/drivers/staging/vt6655/srom.h @@ -12,11 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * * File: srom.h * * Purpose: Implement functions to access eeprom @@ -90,12 +85,12 @@ /*--------------------- Export Functions --------------------------*/ -unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, +unsigned char SROMbyReadEmbedded(void __iomem *iobase, unsigned char byContntOffset); -void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs); +void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs); -void SROMvReadEtherAddress(void __iomem *dwIoBase, +void SROMvReadEtherAddress(void __iomem *iobase, unsigned char *pbyEtherAddress); #endif /* __EEPROM_H__*/ diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h index 597efefc017f9b8f9f0d4ab7a3975a7e78aff487..d6a0563ad55c1381b5a30b719742590642440b97 100644 --- a/drivers/staging/vt6655/tmacro.h +++ b/drivers/staging/vt6655/tmacro.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * File: tmacro.h * * Purpose: define basic common types and macros diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h index 85fe0464cfb3a20a44dfab6cb3020d2d4b87f6da..9806b5989014fa147705a7417a77a3f554c401c9 100644 --- a/drivers/staging/vt6655/upc.h +++ b/drivers/staging/vt6655/upc.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * * File: upc.h * * Purpose: Macros to access device diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h index 7cc13874f8f16b18b6327763ae2d5f52231069c1..fe1c25c64cca14d9098986d8ef384bafc7af279f 100644 --- a/drivers/staging/vt6656/baseband.h +++ b/drivers/staging/vt6656/baseband.h @@ -86,15 +86,15 @@ struct vnt_phy_field { unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type, unsigned int frame_length, u16 tx_rate); -void vnt_get_phy_field(struct vnt_private *, u32 frame_length, - u16 tx_rate, u8 pkt_type, struct vnt_phy_field *); - -void vnt_set_short_slot_time(struct vnt_private *); -void vnt_set_vga_gain_offset(struct vnt_private *, u8); -void vnt_set_antenna_mode(struct vnt_private *, u8); -int vnt_vt3184_init(struct vnt_private *); -void vnt_set_deep_sleep(struct vnt_private *); -void vnt_exit_deep_sleep(struct vnt_private *); -void vnt_update_pre_ed_threshold(struct vnt_private *, int scanning); +void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length, + u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy); + +void vnt_set_short_slot_time(struct vnt_private *priv); +void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data); +void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode); +int vnt_vt3184_init(struct vnt_private *priv); +void vnt_set_deep_sleep(struct vnt_private *priv); +void vnt_exit_deep_sleep(struct vnt_private *priv); +void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning); #endif /* __BASEBAND_H__ */ diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c index 53b469c71dc26e740f93c5ec3b6ead53a6aae23f..0e5a993750995b11a564e7e5ab225511f7ba68d9 100644 --- a/drivers/staging/vt6656/card.c +++ b/drivers/staging/vt6656/card.c @@ -501,16 +501,7 @@ u8 vnt_get_pkt_type(struct vnt_private *priv) */ u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2) { - u64 tsf_offset = 0; - u16 rx_bcn_offset; - - rx_bcn_offset = cw_rxbcntsf_off[rx_rate % MAX_RATE]; - - tsf2 += (u64)rx_bcn_offset; - - tsf_offset = tsf1 - tsf2; - - return tsf_offset; + return tsf1 - tsf2 - (u64)cw_rxbcntsf_off[rx_rate % MAX_RATE]; } /* @@ -610,8 +601,8 @@ u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval) beacon_int = beacon_interval * 1024; /* Next TBTT = - * ((local_current_TSF / beacon_interval) + 1) * beacon_interval - */ + * ((local_current_TSF / beacon_interval) + 1) * beacon_interval + */ if (beacon_int) { do_div(tsf, beacon_int); tsf += 1; diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c index eeed16e9124ed9fe0ca608bdfe90066d3011b8c8..611da4929ddc18efbd596445f1f32b6fe51aee58 100644 --- a/drivers/staging/vt6656/mac.c +++ b/drivers/staging/vt6656/mac.c @@ -121,7 +121,7 @@ void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx, u16 offset; offset = MISCFIFO_KEYETRY0; - offset += (entry_idx * MISCFIFO_KEYENTRYSIZE); + offset += entry_idx * MISCFIFO_KEYENTRYSIZE; set_key.u.write.key_ctl = cpu_to_le16(key_ctl); ether_addr_copy(set_key.u.write.addr, addr); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 0594828bdabf92feca8736a7b777c5d6b7b07763..50d02d9aa5356d29323460f25224990e1210e282 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -85,7 +85,7 @@ MODULE_PARM_DESC(tx_buffers, "Number of receive usb tx buffers"); * Static vars definitions */ -static struct usb_device_id vt6656_table[] = { +static const struct usb_device_id vt6656_table[] = { {USB_DEVICE(VNT_USB_VENDOR_ID, 
VNT_USB_PRODUCT_ID)}, {} }; @@ -326,9 +326,9 @@ static int vnt_init_registers(struct vnt_private *priv) priv->current_net_addr); /* - * set BB and packet type at the same time - * set Short Slot Time, xIFS, and RSPINF - */ + * set BB and packet type at the same time + * set Short Slot Time, xIFS, and RSPINF + */ if (priv->bb_type == BB_TYPE_11A) priv->short_slot_time = true; else diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index 79a3108719a678c225cc519106b11fa909773318..6101a35582b6c0eec3fcfce05cad9b875f7a68b1 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c @@ -730,9 +730,9 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate) return false; /* - * 0x080F1B00 for 3 wire control TxGain(D10) - * and 0x31 as TX Gain value - */ + * 0x080F1B00 for 3 wire control TxGain(D10) + * and 0x31 as TX Gain value + */ power_setting = 0x080c0b00 | (power << 12); ret &= vnt_rf_write_embedded(priv, power_setting); @@ -800,8 +800,8 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate) /* Convert rssi to dbm */ void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm) { - u8 idx = (((rssi & 0xc0) >> 6) & 0x03); - long b = (rssi & 0x3f); + u8 idx = ((rssi & 0xc0) >> 6) & 0x03; + long b = rssi & 0x3f; long a = 0; u8 airoharf[4] = {0, 18, 0, 40}; diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c index 4b51c0ac27ac1b7ef27105aa16c379b6d0d42cdc..622994795222b7ea07d31bc2dcf1c6a93daaa24e 100644 --- a/drivers/staging/wilc1000/coreconfigurator.c +++ b/drivers/staging/wilc1000/coreconfigurator.c @@ -224,9 +224,7 @@ static inline u16 get_asoc_status(u8 *data) u16 asoc_status; asoc_status = data[3]; - asoc_status = (asoc_status << 8) | data[2]; - - return asoc_status; + return (asoc_status << 8) | data[2]; } static inline u16 get_asoc_id(u8 *data) diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index 6ab7443eabdefa2bdf7c4a4653433e10250b9bf2..b00ea75524e407deed8f767dc9ddc686370beabe 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c @@ -1722,10 +1722,8 @@ static int Handle_Key(struct wilc_vif *vif, case PMKSA: pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL); - if (!pu8keybuf) { - netdev_err(vif->ndev, "No buffer to send PMKSA Key\n"); + if (!pu8keybuf) return -ENOMEM; - } pu8keybuf[0] = pstrHostIFkeyAttr->attr.pmkid.numpmkid; @@ -1932,7 +1930,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif, wid.val = kmalloc(wid.size, GFP_KERNEL); stamac = wid.val; - memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN); + ether_addr_copy(stamac, strHostIfStaInactiveT->mac); result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, wilc_get_vif_idx(vif)); @@ -2168,7 +2166,7 @@ static void Handle_DelStation(struct wilc_vif *vif, pu8CurrByte = wid.val; - memcpy(pu8CurrByte, pstrDelStaParam->mac_addr, ETH_ALEN); + ether_addr_copy(pu8CurrByte, pstrDelStaParam->mac_addr); result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, wilc_get_vif_idx(vif)); @@ -2322,10 +2320,8 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif, wid.size = 2; wid.val = kmalloc(wid.size, GFP_KERNEL); - if (!wid.val) { - netdev_err(vif->ndev, "Failed to allocate memory\n"); + if (!wid.val) return -ENOMEM; - } wid.val[0] = u8remain_on_chan_flag; wid.val[1] = FALSE_FRMWR_CHANNEL; diff --git a/drivers/staging/wilc1000/host_interface.h 
b/drivers/staging/wilc1000/host_interface.h index ddfea29df2a72ab667c04726c2e68c2c9abd9049..f36d3b5a03702a8c4e076bf956985f27950d60ad 100644 --- a/drivers/staging/wilc1000/host_interface.h +++ b/drivers/staging/wilc1000/host_interface.h @@ -367,7 +367,6 @@ extern u8 wilc_connected_ssid[6]; extern u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN]; extern int wilc_connecting; -extern u8 wilc_initialized; extern struct timer_list wilc_during_ip_timer; #endif diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c index 242f82f4d24f296faabcb7c719f72d82852e39f6..f328d75de0d120e6110638211ff2bb29815b3b2f 100644 --- a/drivers/staging/wilc1000/linux_mon.c +++ b/drivers/staging/wilc1000/linux_mon.c @@ -111,7 +111,7 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size) } skb->dev = wilc_wfi_mon; - skb_set_mac_header(skb, 0); + skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); @@ -215,7 +215,7 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb, cb_hdr->tx_flags = 0x0004; skb2->dev = wilc_wfi_mon; - skb_set_mac_header(skb2, 0); + skb_reset_mac_header(skb2); skb2->ip_summed = CHECKSUM_UNNECESSARY; skb2->pkt_type = PACKET_OTHERHOST; skb2->protocol = htons(ETH_P_802_2); diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index 6370a5efe343abd657226735be6d044e28c91d99..3775706578b2e5779def5b9e153c2f98cec25006 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c @@ -37,6 +37,8 @@ static void linux_wlan_tx_complete(void *priv, int status); static int mac_init_fn(struct net_device *ndev); static struct net_device_stats *mac_stats(struct net_device *dev); static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd); +static int wilc_mac_open(struct net_device *ndev); +static int wilc_mac_close(struct net_device *ndev); static void wilc_set_multicast_list(struct net_device *dev); bool wilc_enable_ps = true; @@ -218,17 +220,6 @@ static void deinit_irq(struct net_device *dev) } } -int wilc_lock_timeout(struct wilc *nic, void *vp, u32 timeout) -{ - /* FIXME: replace with mutex_lock or wait_for_completion */ - int error = -1; - - if (vp) - error = down_timeout(vp, - msecs_to_jiffies(timeout)); - return error; -} - void wilc_mac_indicate(struct wilc *wilc, int flag) { int status; @@ -269,23 +260,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header) int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode) { - int i = 0; - int ret = -1; - struct wilc_vif *vif; - struct wilc *wilc; - - vif = netdev_priv(wilc_netdev); - wilc = vif->wilc; + struct wilc_vif *vif = netdev_priv(wilc_netdev); - for (i = 0; i < wilc->vif_num; i++) - if (wilc->vif[i]->ndev == wilc_netdev) { - memcpy(wilc->vif[i]->bssid, bssid, 6); - wilc->vif[i]->mode = mode; - ret = 0; - break; - } + memcpy(vif->bssid, bssid, 6); + vif->mode = mode; - return ret; + return 0; } int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc) @@ -847,7 +827,7 @@ static int mac_init_fn(struct net_device *ndev) return 0; } -int wilc_mac_open(struct net_device *ndev) +static int wilc_mac_open(struct net_device *ndev) { struct wilc_vif *vif; @@ -1038,7 +1018,7 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) return 0; } -int wilc_mac_close(struct net_device *ndev) +static int wilc_mac_close(struct net_device *ndev) { struct wilc_priv *priv; struct wilc_vif *vif; @@ -1212,16 +1192,11 @@ 
void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size) void wilc_netdev_cleanup(struct wilc *wilc) { - int i = 0; - struct wilc_vif *vif[NUM_CONCURRENT_IFC]; + int i; - if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) { + if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) unregister_inetaddr_notifier(&g_dev_notifier); - for (i = 0; i < NUM_CONCURRENT_IFC; i++) - vif[i] = netdev_priv(wilc->vif[i]->ndev); - } - if (wilc && wilc->firmware) { release_firmware(wilc->firmware); wilc->firmware = NULL; @@ -1230,7 +1205,7 @@ void wilc_netdev_cleanup(struct wilc *wilc) if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) { for (i = 0; i < NUM_CONCURRENT_IFC; i++) if (wilc->vif[i]->ndev) - if (vif[i]->mac_opened) + if (wilc->vif[i]->mac_opened) wilc_mac_close(wilc->vif[i]->ndev); for (i = 0; i < NUM_CONCURRENT_IFC; i++) { @@ -1278,9 +1253,9 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, vif->idx = wl->vif_num; vif->wilc = *wilc; + vif->ndev = ndev; wl->vif[i] = vif; - wl->vif[wl->vif_num]->ndev = ndev; - wl->vif_num++; + wl->vif_num = i; ndev->netdev_ops = &wilc_netdev_ops; { diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c index 802bb1d5e2071eca41e8d6f53eec9621ec0d40f8..07260c497db4e400f43c5f40ef8a2d3ba26ce25f 100644 --- a/drivers/staging/wilc1000/wilc_debugfs.c +++ b/drivers/staging/wilc1000/wilc_debugfs.c @@ -62,16 +62,16 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf, return ret; if (flag > DBG_LEVEL_ALL) { - printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL)); + pr_info("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL)); return -EINVAL; } atomic_set(&WILC_DEBUG_LEVEL, (int)flag); if (flag == 0) - printk(KERN_INFO "Debug-level disabled\n"); + pr_info("Debug-level disabled\n"); else - printk(KERN_INFO "Debug-level enabled\n"); + pr_info("Debug-level enabled\n"); return count; } diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c index 39b73fb27398b0599dd0ee14066b3b41c3181226..3ad7cec4662dd1020f6f75b4009b665a23190b80 100644 --- a/drivers/staging/wilc1000/wilc_sdio.c +++ b/drivers/staging/wilc1000/wilc_sdio.c @@ -39,6 +39,7 @@ struct wilc_sdio { }; static struct wilc_sdio g_sdio; +static const struct wilc_hif_func wilc_hif_sdio; static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data); static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data); @@ -1100,7 +1101,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint) * ********************************************/ -const struct wilc_hif_func wilc_hif_sdio = { +static const struct wilc_hif_func wilc_hif_sdio = { .hif_init = sdio_init, .hif_deinit = sdio_deinit, .hif_read_reg = sdio_read_reg, diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c index f08cf6d9e1aff1a6d63c4db380141396ad1a61f6..55d53c3a95df9ca7adead27d79bcf9ab63e42766 100644 --- a/drivers/staging/wilc1000/wilc_spi.c +++ b/drivers/staging/wilc1000/wilc_spi.c @@ -30,6 +30,7 @@ struct wilc_spi { }; static struct wilc_spi g_spi; +static const struct wilc_hif_func wilc_hif_spi; static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32); static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32); @@ -858,7 +859,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) /* the SPI to it's initial value. 
*/ if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, ®)) { /* Read failed. Try with CRC off. This might happen when module - * is removed but chip isn't reset*/ + * is removed but chip isn't reset + */ g_spi.crc_off = 1; dev_err(&spi->dev, "Failed internal read protocol with CRC on, retrying with CRC off...\n"); if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, ®)) { @@ -1133,7 +1135,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint) * Global spi HIF function table * ********************************************/ -const struct wilc_hif_func wilc_hif_spi = { +static const struct wilc_hif_func wilc_hif_spi = { .hif_init = wilc_spi_init, .hif_deinit = _wilc_spi_deinit, .hif_read_reg = wilc_spi_read_reg, diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index 60d8b055bb2f0e2d7d2bd99abe595e4c74ed65a3..c1a24f7bc85fdcb1dabe665126497b4a7d1cc604 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -90,17 +90,12 @@ static const struct wiphy_wowlan_support wowlan_support = { #define IS_MGMT_STATUS_SUCCES 0x040 #define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff) -extern int wilc_mac_open(struct net_device *ndev); -extern int wilc_mac_close(struct net_device *ndev); - static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW]; static u32 last_scanned_cnt; struct timer_list wilc_during_ip_timer; static struct timer_list hAgingTimer; static u8 op_ifcs; -u8 wilc_initialized = 1; - #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ @@ -1193,6 +1188,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, u32 i = 0; u32 associatedsta = ~0; u32 inactive_time = 0; + priv = wiphy_priv(wiphy); vif = netdev_priv(dev); @@ -1590,28 +1586,25 @@ static int remain_on_channel(struct wiphy *wiphy, priv->strRemainOnChanParams.u32ListenDuration = duration; priv->strRemainOnChanParams.u32ListenSessionID++; - s32Error = wilc_remain_on_channel(vif, + return wilc_remain_on_channel(vif, priv->strRemainOnChanParams.u32ListenSessionID, duration, chan->hw_value, WILC_WFI_RemainOnChannelExpired, WILC_WFI_RemainOnChannelReady, (void *)priv); - - return s32Error; } static int cancel_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { - s32 s32Error = 0; struct wilc_priv *priv; struct wilc_vif *vif; priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - s32Error = wilc_listen_state_expired(vif, priv->strRemainOnChanParams.u32ListenSessionID); - return s32Error; + return wilc_listen_state_expired(vif, + priv->strRemainOnChanParams.u32ListenSessionID); } static int mgmt_tx(struct wiphy *wiphy, @@ -1935,12 +1928,10 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev, wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE); wilc_set_power_mgmt(vif, 0, 0); - s32Error = wilc_add_beacon(vif, settings->beacon_interval, + return wilc_add_beacon(vif, settings->beacon_interval, settings->dtim_period, beacon->head_len, (u8 *)beacon->head, beacon->tail_len, (u8 *)beacon->tail); - - return s32Error; } static int change_beacon(struct wiphy *wiphy, struct net_device *dev, @@ -1948,16 +1939,13 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev, { struct wilc_priv *priv; struct wilc_vif *vif; - s32 s32Error = 0; priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - s32Error = wilc_add_beacon(vif, 0, 0, beacon->head_len, + return 
wilc_add_beacon(vif, 0, 0, beacon->head_len, (u8 *)beacon->head, beacon->tail_len, (u8 *)beacon->tail); - - return s32Error; } static int stop_ap(struct wiphy *wiphy, struct net_device *dev) diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h index ec6b1674cf38425e3deb9a8a67e25ffb27845390..d431673bc46c2f74071ade3071694673d73becf9 100644 --- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h +++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h @@ -225,7 +225,6 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif); void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset); void wilc_mac_indicate(struct wilc *wilc, int flag); -int wilc_lock_timeout(struct wilc *wilc, void *, u32 timeout); void wilc_netdev_cleanup(struct wilc *wilc); int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio, const struct wilc_hif_func *ops); diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h index de6c4ddbf45a75988e2f79df375f57d91a213085..11365efcc5d07382e5fd30f96f3dcc84c7579782 100644 --- a/drivers/staging/wilc1000/wilc_wlan.h +++ b/drivers/staging/wilc1000/wilc_wlan.h @@ -248,9 +248,6 @@ struct wilc_hif_func { void (*disable_interrupt)(struct wilc *nic); }; -extern const struct wilc_hif_func wilc_hif_spi; -extern const struct wilc_hif_func wilc_hif_sdio; - /******************************************** * * Configuration Structure @@ -297,9 +294,6 @@ void wilc_enable_tcp_ack_filter(bool value); int wilc_wlan_get_num_conn_ifcs(struct wilc *); int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev); -int wilc_mac_open(struct net_device *ndev); -int wilc_mac_close(struct net_device *ndev); - void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size); void host_wakeup_notify(struct wilc *wilc); void host_sleep_notify(struct wilc *wilc); diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 182b2d56462789bdf3f1c09c0132d3b17e5d19d9..aa0e5a3d4a899daf2b1c7d4215d70ce1dbdc303b 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -323,7 +323,7 @@ static int prism2_scan(struct wiphy *wiphy, priv->scan_request = request; - memset(&msg1, 0x00, sizeof(struct p80211msg_dot11req_scan)); + memset(&msg1, 0x00, sizeof(msg1)); msg1.msgcode = DIDmsg_dot11req_scan; msg1.bsstype.data = P80211ENUM_bsstype_any; @@ -375,13 +375,13 @@ static int prism2_scan(struct wiphy *wiphy, ie_buf[0] = WLAN_EID_SSID; ie_buf[1] = msg2.ssid.data.len; ie_len = ie_buf[1] + 2; - memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len); + memcpy(&ie_buf[2], &msg2.ssid.data.data, msg2.ssid.data.len); freq = ieee80211_channel_to_frequency(msg2.dschannel.data, NL80211_BAND_2GHZ); bss = cfg80211_inform_bss(wiphy, ieee80211_get_channel(wiphy, freq), CFG80211_BSS_FTYPE_UNKNOWN, - (const u8 *)&(msg2.bssid.data.data), + (const u8 *)&msg2.bssid.data.data, msg2.timestamp.data, msg2.capinfo.data, msg2.beaconperiod.data, ie_buf, diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h index 43c299c3b631ef7810b37e4c5465201e4ec69584..60caf9c3772712ab7a4423de57afcc4daed9aca7 100644 --- a/drivers/staging/wlan-ng/hfa384x.h +++ b/drivers/staging/wlan-ng/hfa384x.h @@ -137,21 +137,11 @@ #define HFA384x_DLSTATE_FLASHENABLED 2 /*--- Register Field Masks --------------------------*/ -#define HFA384x_CMD_AINFO ((u16)(BIT(14) | BIT(13) \ - | BIT(12) | BIT(11) \ - | BIT(10) | BIT(9) \ - | BIT(8))) -#define 
HFA384x_CMD_MACPORT ((u16)(BIT(10) | BIT(9) | \ - BIT(8))) -#define HFA384x_CMD_PROGMODE ((u16)(BIT(9) | BIT(8))) -#define HFA384x_CMD_CMDCODE ((u16)(BIT(5) | BIT(4) | \ - BIT(3) | BIT(2) | \ - BIT(1) | BIT(0))) - -#define HFA384x_STATUS_RESULT ((u16)(BIT(14) | BIT(13) \ - | BIT(12) | BIT(11) \ - | BIT(10) | BIT(9) \ - | BIT(8))) +#define HFA384x_CMD_AINFO ((u16)GENMASK(14, 8)) +#define HFA384x_CMD_MACPORT ((u16)GENMASK(10, 8)) +#define HFA384x_CMD_PROGMODE ((u16)GENMASK(9, 8)) +#define HFA384x_CMD_CMDCODE ((u16)GENMASK(5, 0)) +#define HFA384x_STATUS_RESULT ((u16)GENMASK(14, 8)) /*--- Command Code Constants --------------------------*/ /*--- Controller Commands --------------------------*/ @@ -266,7 +256,7 @@ #define HFA384x_RID_DBMCOMMSQUALITY_LEN \ ((u16)sizeof(struct hfa384x_dbmcommsquality)) #define HFA384x_RID_JOINREQUEST_LEN \ - ((u16)sizeof(struct hfa384x_JoinRequest_data)) + ((u16)sizeof(struct hfa384x_join_request_data)) /*-------------------------------------------------------------------- * Information RIDs: Modem Information @@ -286,7 +276,7 @@ #define HFA384x_RID_CNFWEPFLAGS ((u16)0xFC28) #define HFA384x_RID_CNFAUTHENTICATION ((u16)0xFC2A) #define HFA384x_RID_CNFROAMINGMODE ((u16)0xFC2D) -#define HFA384x_RID_CNFAPBCNint ((u16)0xFC33) +#define HFA384x_RID_CNFAPBCNINT ((u16)0xFC33) #define HFA384x_RID_CNFDBMADJUST ((u16)0xFC46) #define HFA384x_RID_CNFWPADATA ((u16)0xFC48) #define HFA384x_RID_CNFBASICRATES ((u16)0xFCB3) @@ -408,27 +398,27 @@ struct hfa384x_caplevel { #define HFA384x_CREATEIBSS_JOINCREATEIBSS 0 /*-- Configuration Record: HostScanRequest (data portion only) --*/ -struct hfa384x_HostScanRequest_data { - u16 channelList; - u16 txRate; +struct hfa384x_host_scan_request_data { + u16 channel_list; + u16 tx_rate; struct hfa384x_bytestr32 ssid; } __packed; /*-- Configuration Record: JoinRequest (data portion only) --*/ -struct hfa384x_JoinRequest_data { +struct hfa384x_join_request_data { u8 bssid[WLAN_BSSID_LEN]; u16 channel; } __packed; /*-- Configuration Record: authenticateStation (data portion only) --*/ -struct hfa384x_authenticateStation_data { +struct hfa384x_authenticate_station_data { u8 address[ETH_ALEN]; u16 status; u16 algorithm; } __packed; /*-- Configuration Record: WPAData (data portion only) --*/ -struct hfa384x_WPAData { +struct hfa384x_wpa_data { u16 datalen; u8 data[0]; /* max 80 */ } __packed; @@ -455,16 +445,16 @@ struct hfa384x_downloadbuffer { /*-- Information Record: commsquality --*/ struct hfa384x_commsquality { - u16 CQ_currBSS; - u16 ASL_currBSS; - u16 ANL_currFC; + u16 cq_curr_bss; + u16 asl_curr_bss; + u16 anl_curr_fc; } __packed; /*-- Information Record: dmbcommsquality --*/ struct hfa384x_dbmcommsquality { - u16 CQdbm_currBSS; - u16 ASLdbm_currBSS; - u16 ANLdbm_currFC; + u16 cq_dbm_curr_bss; + u16 asl_dbm_curr_bss; + u16 anl_dbm_curr_fc; } __packed; /*-------------------------------------------------------------------- @@ -511,9 +501,8 @@ struct hfa384x_tx_frame { #define HFA384x_TXSTATUS_AGEDERR ((u16)BIT(1)) #define HFA384x_TXSTATUS_RETRYERR ((u16)BIT(0)) /*-- Transmit Control Field --*/ -#define HFA384x_TX_MACPORT ((u16)(BIT(10) | \ - BIT(9) | BIT(8))) -#define HFA384x_TX_STRUCTYPE ((u16)(BIT(4) | BIT(3))) +#define HFA384x_TX_MACPORT ((u16)GENMASK(10, 8)) +#define HFA384x_TX_STRUCTYPE ((u16)GENMASK(4, 3)) #define HFA384x_TX_TXEX ((u16)BIT(2)) #define HFA384x_TX_TXOK ((u16)BIT(1)) /*-------------------------------------------------------------------- @@ -571,9 +560,7 @@ struct hfa384x_rx_frame { */ /*-- Status Fields --*/ -#define 
HFA384x_RXSTATUS_MACPORT ((u16)(BIT(10) | \ - BIT(9) | \ - BIT(8))) +#define HFA384x_RXSTATUS_MACPORT ((u16)GENMASK(10, 8)) #define HFA384x_RXSTATUS_FCSERR ((u16)BIT(0)) /*-------------------------------------------------------------------- * Communication Frames: Test/Get/Set Field Values for Receive Frames @@ -610,7 +597,7 @@ struct hfa384x_rx_frame { */ /*-- Inquiry Frame, Diagnose: Communication Tallies --*/ -struct hfa384x_CommTallies16 { +struct hfa384x_comm_tallies_16 { u16 txunicastframes; u16 txmulticastframes; u16 txfragments; @@ -634,7 +621,7 @@ struct hfa384x_CommTallies16 { u16 rxmsginbadmsgfrag; } __packed; -struct hfa384x_CommTallies32 { +struct hfa384x_comm_tallies_32 { u32 txunicastframes; u32 txmulticastframes; u32 txfragments; @@ -659,7 +646,7 @@ struct hfa384x_CommTallies32 { } __packed; /*-- Inquiry Frame, Diagnose: Scan Results & Subfields--*/ -struct hfa384x_ScanResultSub { +struct hfa384x_scan_result_sub { u16 chid; u16 anl; u16 sl; @@ -671,14 +658,14 @@ struct hfa384x_ScanResultSub { u16 proberesp_rate; } __packed; -struct hfa384x_ScanResult { +struct hfa384x_scan_result { u16 rsvd; u16 scanreason; - struct hfa384x_ScanResultSub result[HFA384x_SCANRESULT_MAX]; + struct hfa384x_scan_result_sub result[HFA384x_SCANRESULT_MAX]; } __packed; /*-- Inquiry Frame, Diagnose: ChInfo Results & Subfields--*/ -struct hfa384x_ChInfoResultSub { +struct hfa384x_ch_info_result_sub { u16 chid; u16 anl; u16 pnl; @@ -688,13 +675,13 @@ struct hfa384x_ChInfoResultSub { #define HFA384x_CHINFORESULT_BSSACTIVE BIT(0) #define HFA384x_CHINFORESULT_PCFACTIVE BIT(1) -struct hfa384x_ChInfoResult { +struct hfa384x_ch_info_result { u16 scanchannels; - struct hfa384x_ChInfoResultSub result[HFA384x_CHINFORESULT_MAX]; + struct hfa384x_ch_info_result_sub result[HFA384x_CHINFORESULT_MAX]; } __packed; /*-- Inquiry Frame, Diagnose: Host Scan Results & Subfields--*/ -struct hfa384x_HScanResultSub { +struct hfa384x_hscan_result_sub { u16 chid; u16 anl; u16 sl; @@ -707,10 +694,10 @@ struct hfa384x_HScanResultSub { u16 atim; } __packed; -struct hfa384x_HScanResult { +struct hfa384x_hscan_result { u16 nresult; u16 rsvd; - struct hfa384x_HScanResultSub result[HFA384x_HSCANRESULT_MAX]; + struct hfa384x_hscan_result_sub result[HFA384x_HSCANRESULT_MAX]; } __packed; /*-- Unsolicited Frame, MAC Mgmt: LinkStatus --*/ @@ -723,7 +710,7 @@ struct hfa384x_HScanResult { #define HFA384x_LINK_AP_INRANGE ((u16)5) #define HFA384x_LINK_ASSOCFAIL ((u16)6) -struct hfa384x_LinkStatus { +struct hfa384x_link_status { u16 linkstatus; } __packed; @@ -733,7 +720,7 @@ struct hfa384x_LinkStatus { #define HFA384x_ASSOCSTATUS_REASSOC ((u16)2) #define HFA384x_ASSOCSTATUS_AUTHFAIL ((u16)5) -struct hfa384x_AssocStatus { +struct hfa384x_assoc_status { u16 assocstatus; u8 sta_addr[ETH_ALEN]; /* old_ap_addr is only valid if assocstatus == 2 */ @@ -744,37 +731,37 @@ struct hfa384x_AssocStatus { /*-- Unsolicited Frame, MAC Mgmt: AuthRequest (AP Only) --*/ -struct hfa384x_AuthRequest { +struct hfa384x_auth_request { u8 sta_addr[ETH_ALEN]; u16 algorithm; } __packed; /*-- Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/ -struct hfa384x_PSUserCount { +struct hfa384x_ps_user_count { u16 usercnt; } __packed; -struct hfa384x_KeyIDChanged { +struct hfa384x_key_id_changed { u8 sta_addr[ETH_ALEN]; u16 keyid; } __packed; /*-- Collection of all Inf frames ---------------*/ union hfa384x_infodata { - struct hfa384x_CommTallies16 commtallies16; - struct hfa384x_CommTallies32 commtallies32; - struct hfa384x_ScanResult scanresult; - struct 
hfa384x_ChInfoResult chinforesult; - struct hfa384x_HScanResult hscanresult; - struct hfa384x_LinkStatus linkstatus; - struct hfa384x_AssocStatus assocstatus; - struct hfa384x_AuthRequest authreq; - struct hfa384x_PSUserCount psusercnt; - struct hfa384x_KeyIDChanged keyidchanged; -} __packed; - -struct hfa384x_InfFrame { + struct hfa384x_comm_tallies_16 commtallies16; + struct hfa384x_comm_tallies_32 commtallies32; + struct hfa384x_scan_result scanresult; + struct hfa384x_ch_info_result chinforesult; + struct hfa384x_hscan_result hscanresult; + struct hfa384x_link_status linkstatus; + struct hfa384x_assoc_status assocstatus; + struct hfa384x_auth_request authreq; + struct hfa384x_ps_user_count psusercnt; + struct hfa384x_key_id_changed keyidchanged; +} __packed; + +struct hfa384x_inf_frame { u16 framelen; u16 infotype; union hfa384x_infodata info; @@ -862,7 +849,7 @@ struct hfa384x_usb_rxfrm { struct hfa384x_usb_infofrm { u16 type; - struct hfa384x_InfFrame info; + struct hfa384x_inf_frame info; } __packed; struct hfa384x_usb_statusresp { @@ -1169,7 +1156,6 @@ enum ctlx_state { CTLX_REQ_COMPLETE, /* OUT URB complete */ CTLX_RESP_COMPLETE /* IN URB received */ }; -typedef enum ctlx_state CTLX_STATE; struct hfa384x_usbctlx; struct hfa384x; @@ -1186,7 +1172,7 @@ struct hfa384x_usbctlx { union hfa384x_usbout outbuf; /* pkt buf for OUT */ union hfa384x_usbin inbuf; /* pkt buf for IN(a copy) */ - CTLX_STATE state; /* Tracks running state */ + enum ctlx_state state; /* Tracks running state */ struct completion done; volatile int reapable; /* Food for the reaper task */ @@ -1294,7 +1280,7 @@ struct hfa384x { int scanflag; /* to signal scan complete */ int join_ap; /* are we joined to a specific ap */ int join_retries; /* number of join retries till we fail */ - struct hfa384x_JoinRequest_data joinreq; /* join request saved data */ + struct hfa384x_join_request_data joinreq;/* join request saved data */ struct wlandevice *wlandev; /* Timer to allow for the deferred processing of linkstatus messages */ @@ -1360,17 +1346,17 @@ struct hfa384x { struct hfa384x_caplevel cap_act_ap_mfi; /* ap f/w to modem interface */ u32 psusercount; /* Power save user count. */ - struct hfa384x_CommTallies32 tallies; /* Communication tallies. */ + struct hfa384x_comm_tallies_32 tallies; /* Communication tallies. */ u8 comment[WLAN_COMMENT_MAX + 1]; /* User comment */ /* Channel Info request results (AP only) */ struct { atomic_t done; u8 count; - struct hfa384x_ChInfoResult results; + struct hfa384x_ch_info_result results; } channel_info; - struct hfa384x_InfFrame *scanresults; + struct hfa384x_inf_frame *scanresults; struct prism2sta_authlist authlist; /* Authenticated station list. */ unsigned int accessmode; /* Access mode. */ diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 6a107f8a06e2929d32431f7190d5065eb365e0e9..4fe037aeef12975e56919e0cc01e4e472f4fe415 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -1,114 +1,114 @@ /* src/prism2/driver/hfa384x_usb.c -* -* Functions that talk to the USB variantof the Intersil hfa384x MAC -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. 
You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* This file implements functions that correspond to the prism2/hfa384x -* 802.11 MAC hardware and firmware host interface. -* -* The functions can be considered to represent several levels of -* abstraction. The lowest level functions are simply C-callable wrappers -* around the register accesses. The next higher level represents C-callable -* prism2 API functions that match the Intersil documentation as closely -* as is reasonable. The next higher layer implements common sequences -* of invocations of the API layer (e.g. write to bap, followed by cmd). -* -* Common sequences: -* hfa384x_drvr_xxx Highest level abstractions provided by the -* hfa384x code. They are driver defined wrappers -* for common sequences. These functions generally -* use the services of the lower levels. -* -* hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These -* functions are wrappers for the RID get/set -* sequence. They call copy_[to|from]_bap() and -* cmd_access(). These functions operate on the -* RIDs and buffers without validation. The caller -* is responsible for that. -* -* API wrapper functions: -* hfa384x_cmd_xxx functions that provide access to the f/w commands. -* The function arguments correspond to each command -* argument, even command arguments that get packed -* into single registers. These functions _just_ -* issue the command by setting the cmd/parm regs -* & reading the status/resp regs. Additional -* activities required to fully use a command -* (read/write from/to bap, get/set int status etc.) -* are implemented separately. Think of these as -* C-callable prism2 commands. -* -* Lowest Layer Functions: -* hfa384x_docmd_xxx These functions implement the sequence required -* to issue any prism2 command. Primarily used by the -* hfa384x_cmd_xxx functions. -* -* hfa384x_bap_xxx BAP read/write access functions. -* Note: we usually use BAP0 for non-interrupt context -* and BAP1 for interrupt context. -* -* hfa384x_dl_xxx download related functions. 
-* -* Driver State Issues: -* Note that there are two pairs of functions that manage the -* 'initialized' and 'running' states of the hw/MAC combo. The four -* functions are create(), destroy(), start(), and stop(). create() -* sets up the data structures required to support the hfa384x_* -* functions and destroy() cleans them up. The start() function gets -* the actual hardware running and enables the interrupts. The stop() -* function shuts the hardware down. The sequence should be: -* create() -* start() -* . -* . Do interesting things w/ the hardware -* . -* stop() -* destroy() -* -* Note that destroy() can be called without calling stop() first. -* -------------------------------------------------------------------- -*/ + * + * Functions that talk to the USB variantof the Intersil hfa384x MAC + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * This file implements functions that correspond to the prism2/hfa384x + * 802.11 MAC hardware and firmware host interface. + * + * The functions can be considered to represent several levels of + * abstraction. The lowest level functions are simply C-callable wrappers + * around the register accesses. The next higher level represents C-callable + * prism2 API functions that match the Intersil documentation as closely + * as is reasonable. The next higher layer implements common sequences + * of invocations of the API layer (e.g. write to bap, followed by cmd). + * + * Common sequences: + * hfa384x_drvr_xxx Highest level abstractions provided by the + * hfa384x code. They are driver defined wrappers + * for common sequences. These functions generally + * use the services of the lower levels. 
+ * + * hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These + * functions are wrappers for the RID get/set + * sequence. They call copy_[to|from]_bap() and + * cmd_access(). These functions operate on the + * RIDs and buffers without validation. The caller + * is responsible for that. + * + * API wrapper functions: + * hfa384x_cmd_xxx functions that provide access to the f/w commands. + * The function arguments correspond to each command + * argument, even command arguments that get packed + * into single registers. These functions _just_ + * issue the command by setting the cmd/parm regs + * & reading the status/resp regs. Additional + * activities required to fully use a command + * (read/write from/to bap, get/set int status etc.) + * are implemented separately. Think of these as + * C-callable prism2 commands. + * + * Lowest Layer Functions: + * hfa384x_docmd_xxx These functions implement the sequence required + * to issue any prism2 command. Primarily used by the + * hfa384x_cmd_xxx functions. + * + * hfa384x_bap_xxx BAP read/write access functions. + * Note: we usually use BAP0 for non-interrupt context + * and BAP1 for interrupt context. + * + * hfa384x_dl_xxx download related functions. + * + * Driver State Issues: + * Note that there are two pairs of functions that manage the + * 'initialized' and 'running' states of the hw/MAC combo. The four + * functions are create(), destroy(), start(), and stop(). create() + * sets up the data structures required to support the hfa384x_* + * functions and destroy() cleans them up. The start() function gets + * the actual hardware running and enables the interrupts. The stop() + * function shuts the hardware down. The sequence should be: + * create() + * start() + * . + * . Do interesting things w/ the hardware + * . + * stop() + * destroy() + * + * Note that destroy() can be called without calling stop() first. 
+ * -------------------------------------------------------------------- + */ #include #include @@ -153,8 +153,8 @@ enum cmd_mode { static void dbprint_urb(struct urb *urb); #endif -static void -hfa384x_int_rxmonitor(struct wlandevice *wlandev, struct hfa384x_usb_rxfrm *rxfrm); +static void hfa384x_int_rxmonitor(struct wlandevice *wlandev, + struct hfa384x_usb_rxfrm *rxfrm); static void hfa384x_usb_defer(struct work_struct *data); @@ -173,7 +173,8 @@ hfa384x_usbin_txcompl(struct wlandevice *wlandev, union hfa384x_usbin *usbin); static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb); -static void hfa384x_usbin_info(struct wlandevice *wlandev, union hfa384x_usbin *usbin); +static void hfa384x_usbin_info(struct wlandevice *wlandev, + union hfa384x_usbin *usbin); static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin, int urb_status); @@ -193,9 +194,11 @@ static void hfa384x_usbctlx_completion_task(unsigned long data); static void hfa384x_usbctlx_reaper_task(unsigned long data); -static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx); +static int hfa384x_usbctlx_submit(struct hfa384x *hw, + struct hfa384x_usbctlx *ctlx); -static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx); +static void unlocked_usbctlx_complete(struct hfa384x *hw, + struct hfa384x_usbctlx *ctlx); struct usbctlx_completor { int (*complete)(struct usbctlx_completor *); @@ -209,7 +212,8 @@ hfa384x_usbctlx_complete_sync(struct hfa384x *hw, static int unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx); -static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx); +static void hfa384x_cb_status(struct hfa384x *hw, + const struct hfa384x_usbctlx *ctlx); static int usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp, @@ -263,7 +267,7 @@ hfa384x_dowmem(struct hfa384x *hw, static int hfa384x_isgood_pdrcode(u16 pdrcode); -static inline const char *ctlxstr(CTLX_STATE s) +static inline const char *ctlxstr(enum ctlx_state s) { static const char * const ctlx_str[] = { "Initial state", @@ -307,21 +311,22 @@ void dbprint_urb(struct urb *urb) #endif /*---------------------------------------------------------------- -* submit_rx_urb -* -* Listen for input data on the BULK-IN pipe. If the pipe has -* stalled then schedule it to be reset. -* -* Arguments: -* hw device struct -* memflags memory allocation flags -* -* Returns: -* error code from submission -* -* Call context: -* Any -----------------------------------------------------------------*/ + * submit_rx_urb + * + * Listen for input data on the BULK-IN pipe. If the pipe has + * stalled then schedule it to be reset. + * + * Arguments: + * hw device struct + * memflags memory allocation flags + * + * Returns: + * error code from submission + * + * Call context: + * Any + *---------------------------------------------------------------- + */ static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags) { struct sk_buff *skb; @@ -367,23 +372,24 @@ static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags) } /*---------------------------------------------------------------- -* submit_tx_urb -* -* Prepares and submits the URB of transmitted data. If the -* submission fails then it will schedule the output pipe to -* be reset. 
-* -* Arguments: -* hw device struct -* tx_urb URB of data for transmission -* memflags memory allocation flags -* -* Returns: -* error code from submission -* -* Call context: -* Any -----------------------------------------------------------------*/ + * submit_tx_urb + * + * Prepares and submits the URB of transmitted data. If the + * submission fails then it will schedule the output pipe to + * be reset. + * + * Arguments: + * hw device struct + * tx_urb URB of data for transmission + * memflags memory allocation flags + * + * Returns: + * error code from submission + * + * Call context: + * Any + *---------------------------------------------------------------- + */ static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags) { struct net_device *netdev = hw->wlandev->netdev; @@ -412,21 +418,22 @@ static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags) } /*---------------------------------------------------------------- -* hfa394x_usb_defer -* -* There are some things that the USB stack cannot do while -* in interrupt context, so we arrange this function to run -* in process context. -* -* Arguments: -* hw device structure -* -* Returns: -* nothing -* -* Call context: -* process (by design) -----------------------------------------------------------------*/ + * hfa394x_usb_defer + * + * There are some things that the USB stack cannot do while + * in interrupt context, so we arrange this function to run + * in process context. + * + * Arguments: + * hw device structure + * + * Returns: + * nothing + * + * Call context: + * process (by design) + *---------------------------------------------------------------- + */ static void hfa384x_usb_defer(struct work_struct *data) { struct hfa384x *hw = container_of(data, struct hfa384x, usb_work); @@ -501,29 +508,30 @@ static void hfa384x_usb_defer(struct work_struct *data) } /*---------------------------------------------------------------- -* hfa384x_create -* -* Sets up the struct hfa384x data structure for use. Note this -* does _not_ initialize the actual hardware, just the data structures -* we use to keep track of its state. -* -* Arguments: -* hw device structure -* irq device irq number -* iobase i/o base address for register access -* membase memory base address for register access -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_create + * + * Sets up the struct hfa384x data structure for use. Note this + * does _not_ initialize the actual hardware, just the data structures + * we use to keep track of its state. + * + * Arguments: + * hw device structure + * irq device irq number + * iobase i/o base address for register access + * membase memory base address for register access + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) { - memset(hw, 0, sizeof(struct hfa384x)); + memset(hw, 0, sizeof(*hw)); hw->usb = usb; /* set up the endpoints */ @@ -571,27 +579,28 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) } /*---------------------------------------------------------------- -* hfa384x_destroy -* -* Partner to hfa384x_create(). This function cleans up the hw -* structure so that it can be freed by the caller using a simple -* kfree. Currently, this function is just a placeholder. 
If, at some -* point in the future, an hw in the 'shutdown' state requires a 'deep' -* kfree, this is where it should be done. Note that if this function -* is called on a _running_ hw structure, the drvr_stop() function is -* called. -* -* Arguments: -* hw device structure -* -* Returns: -* nothing, this function is not allowed to fail. -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_destroy + * + * Partner to hfa384x_create(). This function cleans up the hw + * structure so that it can be freed by the caller using a simple + * kfree. Currently, this function is just a placeholder. If, at some + * point in the future, an hw in the 'shutdown' state requires a 'deep' + * kfree, this is where it should be done. Note that if this function + * is called on a _running_ hw structure, the drvr_stop() function is + * called. + * + * Arguments: + * hw device structure + * + * Returns: + * nothing, this function is not allowed to fail. + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ void hfa384x_destroy(struct hfa384x *hw) { struct sk_buff *skb; @@ -645,10 +654,11 @@ usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp, } /*---------------------------------------------------------------- -* Completor object: -* This completor must be passed to hfa384x_usbctlx_complete_sync() -* when processing a CTLX that returns a struct hfa384x_cmdresult structure. -----------------------------------------------------------------*/ + * Completor object: + * This completor must be passed to hfa384x_usbctlx_complete_sync() + * when processing a CTLX that returns a struct hfa384x_cmdresult structure. + *---------------------------------------------------------------- + */ struct usbctlx_cmd_completor { struct usbctlx_completor head; @@ -664,24 +674,23 @@ static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head) return usbctlx_get_status(complete->cmdresp, complete->result); } -static inline struct usbctlx_completor *init_cmd_completor( - struct usbctlx_cmd_completor - *completor, - const struct hfa384x_usb_statusresp - *cmdresp, - struct hfa384x_cmdresult *result) +static inline struct usbctlx_completor * +init_cmd_completor(struct usbctlx_cmd_completor *completor, + const struct hfa384x_usb_statusresp *cmdresp, + struct hfa384x_cmdresult *result) { completor->head.complete = usbctlx_cmd_completor_fn; completor->cmdresp = cmdresp; completor->result = result; - return &(completor->head); + return &completor->head; } /*---------------------------------------------------------------- -* Completor object: -* This completor must be passed to hfa384x_usbctlx_complete_sync() -* when processing a CTLX that reads a RID. -----------------------------------------------------------------*/ + * Completor object: + * This completor must be passed to hfa384x_usbctlx_complete_sync() + * when processing a CTLX that reads a RID. 
+ *---------------------------------------------------------------- + */ struct usbctlx_rrid_completor { struct usbctlx_completor head; @@ -710,37 +719,38 @@ static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head) return 0; } -static inline struct usbctlx_completor *init_rrid_completor( - struct usbctlx_rrid_completor - *completor, - const struct hfa384x_usb_rridresp - *rridresp, - void *riddata, - unsigned int riddatalen) +static inline struct usbctlx_completor * +init_rrid_completor(struct usbctlx_rrid_completor *completor, + const struct hfa384x_usb_rridresp *rridresp, + void *riddata, + unsigned int riddatalen) { completor->head.complete = usbctlx_rrid_completor_fn; completor->rridresp = rridresp; completor->riddata = riddata; completor->riddatalen = riddatalen; - return &(completor->head); + return &completor->head; } /*---------------------------------------------------------------- -* Completor object: -* Interprets the results of a synchronous RID-write -----------------------------------------------------------------*/ + * Completor object: + * Interprets the results of a synchronous RID-write + *---------------------------------------------------------------- + */ #define init_wrid_completor init_cmd_completor /*---------------------------------------------------------------- -* Completor object: -* Interprets the results of a synchronous memory-write -----------------------------------------------------------------*/ + * Completor object: + * Interprets the results of a synchronous memory-write + *---------------------------------------------------------------- + */ #define init_wmem_completor init_cmd_completor /*---------------------------------------------------------------- -* Completor object: -* Interprets the results of a synchronous memory-read -----------------------------------------------------------------*/ + * Completor object: + * Interprets the results of a synchronous memory-read + *---------------------------------------------------------------- + */ struct usbctlx_rmem_completor { struct usbctlx_completor head; @@ -759,43 +769,43 @@ static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head) return 0; } -static inline struct usbctlx_completor *init_rmem_completor( - struct usbctlx_rmem_completor - *completor, - struct hfa384x_usb_rmemresp - *rmemresp, - void *data, - unsigned int len) +static inline struct usbctlx_completor * +init_rmem_completor(struct usbctlx_rmem_completor *completor, + struct hfa384x_usb_rmemresp *rmemresp, + void *data, + unsigned int len) { completor->head.complete = usbctlx_rmem_completor_fn; completor->rmemresp = rmemresp; completor->data = data; completor->len = len; - return &(completor->head); + return &completor->head; } /*---------------------------------------------------------------- -* hfa384x_cb_status -* -* Ctlx_complete handler for async CMD type control exchanges. -* mark the hw struct as such. -* -* Note: If the handling is changed here, it should probably be -* changed in docmd as well. -* -* Arguments: -* hw hw struct -* ctlx completed CTLX -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ -static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx) + * hfa384x_cb_status + * + * Ctlx_complete handler for async CMD type control exchanges. + * mark the hw struct as such. + * + * Note: If the handling is changed here, it should probably be + * changed in docmd as well. 
+ * + * Arguments: + * hw hw struct + * ctlx completed CTLX + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ +static void hfa384x_cb_status(struct hfa384x *hw, + const struct hfa384x_usbctlx *ctlx) { if (ctlx->usercb) { struct hfa384x_cmdresult cmdresult; @@ -812,7 +822,8 @@ static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx * } } -static inline int hfa384x_docmd_wait(struct hfa384x *hw, struct hfa384x_metacmd *cmd) +static inline int hfa384x_docmd_wait(struct hfa384x *hw, + struct hfa384x_metacmd *cmd) { return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL); } @@ -905,24 +916,25 @@ hfa384x_dowmem_async(struct hfa384x *hw, } /*---------------------------------------------------------------- -* hfa384x_cmd_initialize -* -* Issues the initialize command and sets the hw->state based -* on the result. -* -* Arguments: -* hw device structure -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_cmd_initialize + * + * Issues the initialize command and sets the hw->state based + * on the result. + * + * Arguments: + * hw device structure + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_cmd_initialize(struct hfa384x *hw) { int result = 0; @@ -950,25 +962,26 @@ int hfa384x_cmd_initialize(struct hfa384x *hw) } /*---------------------------------------------------------------- -* hfa384x_cmd_disable -* -* Issues the disable command to stop communications on one of -* the MACs 'ports'. -* -* Arguments: -* hw device structure -* macport MAC port number (host order) -* -* Returns: -* 0 success -* >0 f/w reported failure - f/w status code -* <0 driver reported error (timeout|bad arg) -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_cmd_disable + * + * Issues the disable command to stop communications on one of + * the MACs 'ports'. + * + * Arguments: + * hw device structure + * macport MAC port number (host order) + * + * Returns: + * 0 success + * >0 f/w reported failure - f/w status code + * <0 driver reported error (timeout|bad arg) + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport) { struct hfa384x_metacmd cmd; @@ -983,25 +996,26 @@ int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport) } /*---------------------------------------------------------------- -* hfa384x_cmd_enable -* -* Issues the enable command to enable communications on one of -* the MACs 'ports'. -* -* Arguments: -* hw device structure -* macport MAC port number -* -* Returns: -* 0 success -* >0 f/w reported failure - f/w status code -* <0 driver reported error (timeout|bad arg) -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_cmd_enable + * + * Issues the enable command to enable communications on one of + * the MACs 'ports'. 
+ * + * Arguments: + * hw device structure + * macport MAC port number + * + * Returns: + * 0 success + * >0 f/w reported failure - f/w status code + * <0 driver reported error (timeout|bad arg) + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport) { struct hfa384x_metacmd cmd; @@ -1016,34 +1030,35 @@ int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport) } /*---------------------------------------------------------------- -* hfa384x_cmd_monitor -* -* Enables the 'monitor mode' of the MAC. Here's the description of -* monitor mode that I've received thus far: -* -* "The "monitor mode" of operation is that the MAC passes all -* frames for which the PLCP checks are correct. All received -* MPDUs are passed to the host with MAC Port = 7, with a -* receive status of good, FCS error, or undecryptable. Passing -* certain MPDUs is a violation of the 802.11 standard, but useful -* for a debugging tool." Normal communication is not possible -* while monitor mode is enabled. -* -* Arguments: -* hw device structure -* enable a code (0x0b|0x0f) that enables/disables -* monitor mode. (host order) -* -* Returns: -* 0 success -* >0 f/w reported failure - f/w status code -* <0 driver reported error (timeout|bad arg) -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_cmd_monitor + * + * Enables the 'monitor mode' of the MAC. Here's the description of + * monitor mode that I've received thus far: + * + * "The "monitor mode" of operation is that the MAC passes all + * frames for which the PLCP checks are correct. All received + * MPDUs are passed to the host with MAC Port = 7, with a + * receive status of good, FCS error, or undecryptable. Passing + * certain MPDUs is a violation of the 802.11 standard, but useful + * for a debugging tool." Normal communication is not possible + * while monitor mode is enabled. + * + * Arguments: + * hw device structure + * enable a code (0x0b|0x0f) that enables/disables + * monitor mode. (host order) + * + * Returns: + * 0 success + * >0 f/w reported failure - f/w status code + * <0 driver reported error (timeout|bad arg) + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable) { struct hfa384x_metacmd cmd; @@ -1058,43 +1073,44 @@ int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable) } /*---------------------------------------------------------------- -* hfa384x_cmd_download -* -* Sets the controls for the MAC controller code/data download -* process. The arguments set the mode and address associated -* with a download. Note that the aux registers should be enabled -* prior to setting one of the download enable modes. -* -* Arguments: -* hw device structure -* mode 0 - Disable programming and begin code exec -* 1 - Enable volatile mem programming -* 2 - Enable non-volatile mem programming -* 3 - Program non-volatile section from NV download -* buffer. -* (host order) -* lowaddr -* highaddr For mode 1, sets the high & low order bits of -* the "destination address". This address will be -* the execution start address when download is -* subsequently disabled. -* For mode 2, sets the high & low order bits of -* the destination in NV ram. -* For modes 0 & 3, should be zero. (host order) -* NOTE: these are CMD format. 
-* codelen Length of the data to write in mode 2, -* zero otherwise. (host order) -* -* Returns: -* 0 success -* >0 f/w reported failure - f/w status code -* <0 driver reported error (timeout|bad arg) -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_cmd_download + * + * Sets the controls for the MAC controller code/data download + * process. The arguments set the mode and address associated + * with a download. Note that the aux registers should be enabled + * prior to setting one of the download enable modes. + * + * Arguments: + * hw device structure + * mode 0 - Disable programming and begin code exec + * 1 - Enable volatile mem programming + * 2 - Enable non-volatile mem programming + * 3 - Program non-volatile section from NV download + * buffer. + * (host order) + * lowaddr + * highaddr For mode 1, sets the high & low order bits of + * the "destination address". This address will be + * the execution start address when download is + * subsequently disabled. + * For mode 2, sets the high & low order bits of + * the destination in NV ram. + * For modes 0 & 3, should be zero. (host order) + * NOTE: these are CMD format. + * codelen Length of the data to write in mode 2, + * zero otherwise. (host order) + * + * Returns: + * 0 success + * >0 f/w reported failure - f/w status code + * <0 driver reported error (timeout|bad arg) + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr, u16 highaddr, u16 codelen) { @@ -1114,29 +1130,31 @@ int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr, } /*---------------------------------------------------------------- -* hfa384x_corereset -* -* Perform a reset of the hfa38xx MAC core. We assume that the hw -* structure is in its "created" state. That is, it is initialized -* with proper values. Note that if a reset is done after the -* device has been active for awhile, the caller might have to clean -* up some leftover cruft in the hw structure. -* -* Arguments: -* hw device structure -* holdtime how long (in ms) to hold the reset -* settletime how long (in ms) to wait after releasing -* the reset -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ -int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int genesis) + * hfa384x_corereset + * + * Perform a reset of the hfa38xx MAC core. We assume that the hw + * structure is in its "created" state. That is, it is initialized + * with proper values. Note that if a reset is done after the + * device has been active for awhile, the caller might have to clean + * up some leftover cruft in the hw structure. 
+ * + * Arguments: + * hw device structure + * holdtime how long (in ms) to hold the reset + * settletime how long (in ms) to wait after releasing + * the reset + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ +int hfa384x_corereset(struct hfa384x *hw, int holdtime, + int settletime, int genesis) { int result; @@ -1150,29 +1168,30 @@ int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int gene } /*---------------------------------------------------------------- -* hfa384x_usbctlx_complete_sync -* -* Waits for a synchronous CTLX object to complete, -* and then handles the response. -* -* Arguments: -* hw device structure -* ctlx CTLX ptr -* completor functor object to decide what to -* do with the CTLX's result. -* -* Returns: -* 0 Success -* -ERESTARTSYS Interrupted by a signal -* -EIO CTLX failed -* -ENODEV Adapter was unplugged -* ??? Result from completor -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_usbctlx_complete_sync + * + * Waits for a synchronous CTLX object to complete, + * and then handles the response. + * + * Arguments: + * hw device structure + * ctlx CTLX ptr + * completor functor object to decide what to + * do with the CTLX's result. + * + * Returns: + * 0 Success + * -ERESTARTSYS Interrupted by a signal + * -EIO CTLX failed + * -ENODEV Adapter was unplugged + * ??? Result from completor + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx, struct usbctlx_completor *completor) @@ -1257,37 +1276,38 @@ static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw, } /*---------------------------------------------------------------- -* hfa384x_docmd -* -* Constructs a command CTLX and submits it. -* -* NOTE: Any changes to the 'post-submit' code in this function -* need to be carried over to hfa384x_cbcmd() since the handling -* is virtually identical. -* -* Arguments: -* hw device structure -* mode DOWAIT or DOASYNC -* cmd cmd structure. Includes all arguments and result -* data points. All in host order. in host order -* cmdcb command-specific callback -* usercb user callback for async calls, NULL for DOWAIT calls -* usercb_data user supplied data pointer for async calls, NULL -* for DOASYNC calls -* -* Returns: -* 0 success -* -EIO CTLX failure -* -ERESTARTSYS Awakened on signal -* >0 command indicated error, Status and Resp0-2 are -* in hw structure. -* -* Side effects: -* -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_docmd + * + * Constructs a command CTLX and submits it. + * + * NOTE: Any changes to the 'post-submit' code in this function + * need to be carried over to hfa384x_cbcmd() since the handling + * is virtually identical. + * + * Arguments: + * hw device structure + * mode DOWAIT or DOASYNC + * cmd cmd structure. Includes all arguments and result + * data points. All in host order. 
in host order + * cmdcb command-specific callback + * usercb user callback for async calls, NULL for DOWAIT calls + * usercb_data user supplied data pointer for async calls, NULL + * for DOASYNC calls + * + * Returns: + * 0 success + * -EIO CTLX failure + * -ERESTARTSYS Awakened on signal + * >0 command indicated error, Status and Resp0-2 are + * in hw structure. + * + * Side effects: + * + * + * Call context: + * process + *---------------------------------------------------------------- + */ static int hfa384x_docmd(struct hfa384x *hw, enum cmd_mode mode, @@ -1341,41 +1361,42 @@ hfa384x_docmd(struct hfa384x *hw, } /*---------------------------------------------------------------- -* hfa384x_dorrid -* -* Constructs a read rid CTLX and issues it. -* -* NOTE: Any changes to the 'post-submit' code in this function -* need to be carried over to hfa384x_cbrrid() since the handling -* is virtually identical. -* -* Arguments: -* hw device structure -* mode DOWAIT or DOASYNC -* rid Read RID number (host order) -* riddata Caller supplied buffer that MAC formatted RID.data -* record will be written to for DOWAIT calls. Should -* be NULL for DOASYNC calls. -* riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls. -* cmdcb command callback for async calls, NULL for DOWAIT calls -* usercb user callback for async calls, NULL for DOWAIT calls -* usercb_data user supplied data pointer for async calls, NULL -* for DOWAIT calls -* -* Returns: -* 0 success -* -EIO CTLX failure -* -ERESTARTSYS Awakened on signal -* -ENODATA riddatalen != macdatalen -* >0 command indicated error, Status and Resp0-2 are -* in hw structure. -* -* Side effects: -* -* Call context: -* interrupt (DOASYNC) -* process (DOWAIT or DOASYNC) -----------------------------------------------------------------*/ + * hfa384x_dorrid + * + * Constructs a read rid CTLX and issues it. + * + * NOTE: Any changes to the 'post-submit' code in this function + * need to be carried over to hfa384x_cbrrid() since the handling + * is virtually identical. + * + * Arguments: + * hw device structure + * mode DOWAIT or DOASYNC + * rid Read RID number (host order) + * riddata Caller supplied buffer that MAC formatted RID.data + * record will be written to for DOWAIT calls. Should + * be NULL for DOASYNC calls. + * riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls. + * cmdcb command callback for async calls, NULL for DOWAIT calls + * usercb user callback for async calls, NULL for DOWAIT calls + * usercb_data user supplied data pointer for async calls, NULL + * for DOWAIT calls + * + * Returns: + * 0 success + * -EIO CTLX failure + * -ERESTARTSYS Awakened on signal + * -ENODATA riddatalen != macdatalen + * >0 command indicated error, Status and Resp0-2 are + * in hw structure. + * + * Side effects: + * + * Call context: + * interrupt (DOASYNC) + * process (DOWAIT or DOASYNC) + *---------------------------------------------------------------- + */ static int hfa384x_dorrid(struct hfa384x *hw, enum cmd_mode mode, @@ -1426,37 +1447,38 @@ hfa384x_dorrid(struct hfa384x *hw, } /*---------------------------------------------------------------- -* hfa384x_dowrid -* -* Constructs a write rid CTLX and issues it. -* -* NOTE: Any changes to the 'post-submit' code in this function -* need to be carried over to hfa384x_cbwrid() since the handling -* is virtually identical. 
-* -* Arguments: -* hw device structure -* enum cmd_mode DOWAIT or DOASYNC -* rid RID code -* riddata Data portion of RID formatted for MAC -* riddatalen Length of the data portion in bytes -* cmdcb command callback for async calls, NULL for DOWAIT calls -* usercb user callback for async calls, NULL for DOWAIT calls -* usercb_data user supplied data pointer for async calls -* -* Returns: -* 0 success -* -ETIMEDOUT timed out waiting for register ready or -* command completion -* >0 command indicated error, Status and Resp0-2 are -* in hw structure. -* -* Side effects: -* -* Call context: -* interrupt (DOASYNC) -* process (DOWAIT or DOASYNC) -----------------------------------------------------------------*/ + * hfa384x_dowrid + * + * Constructs a write rid CTLX and issues it. + * + * NOTE: Any changes to the 'post-submit' code in this function + * need to be carried over to hfa384x_cbwrid() since the handling + * is virtually identical. + * + * Arguments: + * hw device structure + * enum cmd_mode DOWAIT or DOASYNC + * rid RID code + * riddata Data portion of RID formatted for MAC + * riddatalen Length of the data portion in bytes + * cmdcb command callback for async calls, NULL for DOWAIT calls + * usercb user callback for async calls, NULL for DOWAIT calls + * usercb_data user supplied data pointer for async calls + * + * Returns: + * 0 success + * -ETIMEDOUT timed out waiting for register ready or + * command completion + * >0 command indicated error, Status and Resp0-2 are + * in hw structure. + * + * Side effects: + * + * Call context: + * interrupt (DOASYNC) + * process (DOWAIT or DOASYNC) + *---------------------------------------------------------------- + */ static int hfa384x_dowrid(struct hfa384x *hw, enum cmd_mode mode, @@ -1512,38 +1534,39 @@ hfa384x_dowrid(struct hfa384x *hw, } /*---------------------------------------------------------------- -* hfa384x_dormem -* -* Constructs a readmem CTLX and issues it. -* -* NOTE: Any changes to the 'post-submit' code in this function -* need to be carried over to hfa384x_cbrmem() since the handling -* is virtually identical. -* -* Arguments: -* hw device structure -* mode DOWAIT or DOASYNC -* page MAC address space page (CMD format) -* offset MAC address space offset -* data Ptr to data buffer to receive read -* len Length of the data to read (max == 2048) -* cmdcb command callback for async calls, NULL for DOWAIT calls -* usercb user callback for async calls, NULL for DOWAIT calls -* usercb_data user supplied data pointer for async calls -* -* Returns: -* 0 success -* -ETIMEDOUT timed out waiting for register ready or -* command completion -* >0 command indicated error, Status and Resp0-2 are -* in hw structure. -* -* Side effects: -* -* Call context: -* interrupt (DOASYNC) -* process (DOWAIT or DOASYNC) -----------------------------------------------------------------*/ + * hfa384x_dormem + * + * Constructs a readmem CTLX and issues it. + * + * NOTE: Any changes to the 'post-submit' code in this function + * need to be carried over to hfa384x_cbrmem() since the handling + * is virtually identical. 
+ * + * Arguments: + * hw device structure + * mode DOWAIT or DOASYNC + * page MAC address space page (CMD format) + * offset MAC address space offset + * data Ptr to data buffer to receive read + * len Length of the data to read (max == 2048) + * cmdcb command callback for async calls, NULL for DOWAIT calls + * usercb user callback for async calls, NULL for DOWAIT calls + * usercb_data user supplied data pointer for async calls + * + * Returns: + * 0 success + * -ETIMEDOUT timed out waiting for register ready or + * command completion + * >0 command indicated error, Status and Resp0-2 are + * in hw structure. + * + * Side effects: + * + * Call context: + * interrupt (DOASYNC) + * process (DOWAIT or DOASYNC) + *---------------------------------------------------------------- + */ static int hfa384x_dormem(struct hfa384x *hw, enum cmd_mode mode, @@ -1603,38 +1626,39 @@ hfa384x_dormem(struct hfa384x *hw, } /*---------------------------------------------------------------- -* hfa384x_dowmem -* -* Constructs a writemem CTLX and issues it. -* -* NOTE: Any changes to the 'post-submit' code in this function -* need to be carried over to hfa384x_cbwmem() since the handling -* is virtually identical. -* -* Arguments: -* hw device structure -* mode DOWAIT or DOASYNC -* page MAC address space page (CMD format) -* offset MAC address space offset -* data Ptr to data buffer containing write data -* len Length of the data to read (max == 2048) -* cmdcb command callback for async calls, NULL for DOWAIT calls -* usercb user callback for async calls, NULL for DOWAIT calls -* usercb_data user supplied data pointer for async calls. -* -* Returns: -* 0 success -* -ETIMEDOUT timed out waiting for register ready or -* command completion -* >0 command indicated error, Status and Resp0-2 are -* in hw structure. -* -* Side effects: -* -* Call context: -* interrupt (DOWAIT) -* process (DOWAIT or DOASYNC) -----------------------------------------------------------------*/ + * hfa384x_dowmem + * + * Constructs a writemem CTLX and issues it. + * + * NOTE: Any changes to the 'post-submit' code in this function + * need to be carried over to hfa384x_cbwmem() since the handling + * is virtually identical. + * + * Arguments: + * hw device structure + * mode DOWAIT or DOASYNC + * page MAC address space page (CMD format) + * offset MAC address space offset + * data Ptr to data buffer containing write data + * len Length of the data to read (max == 2048) + * cmdcb command callback for async calls, NULL for DOWAIT calls + * usercb user callback for async calls, NULL for DOWAIT calls + * usercb_data user supplied data pointer for async calls. + * + * Returns: + * 0 success + * -ETIMEDOUT timed out waiting for register ready or + * command completion + * >0 command indicated error, Status and Resp0-2 are + * in hw structure. + * + * Side effects: + * + * Call context: + * interrupt (DOWAIT) + * process (DOWAIT or DOASYNC) + *---------------------------------------------------------------- + */ static int hfa384x_dowmem(struct hfa384x *hw, enum cmd_mode mode, @@ -1694,27 +1718,28 @@ hfa384x_dowmem(struct hfa384x *hw, } /*---------------------------------------------------------------- -* hfa384x_drvr_disable -* -* Issues the disable command to stop communications on one of -* the MACs 'ports'. Only macport 0 is valid for stations. -* APs may also disable macports 1-6. Only ports that have been -* previously enabled may be disabled. 
-* -* Arguments: -* hw device structure -* macport MAC port number (host order) -* -* Returns: -* 0 success -* >0 f/w reported failure - f/w status code -* <0 driver reported error (timeout|bad arg) -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_disable + * + * Issues the disable command to stop communications on one of + * the MACs 'ports'. Only macport 0 is valid for stations. + * APs may also disable macports 1-6. Only ports that have been + * previously enabled may be disabled. + * + * Arguments: + * hw device structure + * macport MAC port number (host order) + * + * Returns: + * 0 success + * >0 f/w reported failure - f/w status code + * <0 driver reported error (timeout|bad arg) + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport) { int result = 0; @@ -1732,27 +1757,28 @@ int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport) } /*---------------------------------------------------------------- -* hfa384x_drvr_enable -* -* Issues the enable command to enable communications on one of -* the MACs 'ports'. Only macport 0 is valid for stations. -* APs may also enable macports 1-6. Only ports that are currently -* disabled may be enabled. -* -* Arguments: -* hw device structure -* macport MAC port number -* -* Returns: -* 0 success -* >0 f/w reported failure - f/w status code -* <0 driver reported error (timeout|bad arg) -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_enable + * + * Issues the enable command to enable communications on one of + * the MACs 'ports'. Only macport 0 is valid for stations. + * APs may also enable macports 1-6. Only ports that are currently + * disabled may be enabled. + * + * Arguments: + * hw device structure + * macport MAC port number + * + * Returns: + * 0 success + * >0 f/w reported failure - f/w status code + * <0 driver reported error (timeout|bad arg) + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport) { int result = 0; @@ -1770,26 +1796,27 @@ int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport) } /*---------------------------------------------------------------- -* hfa384x_drvr_flashdl_enable -* -* Begins the flash download state. Checks to see that we're not -* already in a download state and that a port isn't enabled. -* Sets the download state and retrieves the flash download -* buffer location, buffer size, and timeout length. -* -* Arguments: -* hw device structure -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_flashdl_enable + * + * Begins the flash download state. Checks to see that we're not + * already in a download state and that a port isn't enabled. + * Sets the download state and retrieves the flash download + * buffer location, buffer size, and timeout length. 
+ * + * Arguments: + * hw device structure + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw) { int result = 0; @@ -1809,7 +1836,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw) /* Retrieve the buffer loc&size and timeout */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER, - &(hw->bufinfo), sizeof(hw->bufinfo)); + &hw->bufinfo, sizeof(hw->bufinfo)); if (result) return result; @@ -1817,7 +1844,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw) hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset); hw->bufinfo.len = le16_to_cpu(hw->bufinfo.len); result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME, - &(hw->dltimeout)); + &hw->dltimeout); if (result) return result; @@ -1831,24 +1858,25 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw) } /*---------------------------------------------------------------- -* hfa384x_drvr_flashdl_disable -* -* Ends the flash download state. Note that this will cause the MAC -* firmware to restart. -* -* Arguments: -* hw device structure -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_flashdl_disable + * + * Ends the flash download state. Note that this will cause the MAC + * firmware to restart. + * + * Arguments: + * hw device structure + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_flashdl_disable(struct hfa384x *hw) { /* Check that we're already in the download state */ @@ -1866,35 +1894,37 @@ int hfa384x_drvr_flashdl_disable(struct hfa384x *hw) } /*---------------------------------------------------------------- -* hfa384x_drvr_flashdl_write -* -* Performs a FLASH download of a chunk of data. First checks to see -* that we're in the FLASH download state, then sets the download -* mode, uses the aux functions to 1) copy the data to the flash -* buffer, 2) sets the download 'write flash' mode, 3) readback and -* compare. Lather rinse, repeat as many times an necessary to get -* all the given data into flash. -* When all data has been written using this function (possibly -* repeatedly), call drvr_flashdl_disable() to end the download state -* and restart the MAC. -* -* Arguments: -* hw device structure -* daddr Card address to write to. (host order) -* buf Ptr to data to write. -* len Length of data (host order). -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ -int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len) + * hfa384x_drvr_flashdl_write + * + * Performs a FLASH download of a chunk of data. First checks to see + * that we're in the FLASH download state, then sets the download + * mode, uses the aux functions to 1) copy the data to the flash + * buffer, 2) sets the download 'write flash' mode, 3) readback and + * compare. 
Lather rinse, repeat as many times an necessary to get + * all the given data into flash. + * When all data has been written using this function (possibly + * repeatedly), call drvr_flashdl_disable() to end the download state + * and restart the MAC. + * + * Arguments: + * hw device structure + * daddr Card address to write to. (host order) + * buf Ptr to data to write. + * len Length of data (host order). + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ +int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, + void *buf, u32 len) { int result = 0; u32 dlbufaddr; @@ -2008,30 +2038,31 @@ int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len } /*---------------------------------------------------------------- -* hfa384x_drvr_getconfig -* -* Performs the sequence necessary to read a config/info item. -* -* Arguments: -* hw device structure -* rid config/info record id (host order) -* buf host side record buffer. Upon return it will -* contain the body portion of the record (minus the -* RID and len). -* len buffer length (in bytes, should match record length) -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -ENODATA length mismatch between argument and retrieved -* record. -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_getconfig + * + * Performs the sequence necessary to read a config/info item. + * + * Arguments: + * hw device structure + * rid config/info record id (host order) + * buf host side record buffer. Upon return it will + * contain the body portion of the record (minus the + * RID and len). + * len buffer length (in bytes, should match record length) + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * -ENODATA length mismatch between argument and retrieved + * record. + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len) { return hfa384x_dorrid_wait(hw, rid, buf, len); @@ -2059,7 +2090,8 @@ int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len) * * Call context: * process - ----------------------------------------------------------------*/ + *---------------------------------------------------------------- + */ int hfa384x_drvr_setconfig_async(struct hfa384x *hw, u16 rid, @@ -2071,23 +2103,24 @@ hfa384x_drvr_setconfig_async(struct hfa384x *hw, } /*---------------------------------------------------------------- -* hfa384x_drvr_ramdl_disable -* -* Ends the ram download state. -* -* Arguments: -* hw device structure -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_ramdl_disable + * + * Ends the ram download state. 
+ * + * Arguments: + * hw device structure + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_ramdl_disable(struct hfa384x *hw) { /* Check that we're already in the download state */ @@ -2105,29 +2138,30 @@ int hfa384x_drvr_ramdl_disable(struct hfa384x *hw) } /*---------------------------------------------------------------- -* hfa384x_drvr_ramdl_enable -* -* Begins the ram download state. Checks to see that we're not -* already in a download state and that a port isn't enabled. -* Sets the download state and calls cmd_download with the -* ENABLE_VOLATILE subcommand and the exeaddr argument. -* -* Arguments: -* hw device structure -* exeaddr the card execution address that will be -* jumped to when ramdl_disable() is called -* (host order). -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_ramdl_enable + * + * Begins the ram download state. Checks to see that we're not + * already in a download state and that a port isn't enabled. + * Sets the download state and calls cmd_download with the + * ENABLE_VOLATILE subcommand and the exeaddr argument. + * + * Arguments: + * hw device structure + * exeaddr the card execution address that will be + * jumped to when ramdl_disable() is called + * (host order). + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr) { int result = 0; @@ -2146,7 +2180,8 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr) /* Check that we're not already in a download state */ if (hw->dlstate != HFA384x_DLSTATE_DISABLED) { - netdev_err(hw->wlandev->netdev, "Download state not disabled.\n"); + netdev_err(hw->wlandev->netdev, + "Download state not disabled.\n"); return -EINVAL; } @@ -2171,31 +2206,32 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr) } /*---------------------------------------------------------------- -* hfa384x_drvr_ramdl_write -* -* Performs a RAM download of a chunk of data. First checks to see -* that we're in the RAM download state, then uses the [read|write]mem USB -* commands to 1) copy the data, 2) readback and compare. The download -* state is unaffected. When all data has been written using -* this function, call drvr_ramdl_disable() to end the download state -* and restart the MAC. -* -* Arguments: -* hw device structure -* daddr Card address to write to. (host order) -* buf Ptr to data to write. -* len Length of data (host order). -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_ramdl_write + * + * Performs a RAM download of a chunk of data. First checks to see + * that we're in the RAM download state, then uses the [read|write]mem USB + * commands to 1) copy the data, 2) readback and compare. The download + * state is unaffected. 
When all data has been written using + * this function, call drvr_ramdl_disable() to end the download state + * and restart the MAC. + * + * Arguments: + * hw device structure + * daddr Card address to write to. (host order) + * buf Ptr to data to write. + * len Length of data (host order). + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len) { int result = 0; @@ -2246,36 +2282,37 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len) } /*---------------------------------------------------------------- -* hfa384x_drvr_readpda -* -* Performs the sequence to read the PDA space. Note there is no -* drvr_writepda() function. Writing a PDA is -* generally implemented by a calling component via calls to -* cmd_download and writing to the flash download buffer via the -* aux regs. -* -* Arguments: -* hw device structure -* buf buffer to store PDA in -* len buffer length -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -ETIMEDOUT timeout waiting for the cmd regs to become -* available, or waiting for the control reg -* to indicate the Aux port is enabled. -* -ENODATA the buffer does NOT contain a valid PDA. -* Either the card PDA is bad, or the auxdata -* reads are giving us garbage. - -* -* Side effects: -* -* Call context: -* process or non-card interrupt. -----------------------------------------------------------------*/ + * hfa384x_drvr_readpda + * + * Performs the sequence to read the PDA space. Note there is no + * drvr_writepda() function. Writing a PDA is + * generally implemented by a calling component via calls to + * cmd_download and writing to the flash download buffer via the + * aux regs. + * + * Arguments: + * hw device structure + * buf buffer to store PDA in + * len buffer length + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * -ETIMEDOUT timeout waiting for the cmd regs to become + * available, or waiting for the control reg + * to indicate the Aux port is enabled. + * -ENODATA the buffer does NOT contain a valid PDA. + * Either the card PDA is bad, or the auxdata + * reads are giving us garbage. + * + * + * Side effects: + * + * Call context: + * process or non-card interrupt. + *---------------------------------------------------------------- + */ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len) { int result = 0; @@ -2306,7 +2343,7 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len) /* units of bytes */ result = hfa384x_dormem_wait(hw, currpage, curroffset, buf, - len); + len); if (result) { netdev_warn(hw->wlandev->netdev, @@ -2366,51 +2403,52 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len) } /*---------------------------------------------------------------- -* hfa384x_drvr_setconfig -* -* Performs the sequence necessary to write a config/info item. 
-* -* Arguments: -* hw device structure -* rid config/info record id (in host order) -* buf host side record buffer -* len buffer length (in bytes) -* -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_setconfig + * + * Performs the sequence necessary to write a config/info item. + * + * Arguments: + * hw device structure + * rid config/info record id (in host order) + * buf host side record buffer + * len buffer length (in bytes) + * + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len) { return hfa384x_dowrid_wait(hw, rid, buf, len); } /*---------------------------------------------------------------- -* hfa384x_drvr_start -* -* Issues the MAC initialize command, sets up some data structures, -* and enables the interrupts. After this function completes, the -* low-level stuff should be ready for any/all commands. -* -* Arguments: -* hw device structure -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ - + * hfa384x_drvr_start + * + * Issues the MAC initialize command, sets up some data structures, + * and enables the interrupts. After this function completes, the + * low-level stuff should be ready for any/all commands. + * + * Arguments: + * hw device structure + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_start(struct hfa384x *hw) { int result, result1, result2; @@ -2494,24 +2532,25 @@ int hfa384x_drvr_start(struct hfa384x *hw) } /*---------------------------------------------------------------- -* hfa384x_drvr_stop -* -* Shuts down the MAC to the point where it is safe to unload the -* driver. Any subsystem that may be holding a data or function -* ptr into the driver must be cleared/deinitialized. -* -* Arguments: -* hw device structure -* Returns: -* 0 success -* >0 f/w reported error - f/w status code -* <0 driver reported error -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ + * hfa384x_drvr_stop + * + * Shuts down the MAC to the point where it is safe to unload the + * driver. Any subsystem that may be holding a data or function + * ptr into the driver must be cleared/deinitialized. + * + * Arguments: + * hw device structure + * Returns: + * 0 success + * >0 f/w reported error - f/w status code + * <0 driver reported error + * + * Side effects: + * + * Call context: + * process + *---------------------------------------------------------------- + */ int hfa384x_drvr_stop(struct hfa384x *hw) { int i; @@ -2542,26 +2581,27 @@ int hfa384x_drvr_stop(struct hfa384x *hw) } /*---------------------------------------------------------------- -* hfa384x_drvr_txframe -* -* Takes a frame from prism2sta and queues it for transmission. -* -* Arguments: -* hw device structure -* skb packet buffer struct. Contains an 802.11 -* data frame. 
-* p80211_hdr points to the 802.11 header for the packet. -* Returns: -* 0 Success and more buffs available -* 1 Success but no more buffs -* 2 Allocation failure -* 4 Buffer full or queue busy -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_drvr_txframe + * + * Takes a frame from prism2sta and queues it for transmission. + * + * Arguments: + * hw device structure + * skb packet buffer struct. Contains an 802.11 + * data frame. + * p80211_hdr points to the 802.11 header for the packet. + * Returns: + * 0 Success and more buffs available + * 1 Success but no more buffs + * 2 Allocation failure + * 4 Buffer full or queue busy + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep) @@ -2608,7 +2648,7 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb, cpu_to_le16(hw->txbuff.txfrm.desc.tx_control); /* copy the header over to the txdesc */ - memcpy(&(hw->txbuff.txfrm.desc.frame_control), p80211_hdr, + memcpy(&hw->txbuff.txfrm.desc.frame_control, p80211_hdr, sizeof(union p80211_hdr)); /* if we're using host WEP, increase size by IV+ICV */ @@ -2638,9 +2678,9 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb, memcpy(ptr, p80211_wep->icv, sizeof(p80211_wep->icv)); /* Send the USB packet */ - usb_fill_bulk_urb(&(hw->tx_urb), hw->usb, + usb_fill_bulk_urb(&hw->tx_urb, hw->usb, hw->endp_out, - &(hw->txbuff), ROUNDUP64(usbpktlen), + &hw->txbuff, ROUNDUP64(usbpktlen), hfa384x_usbout_callback, hw->wlandev); hw->tx_urb.transfer_flags |= USB_QUEUE_BULK; @@ -2676,18 +2716,19 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev) } /*---------------------------------------------------------------- -* hfa384x_usbctlx_reaper_task -* -* Tasklet to delete dead CTLX objects -* -* Arguments: -* data ptr to a struct hfa384x -* -* Returns: -* -* Call context: -* Interrupt -----------------------------------------------------------------*/ + * hfa384x_usbctlx_reaper_task + * + * Tasklet to delete dead CTLX objects + * + * Arguments: + * data ptr to a struct hfa384x + * + * Returns: + * + * Call context: + * Interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbctlx_reaper_task(unsigned long data) { struct hfa384x *hw = (struct hfa384x *)data; @@ -2708,19 +2749,20 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data) } /*---------------------------------------------------------------- -* hfa384x_usbctlx_completion_task -* -* Tasklet to call completion handlers for returned CTLXs -* -* Arguments: -* data ptr to struct hfa384x -* -* Returns: -* Nothing -* -* Call context: -* Interrupt -----------------------------------------------------------------*/ + * hfa384x_usbctlx_completion_task + * + * Tasklet to call completion handlers for returned CTLXs + * + * Arguments: + * data ptr to struct hfa384x + * + * Returns: + * Nothing + * + * Call context: + * Interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbctlx_completion_task(unsigned long data) { struct hfa384x *hw = (struct hfa384x *)data; @@ -2781,22 +2823,23 @@ static void hfa384x_usbctlx_completion_task(unsigned long data) } /*---------------------------------------------------------------- -* unlocked_usbctlx_cancel_async -* -* Mark 
the CTLX dead asynchronously, and ensure that the -* next command on the queue is run afterwards. -* -* Arguments: -* hw ptr to the struct hfa384x structure -* ctlx ptr to a CTLX structure -* -* Returns: -* 0 the CTLX's URB is inactive -* -EINPROGRESS the URB is currently being unlinked -* -* Call context: -* Either process or interrupt, but presumably interrupt -----------------------------------------------------------------*/ + * unlocked_usbctlx_cancel_async + * + * Mark the CTLX dead asynchronously, and ensure that the + * next command on the queue is run afterwards. + * + * Arguments: + * hw ptr to the struct hfa384x structure + * ctlx ptr to a CTLX structure + * + * Returns: + * 0 the CTLX's URB is inactive + * -EINPROGRESS the URB is currently being unlinked + * + * Call context: + * Either process or interrupt, but presumably interrupt + *---------------------------------------------------------------- + */ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx) { @@ -2826,28 +2869,30 @@ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw, } /*---------------------------------------------------------------- -* unlocked_usbctlx_complete -* -* A CTLX has completed. It may have been successful, it may not -* have been. At this point, the CTLX should be quiescent. The URBs -* aren't active and the timers should have been stopped. -* -* The CTLX is migrated to the "completing" queue, and the completing -* tasklet is scheduled. -* -* Arguments: -* hw ptr to a struct hfa384x structure -* ctlx ptr to a ctlx structure -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* Either, assume interrupt -----------------------------------------------------------------*/ -static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx) + * unlocked_usbctlx_complete + * + * A CTLX has completed. It may have been successful, it may not + * have been. At this point, the CTLX should be quiescent. The URBs + * aren't active and the timers should have been stopped. + * + * The CTLX is migrated to the "completing" queue, and the completing + * tasklet is scheduled. + * + * Arguments: + * hw ptr to a struct hfa384x structure + * ctlx ptr to a ctlx structure + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * Either, assume interrupt + *---------------------------------------------------------------- + */ +static void unlocked_usbctlx_complete(struct hfa384x *hw, + struct hfa384x_usbctlx *ctlx) { /* Timers have been stopped, and ctlx should be in * a terminal state. Retire it from the "active" @@ -2871,21 +2916,22 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx } /*---------------------------------------------------------------- -* hfa384x_usbctlxq_run -* -* Checks to see if the head item is running. If not, starts it. -* -* Arguments: -* hw ptr to struct hfa384x -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* any -----------------------------------------------------------------*/ + * hfa384x_usbctlxq_run + * + * Checks to see if the head item is running. If not, starts it. 
+ * + * Arguments: + * hw ptr to struct hfa384x + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * any + *---------------------------------------------------------------- + */ static void hfa384x_usbctlxq_run(struct hfa384x *hw) { unsigned long flags; @@ -2916,9 +2962,9 @@ static void hfa384x_usbctlxq_run(struct hfa384x *hw) list_move_tail(&head->list, &hw->ctlxq.active); /* Fill the out packet */ - usb_fill_bulk_urb(&(hw->ctlx_urb), hw->usb, + usb_fill_bulk_urb(&hw->ctlx_urb, hw->usb, hw->endp_out, - &(head->outbuf), ROUNDUP64(head->outbufsize), + &head->outbuf, ROUNDUP64(head->outbufsize), hfa384x_ctlxout_callback, hw); hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK; @@ -2971,26 +3017,27 @@ static void hfa384x_usbctlxq_run(struct hfa384x *hw) } /*---------------------------------------------------------------- -* hfa384x_usbin_callback -* -* Callback for URBs on the BULKIN endpoint. -* -* Arguments: -* urb ptr to the completed urb -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_usbin_callback + * + * Callback for URBs on the BULKIN endpoint. + * + * Arguments: + * urb ptr to the completed urb + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbin_callback(struct urb *urb) { struct wlandevice *wlandev = urb->context; struct hfa384x *hw; - union hfa384x_usbin *usbin = (union hfa384x_usbin *)urb->transfer_buffer; + union hfa384x_usbin *usbin; struct sk_buff *skb = NULL; int result; int urb_status; @@ -3010,7 +3057,10 @@ static void hfa384x_usbin_callback(struct urb *urb) goto exit; skb = hw->rx_urb_skb; - BUG_ON(!skb || (skb->data != urb->transfer_buffer)); + if (!skb || (skb->data != urb->transfer_buffer)) { + WARN_ON(1); + return; + } hw->rx_urb_skb = NULL; @@ -3089,6 +3139,7 @@ static void hfa384x_usbin_callback(struct urb *urb) /* Note: the check of the sw_support field, the type field doesn't * have bit 12 set like the docs suggest. */ + usbin = (union hfa384x_usbin *)urb->transfer_buffer; type = le16_to_cpu(usbin->type); if (HFA384x_USB_ISRXFRM(type)) { if (action == HANDLE) { @@ -3147,25 +3198,26 @@ static void hfa384x_usbin_callback(struct urb *urb) } /*---------------------------------------------------------------- -* hfa384x_usbin_ctlx -* -* We've received a URB containing a Prism2 "response" message. -* This message needs to be matched up with a CTLX on the active -* queue and our state updated accordingly. -* -* Arguments: -* hw ptr to struct hfa384x -* usbin ptr to USB IN packet -* urb_status status of this Bulk-In URB -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_usbin_ctlx + * + * We've received a URB containing a Prism2 "response" message. + * This message needs to be matched up with a CTLX on the active + * queue and our state updated accordingly. 
+ * + * Arguments: + * hw ptr to struct hfa384x + * usbin ptr to USB IN packet + * urb_status status of this Bulk-In URB + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin, int urb_status) { @@ -3269,22 +3321,23 @@ static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin, } /*---------------------------------------------------------------- -* hfa384x_usbin_txcompl -* -* At this point we have the results of a previous transmit. -* -* Arguments: -* wlandev wlan device -* usbin ptr to the usb transfer buffer -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_usbin_txcompl + * + * At this point we have the results of a previous transmit. + * + * Arguments: + * wlandev wlan device + * usbin ptr to the usb transfer buffer + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbin_txcompl(struct wlandevice *wlandev, union hfa384x_usbin *usbin) { @@ -3300,22 +3353,23 @@ static void hfa384x_usbin_txcompl(struct wlandevice *wlandev, } /*---------------------------------------------------------------- -* hfa384x_usbin_rx -* -* At this point we have a successful received a rx frame packet. -* -* Arguments: -* wlandev wlan device -* usbin ptr to the usb transfer buffer -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_usbin_rx + * + * At this point we have a successful received a rx frame packet. + * + * Arguments: + * wlandev wlan device + * usbin ptr to the usb transfer buffer + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb) { union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data; @@ -3396,30 +3450,31 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb) } /*---------------------------------------------------------------- -* hfa384x_int_rxmonitor -* -* Helper function for int_rx. Handles monitor frames. -* Note that this function allocates space for the FCS and sets it -* to 0xffffffff. The hfa384x doesn't give us the FCS value but the -* higher layers expect it. 0xffffffff is used as a flag to indicate -* the FCS is bogus. -* -* Arguments: -* wlandev wlan device structure -* rxfrm rx descriptor read from card in int_rx -* -* Returns: -* nothing -* -* Side effects: -* Allocates an skb and passes it up via the PF_PACKET interface. -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_int_rxmonitor + * + * Helper function for int_rx. Handles monitor frames. + * Note that this function allocates space for the FCS and sets it + * to 0xffffffff. The hfa384x doesn't give us the FCS value but the + * higher layers expect it. 0xffffffff is used as a flag to indicate + * the FCS is bogus. 
+ * + * Arguments: + * wlandev wlan device structure + * rxfrm rx descriptor read from card in int_rx + * + * Returns: + * nothing + * + * Side effects: + * Allocates an skb and passes it up via the PF_PACKET interface. + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev, struct hfa384x_usb_rxfrm *rxfrm) { - struct hfa384x_rx_frame *rxdesc = &(rxfrm->desc); + struct hfa384x_rx_frame *rxdesc = &rxfrm->desc; unsigned int hdrlen = 0; unsigned int datalen = 0; unsigned int skblen = 0; @@ -3474,9 +3529,10 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev, } /* Copy the 802.11 header to the skb - (ctl frames may be less than a full header) */ + * (ctl frames may be less than a full header) + */ datap = skb_put(skb, hdrlen); - memcpy(datap, &(rxdesc->frame_control), hdrlen); + memcpy(datap, &rxdesc->frame_control, hdrlen); /* If any, copy the data from the card to the skb */ if (datalen > 0) { @@ -3501,22 +3557,23 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev, } /*---------------------------------------------------------------- -* hfa384x_usbin_info -* -* At this point we have a successful received a Prism2 info frame. -* -* Arguments: -* wlandev wlan device -* usbin ptr to the usb transfer buffer -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_usbin_info + * + * At this point we have a successful received a Prism2 info frame. + * + * Arguments: + * wlandev wlan device + * usbin ptr to the usb transfer buffer + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbin_info(struct wlandevice *wlandev, union hfa384x_usbin *usbin) { @@ -3526,21 +3583,22 @@ static void hfa384x_usbin_info(struct wlandevice *wlandev, } /*---------------------------------------------------------------- -* hfa384x_usbout_callback -* -* Callback for URBs on the BULKOUT endpoint. -* -* Arguments: -* urb ptr to the completed urb -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_usbout_callback + * + * Callback for URBs on the BULKOUT endpoint. + * + * Arguments: + * urb ptr to the completed urb + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbout_callback(struct urb *urb) { struct wlandevice *wlandev = urb->context; @@ -3601,21 +3659,22 @@ static void hfa384x_usbout_callback(struct urb *urb) } /*---------------------------------------------------------------- -* hfa384x_ctlxout_callback -* -* Callback for control data on the BULKOUT endpoint. -* -* Arguments: -* urb ptr to the completed urb -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_ctlxout_callback + * + * Callback for control data on the BULKOUT endpoint. 
+ * + * Arguments: + * urb ptr to the completed urb + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_ctlxout_callback(struct urb *urb) { struct hfa384x *hw = urb->context; @@ -3730,23 +3789,24 @@ static void hfa384x_ctlxout_callback(struct urb *urb) } /*---------------------------------------------------------------- -* hfa384x_usbctlx_reqtimerfn -* -* Timer response function for CTLX request timeouts. If this -* function is called, it means that the callback for the OUT -* URB containing a Prism2.x XXX_Request was never called. -* -* Arguments: -* data a ptr to the struct hfa384x -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_usbctlx_reqtimerfn + * + * Timer response function for CTLX request timeouts. If this + * function is called, it means that the callback for the OUT + * URB containing a Prism2.x XXX_Request was never called. + * + * Arguments: + * data a ptr to the struct hfa384x + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbctlx_reqtimerfn(unsigned long data) { struct hfa384x *hw = (struct hfa384x *)data; @@ -3788,23 +3848,24 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data) } /*---------------------------------------------------------------- -* hfa384x_usbctlx_resptimerfn -* -* Timer response function for CTLX response timeouts. If this -* function is called, it means that the callback for the IN -* URB containing a Prism2.x XXX_Response was never called. -* -* Arguments: -* data a ptr to the struct hfa384x -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * hfa384x_usbctlx_resptimerfn + * + * Timer response function for CTLX response timeouts. If this + * function is called, it means that the callback for the IN + * URB containing a Prism2.x XXX_Response was never called. 
+ * + * Arguments: + * data a ptr to the struct hfa384x + * + * Returns: + * nothing + * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usbctlx_resptimerfn(unsigned long data) { struct hfa384x *hw = (struct hfa384x *)data; @@ -3830,20 +3891,21 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data) } /*---------------------------------------------------------------- -* hfa384x_usb_throttlefn -* -* -* Arguments: -* data ptr to hw -* -* Returns: -* Nothing -* -* Side effects: -* -* Call context: -* Interrupt -----------------------------------------------------------------*/ + * hfa384x_usb_throttlefn + * + * + * Arguments: + * data ptr to hw + * + * Returns: + * Nothing + * + * Side effects: + * + * Call context: + * Interrupt + *---------------------------------------------------------------- + */ static void hfa384x_usb_throttlefn(unsigned long data) { struct hfa384x *hw = (struct hfa384x *)data; @@ -3869,24 +3931,26 @@ static void hfa384x_usb_throttlefn(unsigned long data) } /*---------------------------------------------------------------- -* hfa384x_usbctlx_submit -* -* Called from the doxxx functions to submit a CTLX to the queue -* -* Arguments: -* hw ptr to the hw struct -* ctlx ctlx structure to enqueue -* -* Returns: -* -ENODEV if the adapter is unplugged -* 0 -* -* Side effects: -* -* Call context: -* process or interrupt -----------------------------------------------------------------*/ -static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx) + * hfa384x_usbctlx_submit + * + * Called from the doxxx functions to submit a CTLX to the queue + * + * Arguments: + * hw ptr to the hw struct + * ctlx ctlx structure to enqueue + * + * Returns: + * -ENODEV if the adapter is unplugged + * 0 + * + * Side effects: + * + * Call context: + * process or interrupt + *---------------------------------------------------------------- + */ +static int hfa384x_usbctlx_submit(struct hfa384x *hw, + struct hfa384x_usbctlx *ctlx) { unsigned long flags; @@ -3906,21 +3970,22 @@ static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ct } /*---------------------------------------------------------------- -* hfa384x_isgood_pdrcore -* -* Quick check of PDR codes. -* -* Arguments: -* pdrcode PDR code number (host order) -* -* Returns: -* zero not good. -* one is good. -* -* Side effects: -* -* Call context: -----------------------------------------------------------------*/ + * hfa384x_isgood_pdrcore + * + * Quick check of PDR codes. + * + * Arguments: + * pdrcode PDR code number (host order) + * + * Returns: + * zero not good. + * one is good. + * + * Side effects: + * + * Call context: + *---------------------------------------------------------------- + */ static int hfa384x_isgood_pdrcode(u16 pdrcode) { switch (pdrcode) { diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c index 0247cbc291454761a0c88e5519b7fdc21f93ddfe..8387e6a3031a64ab0b233dbad7a6a535833bccfa 100644 --- a/drivers/staging/wlan-ng/p80211conv.c +++ b/drivers/staging/wlan-ng/p80211conv.c @@ -1,56 +1,56 @@ /* src/p80211/p80211conv.c -* -* Ether/802.11 conversions and packet buffer routines -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 
-* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* This file defines the functions that perform Ethernet to/from -* 802.11 frame conversions. -* -* -------------------------------------------------------------------- -* -*================================================================ -*/ + * + * Ether/802.11 conversions and packet buffer routines + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. 
+ * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * This file defines the functions that perform Ethernet to/from + * 802.11 frame conversions. + * + * -------------------------------------------------------------------- + * + *================================================================ + */ #include #include @@ -79,31 +79,31 @@ static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 }; static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 }; /*---------------------------------------------------------------- -* p80211pb_ether_to_80211 -* -* Uses the contents of the ether frame and the etherconv setting -* to build the elements of the 802.11 frame. -* -* We don't actually set -* up the frame header here. That's the MAC's job. We're only handling -* conversion of DIXII or 802.3+LLC frames to something that works -* with 802.11. -* -* Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11 -* FCS is also not present and will need to be added elsewhere. -* -* Arguments: -* ethconv Conversion type to perform -* skb skbuff containing the ether frame -* p80211_hdr 802.11 header -* -* Returns: -* 0 on success, non-zero otherwise -* -* Call context: -* May be called in interrupt or non-interrupt context -*---------------------------------------------------------------- -*/ + * p80211pb_ether_to_80211 + * + * Uses the contents of the ether frame and the etherconv setting + * to build the elements of the 802.11 frame. + * + * We don't actually set + * up the frame header here. That's the MAC's job. We're only handling + * conversion of DIXII or 802.3+LLC frames to something that works + * with 802.11. + * + * Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11 + * FCS is also not present and will need to be added elsewhere. + * + * Arguments: + * ethconv Conversion type to perform + * skb skbuff containing the ether frame + * p80211_hdr 802.11 header + * + * Returns: + * 0 on success, non-zero otherwise + * + * Call context: + * May be called in interrupt or non-interrupt context + *---------------------------------------------------------------- + */ int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep) @@ -255,25 +255,25 @@ static void orinoco_spy_gather(struct wlandevice *wlandev, char *mac, } /*---------------------------------------------------------------- -* p80211pb_80211_to_ether -* -* Uses the contents of a received 802.11 frame and the etherconv -* setting to build an ether frame. -* -* This function extracts the src and dest address from the 802.11 -* frame to use in the construction of the eth frame. 
-* -* Arguments: -* ethconv Conversion type to perform -* skb Packet buffer containing the 802.11 frame -* -* Returns: -* 0 on success, non-zero otherwise -* -* Call context: -* May be called in interrupt or non-interrupt context -*---------------------------------------------------------------- -*/ + * p80211pb_80211_to_ether + * + * Uses the contents of a received 802.11 frame and the etherconv + * setting to build an ether frame. + * + * This function extracts the src and dest address from the 802.11 + * frame to use in the construction of the eth frame. + * + * Arguments: + * ethconv Conversion type to perform + * skb Packet buffer containing the 802.11 frame + * + * Returns: + * 0 on success, non-zero otherwise + * + * Call context: + * May be called in interrupt or non-interrupt context + *---------------------------------------------------------------- + */ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv, struct sk_buff *skb) { @@ -508,22 +508,22 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv, } /*---------------------------------------------------------------- -* p80211_stt_findproto -* -* Searches the 802.1h Selective Translation Table for a given -* protocol. -* -* Arguments: -* proto protocol number (in host order) to search for. -* -* Returns: -* 1 - if the table is empty or a match is found. -* 0 - if the table is non-empty and a match is not found. -* -* Call context: -* May be called in interrupt or non-interrupt context -*---------------------------------------------------------------- -*/ + * p80211_stt_findproto + * + * Searches the 802.1h Selective Translation Table for a given + * protocol. + * + * Arguments: + * proto protocol number (in host order) to search for. + * + * Returns: + * 1 - if the table is empty or a match is found. + * 0 - if the table is non-empty and a match is not found. + * + * Call context: + * May be called in interrupt or non-interrupt context + *---------------------------------------------------------------- + */ int p80211_stt_findproto(u16 proto) { /* Always return found for now. This is the behavior used by the */ @@ -540,21 +540,21 @@ int p80211_stt_findproto(u16 proto) } /*---------------------------------------------------------------- -* p80211skb_rxmeta_detach -* -* Disconnects the frmmeta and rxmeta from an skb. -* -* Arguments: -* wlandev The wlandev this skb belongs to. -* skb The skb we're attaching to. -* -* Returns: -* 0 on success, non-zero otherwise -* -* Call context: -* May be called in interrupt or non-interrupt context -*---------------------------------------------------------------- -*/ + * p80211skb_rxmeta_detach + * + * Disconnects the frmmeta and rxmeta from an skb. + * + * Arguments: + * wlandev The wlandev this skb belongs to. + * skb The skb we're attaching to. + * + * Returns: + * 0 on success, non-zero otherwise + * + * Call context: + * May be called in interrupt or non-interrupt context + *---------------------------------------------------------------- + */ void p80211skb_rxmeta_detach(struct sk_buff *skb) { struct p80211_rxmeta *rxmeta; @@ -584,22 +584,22 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb) } /*---------------------------------------------------------------- -* p80211skb_rxmeta_attach -* -* Allocates a p80211rxmeta structure, initializes it, and attaches -* it to an skb. -* -* Arguments: -* wlandev The wlandev this skb belongs to. -* skb The skb we're attaching to. 
-* -* Returns: -* 0 on success, non-zero otherwise -* -* Call context: -* May be called in interrupt or non-interrupt context -*---------------------------------------------------------------- -*/ + * p80211skb_rxmeta_attach + * + * Allocates a p80211rxmeta structure, initializes it, and attaches + * it to an skb. + * + * Arguments: + * wlandev The wlandev this skb belongs to. + * skb The skb we're attaching to. + * + * Returns: + * 0 on success, non-zero otherwise + * + * Call context: + * May be called in interrupt or non-interrupt context + *---------------------------------------------------------------- + */ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) { int result = 0; @@ -615,11 +615,9 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) } /* Allocate the rxmeta */ - rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC); + rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC); if (!rxmeta) { - netdev_err(wlandev->netdev, - "%s: Failed to allocate rxmeta.\n", wlandev->name); result = 1; goto exit; } @@ -638,22 +636,22 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) } /*---------------------------------------------------------------- -* p80211skb_free -* -* Frees an entire p80211skb by checking and freeing the meta struct -* and then freeing the skb. -* -* Arguments: -* wlandev The wlandev this skb belongs to. -* skb The skb we're attaching to. -* -* Returns: -* 0 on success, non-zero otherwise -* -* Call context: -* May be called in interrupt or non-interrupt context -*---------------------------------------------------------------- -*/ + * p80211skb_free + * + * Frees an entire p80211skb by checking and freeing the meta struct + * and then freeing the skb. + * + * Arguments: + * wlandev The wlandev this skb belongs to. + * skb The skb we're attaching to. + * + * Returns: + * 0 on success, non-zero otherwise + * + * Call context: + * May be called in interrupt or non-interrupt context + *---------------------------------------------------------------- + */ void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb) { struct p80211_frmmeta *meta; diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h index 8c10357bedf0f51dda4212964c8959a33c91c59e..ed70d98e5cf1ac7c43c2fba30194928649337f4b 100644 --- a/drivers/staging/wlan-ng/p80211conv.h +++ b/drivers/staging/wlan-ng/p80211conv.h @@ -1,54 +1,54 @@ /* p80211conv.h -* -* Ether/802.11 conversions and packet buffer routines -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. 
If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* This file declares the functions, types and macros that perform -* Ethernet to/from 802.11 frame conversions. -* -* -------------------------------------------------------------------- -*/ + * + * Ether/802.11 conversions and packet buffer routines + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * This file declares the functions, types and macros that perform + * Ethernet to/from 802.11 frame conversions. 
+ * + * -------------------------------------------------------------------- + */ #ifndef _LINUX_P80211CONV_H #define _LINUX_P80211CONV_H diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h index 79d9b20b364d8a4323d5faf76f596e1a6822ca0f..2c44c613a5864e64d242bc3e2c076812cc1fba5f 100644 --- a/drivers/staging/wlan-ng/p80211hdr.h +++ b/drivers/staging/wlan-ng/p80211hdr.h @@ -1,61 +1,61 @@ /* p80211hdr.h -* -* Macros, types, and functions for handling 802.11 MAC headers -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* This file declares the constants and types used in the interface -* between a wlan driver and the user mode utilities. -* -* Note: -* - Constant values are always in HOST byte order. To assign -* values to multi-byte fields they _must_ be converted to -* ieee byte order. To retrieve multi-byte values from incoming -* frames, they must be converted to host order. -* -* All functions declared here are implemented in p80211.c -* -------------------------------------------------------------------- -*/ + * + * Macros, types, and functions for handling 802.11 MAC headers + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. 
See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * This file declares the constants and types used in the interface + * between a wlan driver and the user mode utilities. + * + * Note: + * - Constant values are always in HOST byte order. To assign + * values to multi-byte fields they _must_ be converted to + * ieee byte order. To retrieve multi-byte values from incoming + * frames, they must be converted to host order. + * + * All functions declared here are implemented in p80211.c + * -------------------------------------------------------------------- + */ #ifndef _P80211HDR_H #define _P80211HDR_H @@ -131,8 +131,8 @@ /* SET_FC_FSTYPE(WLAN_FSTYPE_RTS) ); */ /*------------------------------------------------------------*/ -#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & (BIT(2) | BIT(3))) >> 2) -#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & (BIT(4)|BIT(5)|BIT(6)|BIT(7))) >> 4) +#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & GENMASK(3, 2)) >> 2) +#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & GENMASK(7, 4)) >> 4) #define WLAN_GET_FC_TODS(n) ((((u16)(n)) & (BIT(8))) >> 8) #define WLAN_GET_FC_FROMDS(n) ((((u16)(n)) & (BIT(9))) >> 9) #define WLAN_GET_FC_ISWEP(n) ((((u16)(n)) & (BIT(14))) >> 14) diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h index 06c5e36649a7ff9439adac7d0ce5bb5086995ad3..ab6067e650502bab60eaa030bd0923ce50c61504 100644 --- a/drivers/staging/wlan-ng/p80211ioctl.h +++ b/drivers/staging/wlan-ng/p80211ioctl.h @@ -1,64 +1,64 @@ /* p80211ioctl.h -* -* Declares constants and types for the p80211 ioctls -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. 
-* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* While this file is called 'ioctl' is purpose goes a little beyond -* that. This file defines the types and contants used to implement -* the p80211 request/confirm/indicate interfaces on Linux. The -* request/confirm interface is, in fact, normally implemented as an -* ioctl. The indicate interface on the other hand, is implemented -* using the Linux 'netlink' interface. -* -* The reason I say that request/confirm is 'normally' implemented -* via ioctl is that we're reserving the right to be able to send -* request commands via the netlink interface. This will be necessary -* if we ever need to send request messages when there aren't any -* wlan network devices present (i.e. sending a message that only p80211 -* cares about. -* -------------------------------------------------------------------- -*/ + * + * Declares constants and types for the p80211 ioctls + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. 
+ * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * While this file is called 'ioctl' is purpose goes a little beyond + * that. This file defines the types and contants used to implement + * the p80211 request/confirm/indicate interfaces on Linux. The + * request/confirm interface is, in fact, normally implemented as an + * ioctl. The indicate interface on the other hand, is implemented + * using the Linux 'netlink' interface. + * + * The reason I say that request/confirm is 'normally' implemented + * via ioctl is that we're reserving the right to be able to send + * request commands via the netlink interface. This will be necessary + * if we ever need to send request messages when there aren't any + * wlan network devices present (i.e. sending a message that only p80211 + * cares about. + * -------------------------------------------------------------------- + */ #ifndef _P80211IOCTL_H #define _P80211IOCTL_H diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h index b0d3567ca0ad394f6352f7fb18dba52b86301d31..ea3d9ce222b95192ef87c3d5bad0e724bd017b4f 100644 --- a/drivers/staging/wlan-ng/p80211metadef.h +++ b/drivers/staging/wlan-ng/p80211metadef.h @@ -1,48 +1,48 @@ /* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY. -* -------------------------------------------------------------------- -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. 
-* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -*/ + * -------------------------------------------------------------------- + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + */ #ifndef _P80211MKMETADEF_H #define _P80211MKMETADEF_H diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h index 3dd066ac034e9cedb01ac560af34c3211b4f84c3..653950fd9843472e644f00ce3c43fe08298d8bf5 100644 --- a/drivers/staging/wlan-ng/p80211mgmt.h +++ b/drivers/staging/wlan-ng/p80211mgmt.h @@ -1,101 +1,101 @@ /* p80211mgmt.h -* -* Macros, types, and functions to handle 802.11 mgmt frames -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. 
-* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* This file declares the constants and types used in the interface -* between a wlan driver and the user mode utilities. -* -* Notes: -* - Constant values are always in HOST byte order. To assign -* values to multi-byte fields they _must_ be converted to -* ieee byte order. To retrieve multi-byte values from incoming -* frames, they must be converted to host order. -* -* - The len member of the frame structure does NOT!!! include -* the MAC CRC. Therefore, the len field on rx'd frames should -* have 4 subtracted from it. -* -* All functions declared here are implemented in p80211.c -* -* The types, macros, and functions defined here are primarily -* used for encoding and decoding management frames. They are -* designed to follow these patterns of use: -* -* DECODE: -* 1) a frame of length len is received into buffer b -* 2) using the hdr structure and macros, we determine the type -* 3) an appropriate mgmt frame structure, mf, is allocated and zeroed -* 4) mf.hdr = b -* mf.buf = b -* mf.len = len -* 5) call mgmt_decode( mf ) -* 6) the frame field pointers in mf are now set. Note that any -* multi-byte frame field values accessed using the frame field -* pointers are in ieee byte order and will have to be converted -* to host order. -* -* ENCODE: -* 1) Library client allocates buffer space for maximum length -* frame of the desired type -* 2) Library client allocates a mgmt frame structure, called mf, -* of the desired type -* 3) Set the following: -* mf.type = -* mf.buf = -* 4) call mgmt_encode( mf ) -* 5) all of the fixed field pointers and fixed length information element -* pointers in mf are now set to their respective locations in the -* allocated space (fortunately, all variable length information elements -* fall at the end of their respective frames). -* 5a) The length field is set to include the last of the fixed and fixed -* length fields. It may have to be updated for optional or variable -* length information elements. -* 6) Optional and variable length information elements are special cases -* and must be handled individually by the client code. -* -------------------------------------------------------------------- -*/ + * + * Macros, types, and functions to handle 802.11 mgmt frames + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 
+ * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * This file declares the constants and types used in the interface + * between a wlan driver and the user mode utilities. + * + * Notes: + * - Constant values are always in HOST byte order. To assign + * values to multi-byte fields they _must_ be converted to + * ieee byte order. To retrieve multi-byte values from incoming + * frames, they must be converted to host order. + * + * - The len member of the frame structure does NOT!!! include + * the MAC CRC. Therefore, the len field on rx'd frames should + * have 4 subtracted from it. + * + * All functions declared here are implemented in p80211.c + * + * The types, macros, and functions defined here are primarily + * used for encoding and decoding management frames. They are + * designed to follow these patterns of use: + * + * DECODE: + * 1) a frame of length len is received into buffer b + * 2) using the hdr structure and macros, we determine the type + * 3) an appropriate mgmt frame structure, mf, is allocated and zeroed + * 4) mf.hdr = b + * mf.buf = b + * mf.len = len + * 5) call mgmt_decode( mf ) + * 6) the frame field pointers in mf are now set. Note that any + * multi-byte frame field values accessed using the frame field + * pointers are in ieee byte order and will have to be converted + * to host order. 
+ * + * ENCODE: + * 1) Library client allocates buffer space for maximum length + * frame of the desired type + * 2) Library client allocates a mgmt frame structure, called mf, + * of the desired type + * 3) Set the following: + * mf.type = + * mf.buf = + * 4) call mgmt_encode( mf ) + * 5) all of the fixed field pointers and fixed length information element + * pointers in mf are now set to their respective locations in the + * allocated space (fortunately, all variable length information elements + * fall at the end of their respective frames). + * 5a) The length field is set to include the last of the fixed and fixed + * length fields. It may have to be updated for optional or variable + * length information elements. + * 6) Optional and variable length information elements are special cases + * and must be handled individually by the client code. + * -------------------------------------------------------------------- + */ #ifndef _P80211MGMT_H #define _P80211MGMT_H diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h index 43d2f971e2cdb5fdc2fbe4fff4be65a714e00301..40c5cf5997c7027dac88f3084c58cd01e5ec0b3f 100644 --- a/drivers/staging/wlan-ng/p80211msg.h +++ b/drivers/staging/wlan-ng/p80211msg.h @@ -1,49 +1,49 @@ /* p80211msg.h -* -* Macros, constants, types, and funcs for req and ind messages -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -*/ + * + * Macros, constants, types, and funcs for req and ind messages + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. 
+ * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + */ #ifndef _P80211MSG_H #define _P80211MSG_H diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 825a63a7c0e319c582c97b5182b501ff5f821054..73fcf07254fefd8c25d37295bf3d2375d9c978af 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -1,53 +1,53 @@ /* src/p80211/p80211knetdev.c -* -* Linux Kernel net device interface -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. 
-* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* The functions required for a Linux network device are defined here. -* -* -------------------------------------------------------------------- -*/ + * + * Linux Kernel net device interface + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * The functions required for a Linux network device are defined here. + * + * -------------------------------------------------------------------- + */ #include #include @@ -112,17 +112,18 @@ module_param(wlan_wext_write, int, 0644); MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions"); /*---------------------------------------------------------------- -* p80211knetdev_init -* -* Init method for a Linux netdevice. Called in response to -* register_netdev. -* -* Arguments: -* none -* -* Returns: -* nothing -----------------------------------------------------------------*/ + * p80211knetdev_init + * + * Init method for a Linux netdevice. Called in response to + * register_netdev. 
+ * + * Arguments: + * none + * + * Returns: + * nothing + *---------------------------------------------------------------- + */ static int p80211knetdev_init(struct net_device *netdev) { /* Called in response to register_netdev */ @@ -133,19 +134,20 @@ static int p80211knetdev_init(struct net_device *netdev) } /*---------------------------------------------------------------- -* p80211knetdev_open -* -* Linux netdevice open method. Following a successful call here, -* the device is supposed to be ready for tx and rx. In our -* situation that may not be entirely true due to the state of the -* MAC below. -* -* Arguments: -* netdev Linux network device structure -* -* Returns: -* zero on success, non-zero otherwise -----------------------------------------------------------------*/ + * p80211knetdev_open + * + * Linux netdevice open method. Following a successful call here, + * the device is supposed to be ready for tx and rx. In our + * situation that may not be entirely true due to the state of the + * MAC below. + * + * Arguments: + * netdev Linux network device structure + * + * Returns: + * zero on success, non-zero otherwise + *---------------------------------------------------------------- + */ static int p80211knetdev_open(struct net_device *netdev) { int result = 0; /* success */ @@ -170,17 +172,18 @@ static int p80211knetdev_open(struct net_device *netdev) } /*---------------------------------------------------------------- -* p80211knetdev_stop -* -* Linux netdevice stop (close) method. Following this call, -* no frames should go up or down through this interface. -* -* Arguments: -* netdev Linux network device structure -* -* Returns: -* zero on success, non-zero otherwise -----------------------------------------------------------------*/ + * p80211knetdev_stop + * + * Linux netdevice stop (close) method. Following this call, + * no frames should go up or down through this interface. + * + * Arguments: + * netdev Linux network device structure + * + * Returns: + * zero on success, non-zero otherwise + *---------------------------------------------------------------- + */ static int p80211knetdev_stop(struct net_device *netdev) { int result = 0; @@ -196,18 +199,19 @@ static int p80211knetdev_stop(struct net_device *netdev) } /*---------------------------------------------------------------- -* p80211netdev_rx -* -* Frame receive function called by the mac specific driver. -* -* Arguments: -* wlandev WLAN network device structure -* skb skbuff containing a full 802.11 frame. -* Returns: -* nothing -* Side effects: -* -----------------------------------------------------------------*/ + * p80211netdev_rx + * + * Frame receive function called by the mac specific driver. + * + * Arguments: + * wlandev WLAN network device structure + * skb skbuff containing a full 802.11 frame. 
+ * Returns: + * nothing + * Side effects: + * + *---------------------------------------------------------------- + */ void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb) { /* Enqueue for post-irq processing */ @@ -227,7 +231,8 @@ void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb) * CONV_TO_ETHER_FAILED if conversion failed * CONV_TO_ETHER_SKIPPED if frame is ignored */ -static int p80211_convert_to_ether(struct wlandevice *wlandev, struct sk_buff *skb) +static int p80211_convert_to_ether(struct wlandevice *wlandev, + struct sk_buff *skb) { struct p80211_hdr_a3 *hdr; @@ -272,7 +277,6 @@ static void p80211netdev_rx_bh(unsigned long arg) /* Let's empty our our queue */ while ((skb = skb_dequeue(&wlandev->nsd_rxq))) { if (wlandev->state == WLAN_DEVICE_OPEN) { - if (dev->type != ARPHRD_ETHER) { /* RAW frame; we shouldn't convert it */ /* XXX Append the Prism Header here instead. */ @@ -299,24 +303,25 @@ static void p80211netdev_rx_bh(unsigned long arg) } /*---------------------------------------------------------------- -* p80211knetdev_hard_start_xmit -* -* Linux netdevice method for transmitting a frame. -* -* Arguments: -* skb Linux sk_buff containing the frame. -* netdev Linux netdevice. -* -* Side effects: -* If the lower layers report that buffers are full. netdev->tbusy -* will be set to prevent higher layers from sending more traffic. -* -* Note: If this function returns non-zero, higher layers retain -* ownership of the skb. -* -* Returns: -* zero on success, non-zero on failure. -----------------------------------------------------------------*/ + * p80211knetdev_hard_start_xmit + * + * Linux netdevice method for transmitting a frame. + * + * Arguments: + * skb Linux sk_buff containing the frame. + * netdev Linux netdevice. + * + * Side effects: + * If the lower layers report that buffers are full. netdev->tbusy + * will be set to prevent higher layers from sending more traffic. + * + * Note: If this function returns non-zero, higher layers retain + * ownership of the skb. + * + * Returns: + * zero on success, non-zero on failure. + *---------------------------------------------------------------- + */ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { @@ -336,8 +341,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, goto failed; } - memset(&p80211_hdr, 0, sizeof(union p80211_hdr)); - memset(&p80211_wep, 0, sizeof(struct p80211_metawep)); + memset(&p80211_hdr, 0, sizeof(p80211_hdr)); + memset(&p80211_wep, 0, sizeof(p80211_wep)); if (netif_queue_stopped(netdev)) { netdev_dbg(netdev, "called when queue stopped.\n"); @@ -375,8 +380,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, goto failed; } /* move the header over */ - memcpy(&p80211_hdr, skb->data, sizeof(union p80211_hdr)); - skb_pull(skb, sizeof(union p80211_hdr)); + memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr)); + skb_pull(skb, sizeof(p80211_hdr)); } else { if (skb_ether_to_p80211 (wlandev, wlandev->ethconv, skb, &p80211_hdr, @@ -435,17 +440,18 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, } /*---------------------------------------------------------------- -* p80211knetdev_set_multicast_list -* -* Called from higher layers whenever there's a need to set/clear -* promiscuous mode or rewrite the multicast list. 
-* -* Arguments: -* none -* -* Returns: -* nothing -----------------------------------------------------------------*/ + * p80211knetdev_set_multicast_list + * + * Called from higher layers whenever there's a need to set/clear + * promiscuous mode or rewrite the multicast list. + * + * Arguments: + * none + * + * Returns: + * nothing + *---------------------------------------------------------------- + */ static void p80211knetdev_set_multicast_list(struct net_device *dev) { struct wlandevice *wlandev = dev->ml_priv; @@ -454,12 +460,12 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev) if (wlandev->set_multicast_list) wlandev->set_multicast_list(wlandev, dev); - } #ifdef SIOCETHTOOL -static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useraddr) +static int p80211netdev_ethtool(struct wlandevice *wlandev, + void __user *useraddr) { u32 ethcmd; struct ethtool_drvinfo info; @@ -505,33 +511,35 @@ static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useradd #endif /*---------------------------------------------------------------- -* p80211knetdev_do_ioctl -* -* Handle an ioctl call on one of our devices. Everything Linux -* ioctl specific is done here. Then we pass the contents of the -* ifr->data to the request message handler. -* -* Arguments: -* dev Linux kernel netdevice -* ifr Our private ioctl request structure, typed for the -* generic struct ifreq so we can use ptr to func -* w/o cast. -* -* Returns: -* zero on success, a negative errno on failure. Possible values: -* -ENETDOWN Device isn't up. -* -EBUSY cmd already in progress -* -ETIME p80211 cmd timed out (MSD may have its own timers) -* -EFAULT memory fault copying msg from user buffer -* -ENOMEM unable to allocate kernel msg buffer -* -ENOSYS bad magic, it the cmd really for us? -* -EintR sleeping on cmd, awakened by signal, cmd cancelled. -* -* Call Context: -* Process thread (ioctl caller). TODO: SMP support may require -* locks. -----------------------------------------------------------------*/ -static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + * p80211knetdev_do_ioctl + * + * Handle an ioctl call on one of our devices. Everything Linux + * ioctl specific is done here. Then we pass the contents of the + * ifr->data to the request message handler. + * + * Arguments: + * dev Linux kernel netdevice + * ifr Our private ioctl request structure, typed for the + * generic struct ifreq so we can use ptr to func + * w/o cast. + * + * Returns: + * zero on success, a negative errno on failure. Possible values: + * -ENETDOWN Device isn't up. + * -EBUSY cmd already in progress + * -ETIME p80211 cmd timed out (MSD may have its own timers) + * -EFAULT memory fault copying msg from user buffer + * -ENOMEM unable to allocate kernel msg buffer + * -EINVAL bad magic, it the cmd really for us? + * -EintR sleeping on cmd, awakened by signal, cmd cancelled. + * + * Call Context: + * Process thread (ioctl caller). TODO: SMP support may require + * locks. 
+ *---------------------------------------------------------------- + */ +static int p80211knetdev_do_ioctl(struct net_device *dev, + struct ifreq *ifr, int cmd) { int result = 0; struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr; @@ -550,7 +558,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int /* Test the magic, assume ifr is good if it's there */ if (req->magic != P80211_IOCTL_MAGIC) { - result = -ENOSYS; + result = -EINVAL; goto bail; } @@ -558,7 +566,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int result = 0; goto bail; } else if (cmd != P80211_IFREQ) { - result = -ENOSYS; + result = -EINVAL; goto bail; } @@ -586,30 +594,31 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int } /*---------------------------------------------------------------- -* p80211knetdev_set_mac_address -* -* Handles the ioctl for changing the MACAddress of a netdevice -* -* references: linux/netdevice.h and drivers/net/net_init.c -* -* NOTE: [MSM] We only prevent address changes when the netdev is -* up. We don't control anything based on dot11 state. If the -* address is changed on a STA that's currently associated, you -* will probably lose the ability to send and receive data frames. -* Just be aware. Therefore, this should usually only be done -* prior to scan/join/auth/assoc. -* -* Arguments: -* dev netdevice struct -* addr the new MACAddress (a struct) -* -* Returns: -* zero on success, a negative errno on failure. Possible values: -* -EBUSY device is bussy (cmd not possible) -* -and errors returned by: p80211req_dorequest(..) -* -* by: Collin R. Mulliner -----------------------------------------------------------------*/ + * p80211knetdev_set_mac_address + * + * Handles the ioctl for changing the MACAddress of a netdevice + * + * references: linux/netdevice.h and drivers/net/net_init.c + * + * NOTE: [MSM] We only prevent address changes when the netdev is + * up. We don't control anything based on dot11 state. If the + * address is changed on a STA that's currently associated, you + * will probably lose the ability to send and receive data frames. + * Just be aware. Therefore, this should usually only be done + * prior to scan/join/auth/assoc. + * + * Arguments: + * dev netdevice struct + * addr the new MACAddress (a struct) + * + * Returns: + * zero on success, a negative errno on failure. Possible values: + * -EBUSY device is bussy (cmd not possible) + * -and errors returned by: p80211req_dorequest(..) + * + * by: Collin R. Mulliner + *---------------------------------------------------------------- + */ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *new_addr = addr; @@ -629,9 +638,9 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr) resultcode = &dot11req.resultcode; /* Set up a dot11req_mibset */ - memset(&dot11req, 0, sizeof(struct p80211msg_dot11req_mibset)); + memset(&dot11req, 0, sizeof(dot11req)); dot11req.msgcode = DIDmsg_dot11req_mibset; - dot11req.msglen = sizeof(struct p80211msg_dot11req_mibset); + dot11req.msglen = sizeof(dot11req); memcpy(dot11req.devname, ((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1); @@ -669,18 +678,6 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr) return result; } -static int wlan_change_mtu(struct net_device *dev, int new_mtu) -{ - /* 2312 is max 802.11 payload, 20 is overhead, (ether + llc +snap) - and another 8 for wep. 
*/ - if ((new_mtu < 68) || (new_mtu > (2312 - 20 - 8))) - return -EINVAL; - - dev->mtu = new_mtu; - - return 0; -} - static const struct net_device_ops p80211_netdev_ops = { .ndo_init = p80211knetdev_init, .ndo_open = p80211knetdev_open, @@ -690,33 +687,33 @@ static const struct net_device_ops p80211_netdev_ops = { .ndo_do_ioctl = p80211knetdev_do_ioctl, .ndo_set_mac_address = p80211knetdev_set_mac_address, .ndo_tx_timeout = p80211knetdev_tx_timeout, - .ndo_change_mtu = wlan_change_mtu, .ndo_validate_addr = eth_validate_addr, }; /*---------------------------------------------------------------- -* wlan_setup -* -* Roughly matches the functionality of ether_setup. Here -* we set up any members of the wlandevice structure that are common -* to all devices. Additionally, we allocate a linux 'struct device' -* and perform the same setup as ether_setup. -* -* Note: It's important that the caller have setup the wlandev->name -* ptr prior to calling this function. -* -* Arguments: -* wlandev ptr to the wlandev structure for the -* interface. -* physdev ptr to usb device -* Returns: -* zero on success, non-zero otherwise. -* Call Context: -* Should be process thread. We'll assume it might be -* interrupt though. When we add support for statically -* compiled drivers, this function will be called in the -* context of the kernel startup code. -----------------------------------------------------------------*/ + * wlan_setup + * + * Roughly matches the functionality of ether_setup. Here + * we set up any members of the wlandevice structure that are common + * to all devices. Additionally, we allocate a linux 'struct device' + * and perform the same setup as ether_setup. + * + * Note: It's important that the caller have setup the wlandev->name + * ptr prior to calling this function. + * + * Arguments: + * wlandev ptr to the wlandev structure for the + * interface. + * physdev ptr to usb device + * Returns: + * zero on success, non-zero otherwise. + * Call Context: + * Should be process thread. We'll assume it might be + * interrupt though. When we add support for statically + * compiled drivers, this function will be called in the + * context of the kernel startup code. + *---------------------------------------------------------------- + */ int wlan_setup(struct wlandevice *wlandev, struct device *physdev) { int result = 0; @@ -756,6 +753,11 @@ int wlan_setup(struct wlandevice *wlandev, struct device *physdev) wdev->wiphy = wiphy; wdev->iftype = NL80211_IFTYPE_STATION; netdev->ieee80211_ptr = wdev; + netdev->min_mtu = 68; + /* 2312 is max 802.11 payload, 20 is overhead, + * (ether + llc + snap) and another 8 for wep. + */ + netdev->max_mtu = (2312 - 20 - 8); netif_stop_queue(netdev); netif_carrier_off(netdev); @@ -765,24 +767,25 @@ int wlan_setup(struct wlandevice *wlandev, struct device *physdev) } /*---------------------------------------------------------------- -* wlan_unsetup -* -* This function is paired with the wlan_setup routine. It should -* be called after unregister_wlandev. Basically, all it does is -* free the 'struct device' that's associated with the wlandev. -* We do it here because the 'struct device' isn't allocated -* explicitly in the driver code, it's done in wlan_setup. To -* do the free in the driver might seem like 'magic'. -* -* Arguments: -* wlandev ptr to the wlandev structure for the -* interface. -* Call Context: -* Should be process thread. We'll assume it might be -* interrupt though. 
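The wlan_change_mtu() removal above works because wlan_setup() now fills in netdev->min_mtu and netdev->max_mtu; as a rough paraphrase of the 4.10-era core behaviour (not code from this patch), dev_set_mtu() applies the equivalent range check itself:

#include <linux/errno.h>

/* Sketch of the centralized bounds check that replaces the per-driver
 * MTU hook. With min_mtu = 68 and max_mtu = 2312 - 20 - 8 = 2284,
 * out-of-range requests are rejected before the driver is involved.
 */
static int mtu_in_range(int new_mtu, unsigned int min_mtu,
			unsigned int max_mtu)
{
	if (new_mtu < (int)min_mtu)
		return -EINVAL;
	if (max_mtu > 0 && new_mtu > (int)max_mtu)
		return -EINVAL;
	return 0;
}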
When we add support for statically -* compiled drivers, this function will be called in the -* context of the kernel startup code. -----------------------------------------------------------------*/ + * wlan_unsetup + * + * This function is paired with the wlan_setup routine. It should + * be called after unregister_wlandev. Basically, all it does is + * free the 'struct device' that's associated with the wlandev. + * We do it here because the 'struct device' isn't allocated + * explicitly in the driver code, it's done in wlan_setup. To + * do the free in the driver might seem like 'magic'. + * + * Arguments: + * wlandev ptr to the wlandev structure for the + * interface. + * Call Context: + * Should be process thread. We'll assume it might be + * interrupt though. When we add support for statically + * compiled drivers, this function will be called in the + * context of the kernel startup code. + *---------------------------------------------------------------- + */ void wlan_unsetup(struct wlandevice *wlandev) { struct wireless_dev *wdev; @@ -799,46 +802,48 @@ void wlan_unsetup(struct wlandevice *wlandev) } /*---------------------------------------------------------------- -* register_wlandev -* -* Roughly matches the functionality of register_netdev. This function -* is called after the driver has successfully probed and set up the -* resources for the device. It's now ready to become a named device -* in the Linux system. -* -* First we allocate a name for the device (if not already set), then -* we call the Linux function register_netdevice. -* -* Arguments: -* wlandev ptr to the wlandev structure for the -* interface. -* Returns: -* zero on success, non-zero otherwise. -* Call Context: -* Can be either interrupt or not. -----------------------------------------------------------------*/ + * register_wlandev + * + * Roughly matches the functionality of register_netdev. This function + * is called after the driver has successfully probed and set up the + * resources for the device. It's now ready to become a named device + * in the Linux system. + * + * First we allocate a name for the device (if not already set), then + * we call the Linux function register_netdevice. + * + * Arguments: + * wlandev ptr to the wlandev structure for the + * interface. + * Returns: + * zero on success, non-zero otherwise. + * Call Context: + * Can be either interrupt or not. + *---------------------------------------------------------------- + */ int register_wlandev(struct wlandevice *wlandev) { return register_netdev(wlandev->netdev); } /*---------------------------------------------------------------- -* unregister_wlandev -* -* Roughly matches the functionality of unregister_netdev. This -* function is called to remove a named device from the system. -* -* First we tell linux that the device should no longer exist. -* Then we remove it from the list of known wlan devices. -* -* Arguments: -* wlandev ptr to the wlandev structure for the -* interface. -* Returns: -* zero on success, non-zero otherwise. -* Call Context: -* Can be either interrupt or not. -----------------------------------------------------------------*/ + * unregister_wlandev + * + * Roughly matches the functionality of unregister_netdev. This + * function is called to remove a named device from the system. + * + * First we tell linux that the device should no longer exist. + * Then we remove it from the list of known wlan devices. + * + * Arguments: + * wlandev ptr to the wlandev structure for the + * interface. 
+ * Returns: + * zero on success, non-zero otherwise. + * Call Context: + * Can be either interrupt or not. + *---------------------------------------------------------------- + */ int unregister_wlandev(struct wlandevice *wlandev) { struct sk_buff *skb; @@ -853,35 +858,36 @@ int unregister_wlandev(struct wlandevice *wlandev) } /*---------------------------------------------------------------- -* p80211netdev_hwremoved -* -* Hardware removed notification. This function should be called -* immediately after an MSD has detected that the underlying hardware -* has been yanked out from under us. The primary things we need -* to do are: -* - Mark the wlandev -* - Prevent any further traffic from the knetdev i/f -* - Prevent any further requests from mgmt i/f -* - If there are any waitq'd mgmt requests or mgmt-frame exchanges, -* shut them down. -* - Call the MSD hwremoved function. -* -* The remainder of the cleanup will be handled by unregister(). -* Our primary goal here is to prevent as much tickling of the MSD -* as possible since the MSD is already in a 'wounded' state. -* -* TODO: As new features are added, this function should be -* updated. -* -* Arguments: -* wlandev WLAN network device structure -* Returns: -* nothing -* Side effects: -* -* Call context: -* Usually interrupt. -----------------------------------------------------------------*/ + * p80211netdev_hwremoved + * + * Hardware removed notification. This function should be called + * immediately after an MSD has detected that the underlying hardware + * has been yanked out from under us. The primary things we need + * to do are: + * - Mark the wlandev + * - Prevent any further traffic from the knetdev i/f + * - Prevent any further requests from mgmt i/f + * - If there are any waitq'd mgmt requests or mgmt-frame exchanges, + * shut them down. + * - Call the MSD hwremoved function. + * + * The remainder of the cleanup will be handled by unregister(). + * Our primary goal here is to prevent as much tickling of the MSD + * as possible since the MSD is already in a 'wounded' state. + * + * TODO: As new features are added, this function should be + * updated. + * + * Arguments: + * wlandev WLAN network device structure + * Returns: + * nothing + * Side effects: + * + * Call context: + * Usually interrupt. + *---------------------------------------------------------------- + */ void p80211netdev_hwremoved(struct wlandevice *wlandev) { wlandev->hwremoved = 1; @@ -892,26 +898,27 @@ void p80211netdev_hwremoved(struct wlandevice *wlandev) } /*---------------------------------------------------------------- -* p80211_rx_typedrop -* -* Classifies the frame, increments the appropriate counter, and -* returns 0|1|2 indicating whether the driver should handle, ignore, or -* drop the frame -* -* Arguments: -* wlandev wlan device structure -* fc frame control field -* -* Returns: -* zero if the frame should be handled by the driver, -* one if the frame should be ignored -* anything else means we drop it. -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ + * p80211_rx_typedrop + * + * Classifies the frame, increments the appropriate counter, and + * returns 0|1|2 indicating whether the driver should handle, ignore, or + * drop the frame + * + * Arguments: + * wlandev wlan device structure + * fc frame control field + * + * Returns: + * zero if the frame should be handled by the driver, + * one if the frame should be ignored + * anything else means we drop it. 
+ * + * Side effects: + * + * Call context: + * interrupt + *---------------------------------------------------------------- + */ static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc) { u16 ftype; diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h index 1e6a774fc7c50d77314a64a1ab089cae212df3b5..8e0d08298c8bc13b9b9e1b99560e601f469affcd 100644 --- a/drivers/staging/wlan-ng/p80211netdev.h +++ b/drivers/staging/wlan-ng/p80211netdev.h @@ -1,54 +1,54 @@ /* p80211netdev.h -* -* WLAN net device structure and functions -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* This file declares the structure type that represents each wlan -* interface. -* -* -------------------------------------------------------------------- -*/ + * + * WLAN net device structure and functions + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. 
If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * This file declares the structure type that represents each wlan + * interface. + * + * -------------------------------------------------------------------- + */ #ifndef _LINUX_P80211NETDEV_H #define _LINUX_P80211NETDEV_H @@ -143,7 +143,7 @@ extern struct iw_handler_def p80211wext_handler_def; #define NUM_WEPKEYS 4 #define MAX_KEYLEN 32 -#define HOSTWEP_DEFAULTKEY_MASK (BIT(1)|BIT(0)) +#define HOSTWEP_DEFAULTKEY_MASK GENMASK(1, 0) #define HOSTWEP_SHAREDKEY BIT(3) #define HOSTWEP_DECRYPT BIT(4) #define HOSTWEP_ENCRYPT BIT(5) diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c index d43e85b5d49b6cc75d267895f226c55a89666a60..621df98183bf9353f7411ca62f5e03488f7b4d9e 100644 --- a/drivers/staging/wlan-ng/p80211req.c +++ b/drivers/staging/wlan-ng/p80211req.c @@ -1,54 +1,54 @@ /* src/p80211/p80211req.c -* -* Request/Indication/MacMgmt interface handling functions -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. 
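Backing up to the HOSTWEP_DEFAULTKEY_MASK change above: GENMASK(h, l) simply builds a contiguous bit mask from bit h down to bit l, so the new definition is numerically identical to the old BIT(1)|BIT(0) form. A small self-contained check, using stand-in macros since the kernel provides its own:

#include <assert.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & \
			 (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

int main(void)
{
	/* Bits 1..0 set: the same 0x3 default-key mask either way. */
	assert(GENMASK(1, 0) == (BIT(1) | BIT(0)));
	assert(GENMASK(1, 0) == 0x3UL);
	return 0;
}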
-* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* This file contains the functions, types, and macros to support the -* MLME request interface that's implemented via the device ioctls. -* -* -------------------------------------------------------------------- -*/ + * + * Request/Indication/MacMgmt interface handling functions + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * This file contains the functions, types, and macros to support the + * MLME request interface that's implemented via the device ioctls. + * + * -------------------------------------------------------------------- + */ #include #include @@ -72,10 +72,11 @@ #include "p80211metastruct.h" #include "p80211req.h" -static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg); +static void p80211req_handlemsg(struct wlandevice *wlandev, + struct p80211msg *msg); static void p80211req_mibset_mibget(struct wlandevice *wlandev, - struct p80211msg_dot11req_mibget *mib_msg, - int isget); + struct p80211msg_dot11req_mibget *mib_msg, + int isget); static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data, int isget, u32 flag) @@ -93,21 +94,22 @@ static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data, } /*---------------------------------------------------------------- -* p80211req_dorequest -* -* Handles an MLME request/confirm message. 
-* -* Arguments: -* wlandev WLAN device struct -* msgbuf Buffer containing a request message -* -* Returns: -* 0 on success, an errno otherwise -* -* Call context: -* Potentially blocks the caller, so it's a good idea to -* not call this function from an interrupt context. -----------------------------------------------------------------*/ + * p80211req_dorequest + * + * Handles an MLME request/confirm message. + * + * Arguments: + * wlandev WLAN device struct + * msgbuf Buffer containing a request message + * + * Returns: + * 0 on success, an errno otherwise + * + * Call context: + * Potentially blocks the caller, so it's a good idea to + * not call this function from an interrupt context. + *---------------------------------------------------------------- + */ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf) { struct p80211msg *msg = (struct p80211msg *)msgbuf; @@ -122,7 +124,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf) /* Check Permissions */ if (!capable(CAP_NET_ADMIN) && - (msg->msgcode != DIDmsg_dot11req_mibget)) { + (msg->msgcode != DIDmsg_dot11req_mibget)) { netdev_err(wlandev->netdev, "%s: only dot11req_mibget allowed for non-root.\n", wlandev->name); @@ -130,7 +132,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf) } /* Check for busy status */ - if (test_and_set_bit(1, &(wlandev->request_pending))) + if (test_and_set_bit(1, &wlandev->request_pending)) return -EBUSY; /* Allow p80211 to look at msg and handle if desired. */ @@ -139,35 +141,36 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf) p80211req_handlemsg(wlandev, msg); /* Pass it down to wlandev via wlandev->mlmerequest */ - if (wlandev->mlmerequest != NULL) + if (wlandev->mlmerequest) wlandev->mlmerequest(wlandev, msg); - clear_bit(1, &(wlandev->request_pending)); + clear_bit(1, &wlandev->request_pending); return 0; /* if result==0, msg->status still may contain an err */ } /*---------------------------------------------------------------- -* p80211req_handlemsg -* -* p80211 message handler. Primarily looks for messages that -* belong to p80211 and then dispatches the appropriate response. -* TODO: we don't do anything yet. Once the linuxMIB is better -* defined we'll need a get/set handler. -* -* Arguments: -* wlandev WLAN device struct -* msg message structure -* -* Returns: -* nothing (any results are set in the status field of the msg) -* -* Call context: -* Process thread -----------------------------------------------------------------*/ -static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg) + * p80211req_handlemsg + * + * p80211 message handler. Primarily looks for messages that + * belong to p80211 and then dispatches the appropriate response. + * TODO: we don't do anything yet. Once the linuxMIB is better + * defined we'll need a get/set handler. 
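As an aside on the busy check in p80211req_dorequest() above, where bit 1 of request_pending serves as a single-outstanding-request flag, here is a rough userspace analogue using C11 atomics; it mirrors the idea, not the kernel primitive:

#include <errno.h>
#include <stdatomic.h>

static atomic_flag request_pending = ATOMIC_FLAG_INIT;

int do_request_sketch(void)
{
	/* Like test_and_set_bit(): non-zero means the flag was already set. */
	if (atomic_flag_test_and_set(&request_pending))
		return -EBUSY;		/* another request is in flight */

	/* ...hand the message to p80211 and the MSD here... */

	atomic_flag_clear(&request_pending);	/* clear_bit() equivalent */
	return 0;
}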
+ * + * Arguments: + * wlandev WLAN device struct + * msg message structure + * + * Returns: + * nothing (any results are set in the status field of the msg) + * + * Call context: + * Process thread + *---------------------------------------------------------------- + */ +static void p80211req_handlemsg(struct wlandevice *wlandev, + struct p80211msg *msg) { switch (msg->msgcode) { - case DIDmsg_lnxreq_hostwep:{ struct p80211msg_lnxreq_hostwep *req = (struct p80211msg_lnxreq_hostwep *)msg; @@ -192,8 +195,8 @@ static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *ms } static void p80211req_mibset_mibget(struct wlandevice *wlandev, - struct p80211msg_dot11req_mibget *mib_msg, - int isget) + struct p80211msg_dot11req_mibget *mib_msg, + int isget) { struct p80211itemd *mibitem = (struct p80211itemd *)mib_msg->mibattribute.data; struct p80211pstrd *pstr = (struct p80211pstrd *)mibitem->data; diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h index 8d3054c22a0529c7f9dda6bf2a207ceed2aa7158..6c72f59993e0b0c8d7e9f72253a5f12bf3f7d58b 100644 --- a/drivers/staging/wlan-ng/p80211req.h +++ b/drivers/staging/wlan-ng/p80211req.h @@ -1,49 +1,49 @@ /* p80211req.h -* -* Request handling functions -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -*/ + * + * Request handling functions + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + */ #ifndef _LINUX_P80211REQ_H #define _LINUX_P80211REQ_H diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c index 23b1837380378b0b7cae4a402dfd539f8d0020bb..6492ffe59085f3e4eceac881e9a636a9687ce479 100644 --- a/drivers/staging/wlan-ng/p80211wep.c +++ b/drivers/staging/wlan-ng/p80211wep.c @@ -1,49 +1,49 @@ /* src/p80211/p80211wep.c -* -* WEP encode/decode for P80211. -* -* Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. 
-* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -*/ + * + * WEP encode/decode for P80211. + * + * Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + */ /*================================================================*/ /* System Includes */ @@ -52,8 +52,6 @@ #include #include #include - - #include "p80211hdr.h" #include "p80211types.h" #include "p80211msg.h" @@ -125,14 +123,13 @@ int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen) return -1; if (keylen >= MAX_KEYLEN) return -1; - if (key == NULL) + if (!key) return -1; if (keynum < 0) return -1; if (keynum >= NUM_WEPKEYS) return -1; - wlandev->wep_keylens[keynum] = keylen; memcpy(wlandev->wep_keys[keynum], key, keylen); @@ -176,7 +173,6 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override, keylen += 3; /* add in IV bytes */ - /* set up the RC4 state */ for (i = 0; i < 256; i++) s[i] = i; @@ -217,8 +213,8 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override, } /* encrypts in-place. 
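The key-setup loop visible in wep_decrypt() above is the textbook RC4 key schedule; the following standalone sketch shows that step on its own (the generic algorithm, not the driver's exact code), where 'key' is the 3-byte IV followed by the WEP key:

static void rc4_key_setup(unsigned char s[256],
			  const unsigned char *key, unsigned int keylen)
{
	unsigned int i, j = 0;
	unsigned char tmp;

	for (i = 0; i < 256; i++)
		s[i] = i;

	for (i = 0; i < 256; i++) {
		j = (j + s[i] + key[i % keylen]) & 0xff;
		tmp = s[i];		/* swap s[i] and s[j] */
		s[i] = s[j];
		s[j] = tmp;
	}
}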
*/ -int wep_encrypt(struct wlandevice *wlandev, u8 *buf, u8 *dst, u32 len, int keynum, - u8 *iv, u8 *icv) +int wep_encrypt(struct wlandevice *wlandev, u8 *buf, + u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv) { u32 i, j, k, crc, keylen; u8 s[256], key[64]; diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c index 96aa21188669d34d3d03e703c6659dafbddb855c..2e349f87e73818e3d9714708c345a9633904abea 100644 --- a/drivers/staging/wlan-ng/prism2fw.c +++ b/drivers/staging/wlan-ng/prism2fw.c @@ -1,49 +1,49 @@ /* from src/prism2/download/prism2dl.c -* -* utility for downloading prism2 images moved into kernelspace -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -*/ + * + * utility for downloading prism2 images moved into kernelspace + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. 
If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + */ /*================================================================*/ /* System Includes */ @@ -124,7 +124,7 @@ struct imgchunk { /* Data records */ static unsigned int ns3data; -static struct s3datarec s3data[S3DATA_MAX]; +static struct s3datarec *s3data; /* Plug records */ static unsigned int ns3plug; @@ -161,7 +161,7 @@ static struct hfa384x_caplevel priid; /* Local Function Declarations */ static int prism2_fwapply(const struct ihex_binrec *rfptr, -struct wlandevice *wlandev); + struct wlandevice *wlandev); static int read_fwfile(const struct ihex_binrec *rfptr); @@ -172,13 +172,15 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev); static int mkpdrlist(struct pda *pda); static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks, - struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda); + struct s3plugrec *s3plug, unsigned int ns3plug, + struct pda *pda); static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks, - struct s3crcrec *s3crc, unsigned int ns3crc); + struct s3crcrec *s3crc, unsigned int ns3crc); static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, - unsigned int nfchunks); + unsigned int nfchunks); + static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks); static void free_srecs(void); @@ -189,30 +191,31 @@ static int validate_identity(void); /* Function Definitions */ /*---------------------------------------------------------------- -* prism2_fwtry -* -* Try and get firmware into memory -* -* Arguments: -* udev usb device structure -* wlandev wlan device structure -* -* Returns: -* 0 - success -* ~0 - failure -----------------------------------------------------------------*/ + * prism2_fwtry + * + * Try and get firmware into memory + * + * Arguments: + * udev usb device structure + * wlandev wlan device structure + * + * Returns: + * 0 - success + * ~0 - failure + *---------------------------------------------------------------- + */ static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev) { const struct firmware *fw_entry = NULL; netdev_info(wlandev->netdev, "prism2_usb: Checking for firmware %s\n", - PRISM2_USB_FWFILE); + PRISM2_USB_FWFILE); if (request_ihex_firmware(&fw_entry, PRISM2_USB_FWFILE, &udev->dev) != 0) { netdev_info(wlandev->netdev, - "prism2_usb: Firmware not available, but not essential\n"); + "prism2_usb: Firmware not available, but not essential\n"); netdev_info(wlandev->netdev, - "prism2_usb: can continue to use card anyway.\n"); + "prism2_usb: can continue to use card 
anyway.\n"); return 1; } @@ -226,18 +229,19 @@ static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev) } /*---------------------------------------------------------------- -* prism2_fwapply -* -* Apply the firmware loaded into memory -* -* Arguments: -* rfptr firmware image in kernel memory -* wlandev device -* -* Returns: -* 0 - success -* ~0 - failure -----------------------------------------------------------------*/ + * prism2_fwapply + * + * Apply the firmware loaded into memory + * + * Arguments: + * rfptr firmware image in kernel memory + * wlandev device + * + * Returns: + * 0 - success + * ~0 - failure + *---------------------------------------------------------------- + */ static int prism2_fwapply(const struct ihex_binrec *rfptr, struct wlandevice *wlandev) { @@ -248,7 +252,12 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr, /* Initialize the data structures */ ns3data = 0; - memset(s3data, 0, sizeof(s3data)); + s3data = kcalloc(S3DATA_MAX, sizeof(*s3data), GFP_KERNEL); + if (!s3data) { + result = -ENOMEM; + goto out; + } + ns3plug = 0; memset(s3plug, 0, sizeof(s3plug)); ns3crc = 0; @@ -372,24 +381,25 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr, } /*---------------------------------------------------------------- -* crcimage -* -* Adds a CRC16 in the two bytes prior to each block identified by -* an S3 CRC record. Currently, we don't actually do a CRC we just -* insert the value 0xC0DE in hfa384x order. -* -* Arguments: -* fchunk Array of image chunks -* nfchunks Number of image chunks -* s3crc Array of crc records -* ns3crc Number of crc records -* -* Returns: -* 0 success -* ~0 failure -----------------------------------------------------------------*/ + * crcimage + * + * Adds a CRC16 in the two bytes prior to each block identified by + * an S3 CRC record. Currently, we don't actually do a CRC we just + * insert the value 0xC0DE in hfa384x order. + * + * Arguments: + * fchunk Array of image chunks + * nfchunks Number of image chunks + * s3crc Array of crc records + * ns3crc Number of crc records + * + * Returns: + * 0 success + * ~0 failure + *---------------------------------------------------------------- + */ static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks, - struct s3crcrec *s3crc, unsigned int ns3crc) + struct s3crcrec *s3crc, unsigned int ns3crc) { int result = 0; int i; @@ -433,22 +443,22 @@ static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks, dest = fchunk[c].data + chunkoff; *dest = 0xde; *(dest + 1) = 0xc0; - } return result; } /*---------------------------------------------------------------- -* free_chunks -* -* Clears the chunklist data structures in preparation for a new file. -* -* Arguments: -* none -* -* Returns: -* nothing -----------------------------------------------------------------*/ + * free_chunks + * + * Clears the chunklist data structures in preparation for a new file. + * + * Arguments: + * none + * + * Returns: + * nothing + *---------------------------------------------------------------- + */ static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks) { int i; @@ -458,24 +468,24 @@ static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks) *nfchunks = 0; memset(fchunk, 0, sizeof(*fchunk)); - } /*---------------------------------------------------------------- -* free_srecs -* -* Clears the srec data structures in preparation for a new file. 
-* -* Arguments: -* none -* -* Returns: -* nothing -----------------------------------------------------------------*/ + * free_srecs + * + * Clears the srec data structures in preparation for a new file. + * + * Arguments: + * none + * + * Returns: + * nothing + *---------------------------------------------------------------- + */ static void free_srecs(void) { ns3data = 0; - memset(s3data, 0, sizeof(s3data)); + kfree(s3data); ns3plug = 0; memset(s3plug, 0, sizeof(s3plug)); ns3crc = 0; @@ -486,19 +496,20 @@ static void free_srecs(void) } /*---------------------------------------------------------------- -* mkimage -* -* Scans the currently loaded set of S records for data residing -* in contiguous memory regions. Each contiguous region is then -* made into a 'chunk'. This function assumes that we're building -* a new chunk list. Assumes the s3data items are in sorted order. -* -* Arguments: none -* -* Returns: -* 0 - success -* ~0 - failure (probably an errno) -----------------------------------------------------------------*/ + * mkimage + * + * Scans the currently loaded set of S records for data residing + * in contiguous memory regions. Each contiguous region is then + * made into a 'chunk'. This function assumes that we're building + * a new chunk list. Assumes the s3data items are in sorted order. + * + * Arguments: none + * + * Returns: + * 0 - success + * ~0 - failure (probably an errno) + *---------------------------------------------------------------- + */ static int mkimage(struct imgchunk *clist, unsigned int *ccnt) { int result = 0; @@ -577,19 +588,20 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt) } /*---------------------------------------------------------------- -* mkpdrlist -* -* Reads a raw PDA and builds an array of pdrec_t structures. -* -* Arguments: -* pda buffer containing raw PDA bytes -* pdrec ptr to an array of pdrec_t's. Will be filled on exit. -* nrec ptr to a variable that will contain the count of PDRs -* -* Returns: -* 0 - success -* ~0 - failure (probably an errno) -----------------------------------------------------------------*/ + * mkpdrlist + * + * Reads a raw PDA and builds an array of pdrec_t structures. + * + * Arguments: + * pda buffer containing raw PDA bytes + * pdrec ptr to an array of pdrec_t's. Will be filled on exit. + * nrec ptr to a variable that will contain the count of PDRs + * + * Returns: + * 0 - success + * ~0 - failure (probably an errno) + *---------------------------------------------------------------- + */ static int mkpdrlist(struct pda *pda) { u16 *pda16 = (u16 *)pda->buf; @@ -599,7 +611,7 @@ static int mkpdrlist(struct pda *pda) curroff = 0; while (curroff < (HFA384x_PDA_LEN_MAX / 2 - 1) && le16_to_cpu(pda16[curroff + 1]) != HFA384x_PDR_END_OF_PDA) { - pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]); + pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff]; if (le16_to_cpu(pda->rec[pda->nrec]->code) == HFA384x_PDR_NICID) { @@ -631,37 +643,38 @@ static int mkpdrlist(struct pda *pda) (pda->nrec)++; curroff += le16_to_cpu(pda16[curroff]) + 1; - } if (curroff >= (HFA384x_PDA_LEN_MAX / 2 - 1)) { pr_err("no end record found or invalid lengths in PDR data, exiting. 
%x %d\n", curroff, pda->nrec); return 1; } - pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]); + pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff]; (pda->nrec)++; return 0; } /*---------------------------------------------------------------- -* plugimage -* -* Plugs the given image using the given plug records from the given -* PDA and filename. -* -* Arguments: -* fchunk Array of image chunks -* nfchunks Number of image chunks -* s3plug Array of plug records -* ns3plug Number of plug records -* pda Current pda data -* -* Returns: -* 0 success -* ~0 failure -----------------------------------------------------------------*/ + * plugimage + * + * Plugs the given image using the given plug records from the given + * PDA and filename. + * + * Arguments: + * fchunk Array of image chunks + * nfchunks Number of image chunks + * s3plug Array of plug records + * ns3plug Number of plug records + * pda Current pda data + * + * Returns: + * 0 success + * ~0 failure + *---------------------------------------------------------------- + */ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks, - struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda) + struct s3plugrec *s3plug, unsigned int ns3plug, + struct pda *pda) { int result = 0; int i; /* plug index */ @@ -741,31 +754,31 @@ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks, memset(dest, 0, s3plug[i].len); strncpy(dest, PRISM2_USB_FWFILE, s3plug[i].len - 1); } else { /* plug a PDR */ - memcpy(dest, &(pda->rec[j]->data), s3plug[i].len); + memcpy(dest, &pda->rec[j]->data, s3plug[i].len); } } return result; - } /*---------------------------------------------------------------- -* read_cardpda -* -* Sends the command for the driver to read the pda from the card -* named in the device variable. Upon success, the card pda is -* stored in the "cardpda" variables. Note that the pda structure -* is considered 'well formed' after this function. That means -* that the nrecs is valid, the rec array has been set up, and there's -* a valid PDAEND record in the raw PDA data. -* -* Arguments: -* pda pda structure -* wlandev device -* -* Returns: -* 0 - success -* ~0 - failure (probably an errno) -----------------------------------------------------------------*/ + * read_cardpda + * + * Sends the command for the driver to read the pda from the card + * named in the device variable. Upon success, the card pda is + * stored in the "cardpda" variables. Note that the pda structure + * is considered 'well formed' after this function. That means + * that the nrecs is valid, the rec array has been set up, and there's + * a valid PDAEND record in the raw PDA data. + * + * Arguments: + * pda pda structure + * wlandev device + * + * Returns: + * 0 - success + * ~0 - failure (probably an errno) + *---------------------------------------------------------------- + */ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev) { int result = 0; @@ -802,65 +815,66 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev) } /*---------------------------------------------------------------- -* read_fwfile -* -* Reads the given fw file which should have been compiled from an srec -* file. Each record in the fw file will either be a plain data record, -* a start address record, or other records used for plugging. -* -* Note that data records are expected to be sorted into -* ascending address order in the fw file. 
-* -* Note also that the start address record, originally an S7 record in -* the srec file, is expected in the fw file to be like a data record but -* with a certain address to make it identifiable. -* -* Here's the SREC format that the fw should have come from: -* S[37]nnaaaaaaaaddd...dddcc -* -* nn - number of bytes starting with the address field -* aaaaaaaa - address in readable (or big endian) format -* dd....dd - 0-245 data bytes (two chars per byte) -* cc - checksum -* -* The S7 record's (there should be only one) address value gets -* converted to an S3 record with address of 0xff400000, with the -* start address being stored as a 4 byte data word. That address is -* the start execution address used for RAM downloads. -* -* The S3 records have a collection of subformats indicated by the -* value of aaaaaaaa: -* 0xff000000 - Plug record, data field format: -* xxxxxxxxaaaaaaaassssssss -* x - PDR code number (little endian) -* a - Address in load image to plug (little endian) -* s - Length of plug data area (little endian) -* -* 0xff100000 - CRC16 generation record, data field format: -* aaaaaaaassssssssbbbbbbbb -* a - Start address for CRC calculation (little endian) -* s - Length of data to calculate over (little endian) -* b - Boolean, true=write crc, false=don't write -* -* 0xff200000 - Info record, data field format: -* ssssttttdd..dd -* s - Size in words (little endian) -* t - Info type (little endian), see #defines and -* struct s3inforec for details about types. -* d - (s - 1) little endian words giving the contents of -* the given info type. -* -* 0xff400000 - Start address record, data field format: -* aaaaaaaa -* a - Address in load image to plug (little endian) -* -* Arguments: -* record firmware image (ihex record structure) in kernel memory -* -* Returns: -* 0 - success -* ~0 - failure (probably an errno) -----------------------------------------------------------------*/ + * read_fwfile + * + * Reads the given fw file which should have been compiled from an srec + * file. Each record in the fw file will either be a plain data record, + * a start address record, or other records used for plugging. + * + * Note that data records are expected to be sorted into + * ascending address order in the fw file. + * + * Note also that the start address record, originally an S7 record in + * the srec file, is expected in the fw file to be like a data record but + * with a certain address to make it identifiable. + * + * Here's the SREC format that the fw should have come from: + * S[37]nnaaaaaaaaddd...dddcc + * + * nn - number of bytes starting with the address field + * aaaaaaaa - address in readable (or big endian) format + * dd....dd - 0-245 data bytes (two chars per byte) + * cc - checksum + * + * The S7 record's (there should be only one) address value gets + * converted to an S3 record with address of 0xff400000, with the + * start address being stored as a 4 byte data word. That address is + * the start execution address used for RAM downloads. 
+ * + * The S3 records have a collection of subformats indicated by the + * value of aaaaaaaa: + * 0xff000000 - Plug record, data field format: + * xxxxxxxxaaaaaaaassssssss + * x - PDR code number (little endian) + * a - Address in load image to plug (little endian) + * s - Length of plug data area (little endian) + * + * 0xff100000 - CRC16 generation record, data field format: + * aaaaaaaassssssssbbbbbbbb + * a - Start address for CRC calculation (little endian) + * s - Length of data to calculate over (little endian) + * b - Boolean, true=write crc, false=don't write + * + * 0xff200000 - Info record, data field format: + * ssssttttdd..dd + * s - Size in words (little endian) + * t - Info type (little endian), see #defines and + * struct s3inforec for details about types. + * d - (s - 1) little endian words giving the contents of + * the given info type. + * + * 0xff400000 - Start address record, data field format: + * aaaaaaaa + * a - Address in load image to plug (little endian) + * + * Arguments: + * record firmware image (ihex record structure) in kernel memory + * + * Returns: + * 0 - success + * ~0 - failure (probably an errno) + *---------------------------------------------------------------- + */ static int read_fwfile(const struct ihex_binrec *record) { int i; @@ -872,7 +886,6 @@ static int read_fwfile(const struct ihex_binrec *record) pr_debug("Reading fw file ...\n"); while (record) { - rcnt++; len = be16_to_cpu(record->len); @@ -887,8 +900,8 @@ static int read_fwfile(const struct ihex_binrec *record) case S3ADDR_START: startaddr = *ptr32; pr_debug(" S7 start addr, record=%d addr=0x%08x\n", - rcnt, - startaddr); + rcnt, + startaddr); break; case S3ADDR_PLUG: s3plug[ns3plug].itemcode = *ptr32; @@ -896,10 +909,10 @@ static int read_fwfile(const struct ihex_binrec *record) s3plug[ns3plug].len = *(ptr32 + 2); pr_debug(" S3 plugrec, record=%d itemcode=0x%08x addr=0x%08x len=%d\n", - rcnt, - s3plug[ns3plug].itemcode, - s3plug[ns3plug].addr, - s3plug[ns3plug].len); + rcnt, + s3plug[ns3plug].itemcode, + s3plug[ns3plug].addr, + s3plug[ns3plug].len); ns3plug++; if (ns3plug == S3PLUG_MAX) { @@ -913,10 +926,10 @@ static int read_fwfile(const struct ihex_binrec *record) s3crc[ns3crc].dowrite = *(ptr32 + 2); pr_debug(" S3 crcrec, record=%d addr=0x%08x len=%d write=0x%08x\n", - rcnt, - s3crc[ns3crc].addr, - s3crc[ns3crc].len, - s3crc[ns3crc].dowrite); + rcnt, + s3crc[ns3crc].addr, + s3crc[ns3crc].len, + s3crc[ns3crc].dowrite); ns3crc++; if (ns3crc == S3CRC_MAX) { pr_err("S3 crcrec limit reached - aborting\n"); @@ -928,16 +941,16 @@ static int read_fwfile(const struct ihex_binrec *record) s3info[ns3info].type = *(ptr16 + 1); pr_debug(" S3 inforec, record=%d len=0x%04x type=0x%04x\n", - rcnt, - s3info[ns3info].len, - s3info[ns3info].type); + rcnt, + s3info[ns3info].len, + s3info[ns3info].type); if (((s3info[ns3info].len - 1) * sizeof(u16)) > sizeof(s3info[ns3info].info)) { pr_err("S3 inforec length too long - aborting\n"); return 1; } - tmpinfo = (u16 *)&(s3info[ns3info].info.version); + tmpinfo = (u16 *)&s3info[ns3info].info.version; pr_debug(" info="); for (i = 0; i < s3info[ns3info].len - 1; i++) { tmpinfo[i] = *(ptr16 + 2 + i); @@ -968,22 +981,23 @@ static int read_fwfile(const struct ihex_binrec *record) } /*---------------------------------------------------------------- -* writeimage -* -* Takes the chunks, builds p80211 messages and sends them down -* to the driver for writing to the card. 
-* -* Arguments: -* wlandev device -* fchunk Array of image chunks -* nfchunks Number of image chunks -* -* Returns: -* 0 success -* ~0 failure -----------------------------------------------------------------*/ + * writeimage + * + * Takes the chunks, builds p80211 messages and sends them down + * to the driver for writing to the card. + * + * Arguments: + * wlandev device + * fchunk Array of image chunks + * nfchunks Number of image chunks + * + * Returns: + * 0 success + * ~0 failure + *---------------------------------------------------------------- + */ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, - unsigned int nfchunks) + unsigned int nfchunks) { int result = 0; struct p80211msg_p2req_ramdl_state *rstmsg; @@ -1099,7 +1113,6 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, result = 1; goto free_result; } - } } diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index 170de1c9eac4d9251211143939d67a0eb494e856..c558ad656c49e2a8348edac02fde055fd7f1732c 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c @@ -1,61 +1,61 @@ /* src/prism2/driver/prism2mgmt.c -* -* Management request handler functions. -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* The functions in this file handle management requests sent from -* user mode. -* -* Most of these functions have two separate blocks of code that are -* conditional on whether this is a station or an AP. This is used -* to separate out the STA and AP responses to these management primitives. -* It's a choice (good, bad, indifferent?) 
to have the code in the same -* place so it's clear that the same primitive is implemented in both -* cases but has different behavior. -* -* -------------------------------------------------------------------- -*/ + * + * Management request handler functions. + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * The functions in this file handle management requests sent from + * user mode. + * + * Most of these functions have two separate blocks of code that are + * conditional on whether this is a station or an AP. This is used + * to separate out the STA and AP responses to these management primitives. + * It's a choice (good, bad, indifferent?) to have the code in the same + * place so it's clear that the same primitive is implemented in both + * cases but has different behavior. + * + * -------------------------------------------------------------------- + */ #include #include @@ -84,35 +84,36 @@ #include "prism2mgmt.h" /* Converts 802.11 format rate specifications to prism2 */ -#define p80211rate_to_p2bit(n) ((((n)&~BIT(7)) == 2) ? BIT(0) : \ - (((n)&~BIT(7)) == 4) ? BIT(1) : \ - (((n)&~BIT(7)) == 11) ? BIT(2) : \ - (((n)&~BIT(7)) == 22) ? BIT(3) : 0) +#define p80211rate_to_p2bit(n) ((((n) & ~BIT(7)) == 2) ? BIT(0) : \ + (((n) & ~BIT(7)) == 4) ? BIT(1) : \ + (((n) & ~BIT(7)) == 11) ? BIT(2) : \ + (((n) & ~BIT(7)) == 22) ? BIT(3) : 0) /*---------------------------------------------------------------- -* prism2mgmt_scan -* -* Initiate a scan for BSSs. -* -* This function corresponds to MLME-scan.request and part of -* MLME-scan.confirm. As far as I can tell in the standard, there -* are no restrictions on when a scan.request may be issued. 
We have -* to handle in whatever state the driver/MAC happen to be. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -* interrupt -----------------------------------------------------------------*/ + * prism2mgmt_scan + * + * Initiate a scan for BSSs. + * + * This function corresponds to MLME-scan.request and part of + * MLME-scan.confirm. As far as I can tell in the standard, there + * are no restrictions on when a scan.request may be issued. We have + * to handle in whatever state the driver/MAC happen to be. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. + * Side effects: + * + * Call context: + * process thread (usually) + * interrupt + *---------------------------------------------------------------- + */ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp) { int result = 0; @@ -122,7 +123,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp) int i, timeout; int istmpenable = 0; - struct hfa384x_HostScanRequest_data scanreq; + struct hfa384x_host_scan_request_data scanreq; /* gatekeeper check */ if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major, @@ -184,7 +185,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp) /* set up the txrate to be 2MBPS. Should be fastest basicrate... */ word = HFA384x_RATEBIT_2; - scanreq.txRate = cpu_to_le16(word); + scanreq.tx_rate = cpu_to_le16(word); /* set up the channel list */ word = 0; @@ -196,7 +197,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp) /* channel 1 is BIT 0 ... channel 14 is BIT 13 */ word |= (1 << (channel - 1)); } - scanreq.channelList = cpu_to_le16(word); + scanreq.channel_list = cpu_to_le16(word); /* set up the ssid, if present. */ scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len); @@ -292,7 +293,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp) result = hfa384x_drvr_setconfig(hw, HFA384x_RID_HOSTSCAN, &scanreq, - sizeof(struct hfa384x_HostScanRequest_data)); + sizeof(scanreq)); if (result) { netdev_err(wlandev->netdev, "setconfig(SCANREQUEST) failed. result=%d\n", @@ -347,31 +348,32 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_scan_results -* -* Retrieve the BSS description for one of the BSSs identified in -* a scan. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -* interrupt -----------------------------------------------------------------*/ + * prism2mgmt_scan_results + * + * Retrieve the BSS description for one of the BSSs identified in + * a scan. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. 
+ * Side effects: + * + * Call context: + * process thread (usually) + * interrupt + *---------------------------------------------------------------- + */ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp) { int result = 0; struct p80211msg_dot11req_scan_results *req; struct hfa384x *hw = wlandev->priv; - struct hfa384x_HScanResultSub *item = NULL; + struct hfa384x_hscan_result_sub *item = NULL; int count; @@ -425,8 +427,8 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp) #define REQBASICRATE(N) \ do { \ if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \ - item->supprates[(N)-1])) { \ - req->basicrate ## N .data = item->supprates[(N)-1]; \ + item->supprates[(N) - 1])) { \ + req->basicrate ## N .data = item->supprates[(N) - 1]; \ req->basicrate ## N .status = \ P80211ENUM_msgitem_status_data_ok; \ } \ @@ -444,7 +446,7 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp) #define REQSUPPRATE(N) \ do { \ if (count >= N) { \ - req->supprate ## N .data = item->supprates[(N)-1]; \ + req->supprate ## N .data = item->supprates[(N) - 1]; \ req->supprate ## N .status = \ P80211ENUM_msgitem_status_data_ok; \ } \ @@ -507,24 +509,25 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_start -* -* Start a BSS. Any station can do this for IBSS, only AP for ESS. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -* interrupt -----------------------------------------------------------------*/ + * prism2mgmt_start + * + * Start a BSS. Any station can do this for IBSS, only AP for ESS. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. + * Side effects: + * + * Call context: + * process thread (usually) + * interrupt + *---------------------------------------------------------------- + */ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp) { int result = 0; @@ -580,7 +583,7 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp) /* beacon period */ word = msg->beaconperiod.data; - result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNint, word); + result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNINT, word); if (result) { netdev_err(wlandev->netdev, "Failed to set beacon period=%d.\n", word); @@ -689,23 +692,24 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_readpda -* -* Collect the PDA data and put it in the message. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -----------------------------------------------------------------*/ + * prism2mgmt_readpda + * + * Collect the PDA data and put it in the message. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. 
+ * >0 an error occurred while handling the message. + * Side effects: + * + * Call context: + * process thread (usually) + *---------------------------------------------------------------- + */ int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp) { struct hfa384x *hw = wlandev->priv; @@ -748,30 +752,31 @@ int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_ramdl_state -* -* Establishes the beginning/end of a card RAM download session. -* -* It is expected that the ramdl_write() function will be called -* one or more times between the 'enable' and 'disable' calls to -* this function. -* -* Note: This function should not be called when a mac comm port -* is active. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -----------------------------------------------------------------*/ + * prism2mgmt_ramdl_state + * + * Establishes the beginning/end of a card RAM download session. + * + * It is expected that the ramdl_write() function will be called + * one or more times between the 'enable' and 'disable' calls to + * this function. + * + * Note: This function should not be called when a mac comm port + * is active. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. + * Side effects: + * + * Call context: + * process thread (usually) + *---------------------------------------------------------------- + */ int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp) { struct hfa384x *hw = wlandev->priv; @@ -808,25 +813,26 @@ int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_ramdl_write -* -* Writes a buffer to the card RAM using the download state. This -* is for writing code to card RAM. To just read or write raw data -* use the aux functions. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -----------------------------------------------------------------*/ + * prism2mgmt_ramdl_write + * + * Writes a buffer to the card RAM using the download state. This + * is for writing code to card RAM. To just read or write raw data + * use the aux functions. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. 
+ * Side effects: + * + * Call context: + * process thread (usually) + *---------------------------------------------------------------- + */ int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp) { struct hfa384x *hw = wlandev->priv; @@ -864,30 +870,31 @@ int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_flashdl_state -* -* Establishes the beginning/end of a card Flash download session. -* -* It is expected that the flashdl_write() function will be called -* one or more times between the 'enable' and 'disable' calls to -* this function. -* -* Note: This function should not be called when a mac comm port -* is active. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -----------------------------------------------------------------*/ + * prism2mgmt_flashdl_state + * + * Establishes the beginning/end of a card Flash download session. + * + * It is expected that the flashdl_write() function will be called + * one or more times between the 'enable' and 'disable' calls to + * this function. + * + * Note: This function should not be called when a mac comm port + * is active. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. + * Side effects: + * + * Call context: + * process thread (usually) + *---------------------------------------------------------------- + */ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp) { int result = 0; @@ -942,23 +949,24 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_flashdl_write -* -* -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -----------------------------------------------------------------*/ + * prism2mgmt_flashdl_write + * + * + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. + * Side effects: + * + * Call context: + * process thread (usually) + *---------------------------------------------------------------- + */ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp) { struct hfa384x *hw = wlandev->priv; @@ -1001,24 +1009,25 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_autojoin -* -* Associate with an ESS. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. 
-* Side effects: -* -* Call context: -* process thread (usually) -* interrupt -----------------------------------------------------------------*/ + * prism2mgmt_autojoin + * + * Associate with an ESS. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. + * Side effects: + * + * Call context: + * process thread (usually) + * interrupt + *---------------------------------------------------------------- + */ int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp) { struct hfa384x *hw = wlandev->priv; @@ -1072,24 +1081,25 @@ int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp) } /*---------------------------------------------------------------- -* prism2mgmt_wlansniff -* -* Start or stop sniffing. -* -* Arguments: -* wlandev wlan device structure -* msgp ptr to msg buffer -* -* Returns: -* 0 success and done -* <0 success, but we're waiting for something to finish. -* >0 an error occurred while handling the message. -* Side effects: -* -* Call context: -* process thread (usually) -* interrupt -----------------------------------------------------------------*/ + * prism2mgmt_wlansniff + * + * Start or stop sniffing. + * + * Arguments: + * wlandev wlan device structure + * msgp ptr to msg buffer + * + * Returns: + * 0 success and done + * <0 success, but we're waiting for something to finish. + * >0 an error occurred while handling the message. + * Side effects: + * + * Call context: + * process thread (usually) + * interrupt + *---------------------------------------------------------------- + */ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp) { int result = 0; diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h index cc1ac7a60dfeb59ab706ff23ebdf3ea470a92624..88b979ff68b3c998d9df99fcdc5f90508a2eb036 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.h +++ b/drivers/staging/wlan-ng/prism2mgmt.h @@ -1,61 +1,61 @@ /* prism2mgmt.h -* -* Declares the mgmt command handler functions -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. 
-* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. -* -* -------------------------------------------------------------------- -* -* This file contains the constants and data structures for interaction -* with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC). -* The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset. -* -* [Implementation and usage notes] -* -* [References] -* CW10 Programmer's Manual v1.5 -* IEEE 802.11 D10.0 -* -* -------------------------------------------------------------------- -*/ + * + * Declares the mgmt command handler functions + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * This file contains the constants and data structures for interaction + * with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC). + * The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset. 
+ * + * [Implementation and usage notes] + * + * [References] + * CW10 Programmer's Manual v1.5 + * IEEE 802.11 D10.0 + * + * -------------------------------------------------------------------- + */ #ifndef _PRISM2MGMT_H #define _PRISM2MGMT_H @@ -65,7 +65,8 @@ extern int prism2_reset_settletime; u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate); -void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf); +void prism2sta_ev_info(struct wlandevice *wlandev, + struct hfa384x_inf_frame *inf); void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status); void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status); void prism2sta_ev_alloc(struct wlandevice *wlandev); @@ -83,9 +84,11 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp); int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp); /*--------------------------------------------------------------- -* conversion functions going between wlan message data types and -* Prism2 data types ----------------------------------------------------------------*/ + * conversion functions going between wlan message data types and + * Prism2 data types + *--------------------------------------------------------------- + */ + /* byte area conversion functions*/ void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len); diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c index 63ab6bc886543dd16310481b380741c56f9f1cef..8ea6a647d037250480d755b1817ee7123cd83666 100644 --- a/drivers/staging/wlan-ng/prism2mib.c +++ b/drivers/staging/wlan-ng/prism2mib.c @@ -1,54 +1,54 @@ /* src/prism2/driver/prism2mib.c -* -* Management request for mibset/mibget -* -* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. -* -------------------------------------------------------------------- -* -* linux-wlan -* -* The contents of this file are subject to the Mozilla Public -* License Version 1.1 (the "License"); you may not use this file -* except in compliance with the License. You may obtain a copy of -* the License at http://www.mozilla.org/MPL/ -* -* Software distributed under the License is distributed on an "AS -* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or -* implied. See the License for the specific language governing -* rights and limitations under the License. -* -* Alternatively, the contents of this file may be used under the -* terms of the GNU Public License version 2 (the "GPL"), in which -* case the provisions of the GPL are applicable instead of the -* above. If you wish to allow the use of your version of this file -* only under the terms of the GPL and not to allow others to use -* your version of this file under the MPL, indicate your decision -* by deleting the provisions above and replace them with the notice -* and other provisions required by the GPL. If you do not delete -* the provisions above, a recipient may use your version of this -* file under either the MPL or the GPL. -* -* -------------------------------------------------------------------- -* -* Inquiries regarding the linux-wlan Open Source project can be -* made directly to: -* -* AbsoluteValue Systems Inc. -* info@linux-wlan.com -* http://www.linux-wlan.com -* -* -------------------------------------------------------------------- -* -* Portions of the development of this software were funded by -* Intersil Corporation as part of PRISM(R) chipset product development. 
-* -* -------------------------------------------------------------------- -* -* The functions in this file handle the mibset/mibget management -* functions. -* -* -------------------------------------------------------------------- -*/ + * + * Management request for mibset/mibget + * + * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. + * -------------------------------------------------------------------- + * + * linux-wlan + * + * The contents of this file are subject to the Mozilla Public + * License Version 1.1 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of + * the License at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU Public License version 2 (the "GPL"), in which + * case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision + * by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL. If you do not delete + * the provisions above, a recipient may use your version of this + * file under either the MPL or the GPL. + * + * -------------------------------------------------------------------- + * + * Inquiries regarding the linux-wlan Open Source project can be + * made directly to: + * + * AbsoluteValue Systems Inc. + * info@linux-wlan.com + * http://www.linux-wlan.com + * + * -------------------------------------------------------------------- + * + * Portions of the development of this software were funded by + * Intersil Corporation as part of PRISM(R) chipset product development. + * + * -------------------------------------------------------------------- + * + * The functions in this file handle the mibset/mibget management + * functions. 
+ * + * -------------------------------------------------------------------- + */ #include #include @@ -709,7 +709,7 @@ static int prism2mib_priv(struct mibrec *mib, switch (mib->did) { case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{ - struct hfa384x_WPAData wpa; + struct hfa384x_wpa_data wpa; if (isget) { hfa384x_drvr_getconfig(hw, diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c index e1b4a94292ff9b6b8b4a40cb44d0f9f93f7e67b0..984804b92e054934e51785f828c9b5a7ee7bc1eb 100644 --- a/drivers/staging/wlan-ng/prism2sta.c +++ b/drivers/staging/wlan-ng/prism2sta.c @@ -104,32 +104,33 @@ static void prism2sta_reset(struct wlandevice *wlandev); static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep); -static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg); +static int prism2sta_mlmerequest(struct wlandevice *wlandev, + struct p80211msg *msg); static int prism2sta_getcardinfo(struct wlandevice *wlandev); static int prism2sta_globalsetup(struct wlandevice *wlandev); static int prism2sta_setmulticast(struct wlandevice *wlandev, struct net_device *dev); static void prism2sta_inf_handover(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_tallies(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_scanresults(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_chinforesults(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_linkstatus(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_assocstatus(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_authreq(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); static void prism2sta_inf_psusercnt(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf); + struct hfa384x_inf_frame *inf); /* * prism2sta_open @@ -278,7 +279,8 @@ static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb, * Call context: * process thread */ -static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg) +static int prism2sta_mlmerequest(struct wlandevice *wlandev, + struct p80211msg *msg) { struct hfa384x *hw = wlandev->priv; @@ -370,9 +372,10 @@ static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *m qualmsg->noise.status = P80211ENUM_msgitem_status_data_ok; - qualmsg->link.data = le16_to_cpu(hw->qual.CQ_currBSS); - qualmsg->level.data = le16_to_cpu(hw->qual.ASL_currBSS); - qualmsg->noise.data = le16_to_cpu(hw->qual.ANL_currFC); + qualmsg->link.data = le16_to_cpu(hw->qual.cq_curr_bss); + qualmsg->level.data = + le16_to_cpu(hw->qual.asl_curr_bss); + qualmsg->noise.data = le16_to_cpu(hw->qual.anl_curr_fc); qualmsg->txrate.data = hw->txrate; break; @@ -606,8 +609,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->ident_nic.minor = 
le16_to_cpu(hw->ident_nic.minor); netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n", - hw->ident_nic.id, hw->ident_nic.major, - hw->ident_nic.minor, hw->ident_nic.variant); + hw->ident_nic.id, hw->ident_nic.major, + hw->ident_nic.minor, hw->ident_nic.variant); /* Primary f/w identity */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY, @@ -625,8 +628,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor); netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n", - hw->ident_pri_fw.id, hw->ident_pri_fw.major, - hw->ident_pri_fw.minor, hw->ident_pri_fw.variant); + hw->ident_pri_fw.id, hw->ident_pri_fw.major, + hw->ident_pri_fw.minor, hw->ident_pri_fw.variant); /* Station (Secondary?) f/w identity */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY, @@ -639,7 +642,7 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) if (hw->ident_nic.id < 0x8000) { netdev_err(wlandev->netdev, - "FATAL: Card is not an Intersil Prism2/2.5/3\n"); + "FATAL: Card is not an Intersil Prism2/2.5/3\n"); result = -1; goto failed; } @@ -651,19 +654,19 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor); /* strip out the 'special' variant bits */ - hw->mm_mods = hw->ident_sta_fw.variant & (BIT(14) | BIT(15)); - hw->ident_sta_fw.variant &= ~((u16)(BIT(14) | BIT(15))); + hw->mm_mods = hw->ident_sta_fw.variant & GENMASK(15, 14); + hw->ident_sta_fw.variant &= ~((u16)GENMASK(15, 14)); if (hw->ident_sta_fw.id == 0x1f) { netdev_info(wlandev->netdev, - "ident: sta f/w: id=0x%02x %d.%d.%d\n", - hw->ident_sta_fw.id, hw->ident_sta_fw.major, - hw->ident_sta_fw.minor, hw->ident_sta_fw.variant); + "ident: sta f/w: id=0x%02x %d.%d.%d\n", + hw->ident_sta_fw.id, hw->ident_sta_fw.major, + hw->ident_sta_fw.minor, hw->ident_sta_fw.variant); } else { netdev_info(wlandev->netdev, - "ident: ap f/w: id=0x%02x %d.%d.%d\n", - hw->ident_sta_fw.id, hw->ident_sta_fw.major, - hw->ident_sta_fw.minor, hw->ident_sta_fw.variant); + "ident: ap f/w: id=0x%02x %d.%d.%d\n", + hw->ident_sta_fw.id, hw->ident_sta_fw.major, + hw->ident_sta_fw.minor, hw->ident_sta_fw.variant); netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmware loaded!\n"); goto failed; } @@ -687,10 +690,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top); netdev_info(wlandev->netdev, - "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", - hw->cap_sup_mfi.role, hw->cap_sup_mfi.id, - hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom, - hw->cap_sup_mfi.top); + "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", + hw->cap_sup_mfi.role, hw->cap_sup_mfi.id, + hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom, + hw->cap_sup_mfi.top); /* Compatibility range, Controller supplier */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE, @@ -711,10 +714,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top); netdev_info(wlandev->netdev, - "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", - hw->cap_sup_cfi.role, hw->cap_sup_cfi.id, - hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom, - hw->cap_sup_cfi.top); + "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", + hw->cap_sup_cfi.role, hw->cap_sup_cfi.id, + hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom, + hw->cap_sup_cfi.top); /* Compatibility range, Primary f/w supplier */ result = 
hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE, @@ -735,10 +738,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top); netdev_info(wlandev->netdev, - "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", - hw->cap_sup_pri.role, hw->cap_sup_pri.id, - hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom, - hw->cap_sup_pri.top); + "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", + hw->cap_sup_pri.role, hw->cap_sup_pri.id, + hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom, + hw->cap_sup_pri.top); /* Compatibility range, Station f/w supplier */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE, @@ -791,10 +794,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top); netdev_info(wlandev->netdev, - "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", - hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id, - hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom, - hw->cap_act_pri_cfi.top); + "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", + hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id, + hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom, + hw->cap_act_pri_cfi.top); /* Compatibility range, sta f/w actor, CFI supplier */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES, @@ -815,10 +818,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top); netdev_info(wlandev->netdev, - "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", - hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id, - hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom, - hw->cap_act_sta_cfi.top); + "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", + hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id, + hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom, + hw->cap_act_sta_cfi.top); /* Compatibility range, sta f/w actor, MFI supplier */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES, @@ -839,10 +842,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev) hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top); netdev_info(wlandev->netdev, - "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", - hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id, - hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom, - hw->cap_act_sta_mfi.top); + "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n", + hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id, + hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom, + hw->cap_act_sta_mfi.top); /* Serial Number */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER, @@ -920,7 +923,7 @@ static int prism2sta_globalsetup(struct wlandevice *wlandev) } static int prism2sta_setmulticast(struct wlandevice *wlandev, - struct net_device *dev) + struct net_device *dev) { int result = 0; struct hfa384x *hw = wlandev->priv; @@ -962,7 +965,7 @@ static int prism2sta_setmulticast(struct wlandevice *wlandev, * interrupt */ static void prism2sta_inf_handover(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { pr_debug("received infoframe:HANDOVER (unhandled)\n"); } @@ -985,7 +988,7 @@ static void prism2sta_inf_handover(struct wlandevice *wlandev, * interrupt */ static void prism2sta_inf_tallies(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; u16 *src16; 
@@ -999,7 +1002,7 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev, * record length of the info record. */ - cnt = sizeof(struct hfa384x_CommTallies32) / sizeof(u32); + cnt = sizeof(struct hfa384x_comm_tallies_32) / sizeof(u32); if (inf->framelen > 22) { dst = (u32 *)&hw->tallies; src32 = (u32 *)&inf->info.commtallies32; @@ -1031,19 +1034,19 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev, * interrupt */ static void prism2sta_inf_scanresults(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; int nbss; - struct hfa384x_ScanResult *sr = &(inf->info.scanresult); + struct hfa384x_scan_result *sr = &inf->info.scanresult; int i; - struct hfa384x_JoinRequest_data joinreq; + struct hfa384x_join_request_data joinreq; int result; /* Get the number of results, first in bytes, then in results */ nbss = (inf->framelen * sizeof(u16)) - sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason); - nbss /= sizeof(struct hfa384x_ScanResultSub); + nbss /= sizeof(struct hfa384x_scan_result_sub); /* Print em */ pr_debug("rx scanresults, reason=%d, nbss=%d:\n", @@ -1064,7 +1067,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev, &joinreq, HFA384x_RID_JOINREQUEST_LEN); if (result) { netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n", - result); + result); } } @@ -1086,7 +1089,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev, * interrupt */ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; int nbss; @@ -1099,7 +1102,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev, kfree(hw->scanresults); - hw->scanresults = kmemdup(inf, sizeof(struct hfa384x_InfFrame), GFP_ATOMIC); + hw->scanresults = kmemdup(inf, sizeof(*inf), GFP_ATOMIC); if (nbss == 0) nbss = -1; @@ -1127,7 +1130,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev, * interrupt */ static void prism2sta_inf_chinforesults(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; unsigned int i, n; @@ -1136,8 +1139,8 @@ static void prism2sta_inf_chinforesults(struct wlandevice *wlandev, le16_to_cpu(inf->info.chinforesult.scanchannels); for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) { - struct hfa384x_ChInfoResultSub *result; - struct hfa384x_ChInfoResultSub *chinforesult; + struct hfa384x_ch_info_result_sub *result; + struct hfa384x_ch_info_result_sub *chinforesult; int chan; if (!(hw->channel_info.results.scanchannels & (1 << i))) @@ -1179,10 +1182,10 @@ void prism2sta_processing_defer(struct work_struct *data) /* First let's process the auth frames */ { struct sk_buff *skb; - struct hfa384x_InfFrame *inf; + struct hfa384x_inf_frame *inf; while ((skb = skb_dequeue(&hw->authq))) { - inf = (struct hfa384x_InfFrame *)skb->data; + inf = (struct hfa384x_inf_frame *)skb->data; prism2sta_inf_authreq_defer(wlandev, inf); } @@ -1294,7 +1297,7 @@ void prism2sta_processing_defer(struct work_struct *data) */ if (wlandev->netdev->type == ARPHRD_ETHER) netdev_info(wlandev->netdev, - "linkstatus=DISCONNECTED (unhandled)\n"); + "linkstatus=DISCONNECTED (unhandled)\n"); wlandev->macmode = WLAN_MACMODE_NONE; netif_carrier_off(wlandev->netdev); @@ -1391,7 +1394,7 @@ void prism2sta_processing_defer(struct work_struct *data) * Disable Transmits, Ignore receives of 
data frames */ if (hw->join_ap && --hw->join_retries > 0) { - struct hfa384x_JoinRequest_data joinreq; + struct hfa384x_join_request_data joinreq; joinreq = hw->joinreq; /* Send the join request */ @@ -1415,7 +1418,7 @@ void prism2sta_processing_defer(struct work_struct *data) default: /* This is bad, IO port problems? */ netdev_warn(wlandev->netdev, - "unknown linkstatus=0x%02x\n", hw->link_status); + "unknown linkstatus=0x%02x\n", hw->link_status); return; } @@ -1440,7 +1443,7 @@ void prism2sta_processing_defer(struct work_struct *data) * interrupt */ static void prism2sta_inf_linkstatus(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; @@ -1468,10 +1471,10 @@ static void prism2sta_inf_linkstatus(struct wlandevice *wlandev, * interrupt */ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; - struct hfa384x_AssocStatus rec; + struct hfa384x_assoc_status rec; int i; memcpy(&rec, &inf->info.assocstatus, sizeof(rec)); @@ -1529,7 +1532,7 @@ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev, * */ static void prism2sta_inf_authreq(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; struct sk_buff *skb; @@ -1544,10 +1547,10 @@ static void prism2sta_inf_authreq(struct wlandevice *wlandev, } static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; - struct hfa384x_authenticateStation_data rec; + struct hfa384x_authenticate_station_data rec; int i, added, result, cnt; u8 *addr; @@ -1718,7 +1721,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, * interrupt */ static void prism2sta_inf_psusercnt(struct wlandevice *wlandev, - struct hfa384x_InfFrame *inf) + struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; @@ -1742,7 +1745,8 @@ static void prism2sta_inf_psusercnt(struct wlandevice *wlandev, * Call context: * interrupt */ -void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf) +void prism2sta_ev_info(struct wlandevice *wlandev, + struct hfa384x_inf_frame *inf) { inf->infotype = le16_to_cpu(inf->infotype); /* Dispatch */ @@ -1785,7 +1789,7 @@ void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf) break; default: netdev_warn(wlandev->netdev, - "Unknown info type=0x%02x\n", inf->infotype); + "Unknown info type=0x%02x\n", inf->infotype); break; } } @@ -1859,32 +1863,32 @@ void prism2sta_ev_alloc(struct wlandevice *wlandev) } /* -* create_wlan -* -* Called at module init time. This creates the struct wlandevice structure -* and initializes it with relevant bits. -* -* Arguments: -* none -* -* Returns: -* the created struct wlandevice structure. -* -* Side effects: -* also allocates the priv/hw structures. -* -* Call context: -* process thread -* -*/ + * create_wlan + * + * Called at module init time. This creates the struct wlandevice structure + * and initializes it with relevant bits. + * + * Arguments: + * none + * + * Returns: + * the created struct wlandevice structure. + * + * Side effects: + * also allocates the priv/hw structures. 
+ * + * Call context: + * process thread + * + */ static struct wlandevice *create_wlan(void) { struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; /* Alloc our structures */ - wlandev = kzalloc(sizeof(struct wlandevice), GFP_KERNEL); - hw = kzalloc(sizeof(struct hfa384x), GFP_KERNEL); + wlandev = kzalloc(sizeof(*wlandev), GFP_KERNEL); + hw = kzalloc(sizeof(*hw), GFP_KERNEL); if (!wlandev || !hw) { kfree(wlandev); @@ -1943,9 +1947,9 @@ void prism2sta_commsqual_defer(struct work_struct *data) } pr_debug("commsqual %d %d %d\n", - le16_to_cpu(hw->qual.CQ_currBSS), - le16_to_cpu(hw->qual.ASL_currBSS), - le16_to_cpu(hw->qual.ANL_currFC)); + le16_to_cpu(hw->qual.cq_curr_bss), + le16_to_cpu(hw->qual.asl_curr_bss), + le16_to_cpu(hw->qual.anl_curr_fc)); } /* Get the signal rate */ diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h index 85079fea71524fd64189c3546ce190f024ccda59..7a80a90f229ff127d33ec94e48f4e235f104f7d9 100644 --- a/drivers/staging/xgifb/XGI_main.h +++ b/drivers/staging/xgifb/XGI_main.h @@ -139,7 +139,7 @@ static const struct _XGIbios_mode { static const unsigned short XGI310paneltype[] = { LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024, - LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960, + LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960, LCD_1152x768, LCD_1400x1050, LCD_1280x768, LCD_1600x1200, LCD_1024x768, LCD_1024x768, LCD_1024x768}; @@ -174,7 +174,7 @@ static const struct _XGI_tvtype { {"NTSC", 2}, {"pal", 1}, {"ntsc", 2}, - {"\0", -1} + {"\0", -1} }; static const struct _XGI_vrate { @@ -183,44 +183,44 @@ static const struct _XGI_vrate { u16 yres; u16 refresh; } XGIfb_vrate[] = { - {1, 640, 480, 60}, {2, 640, 480, 72}, - {3, 640, 480, 75}, {4, 640, 480, 85}, + {1, 640, 480, 60}, {2, 640, 480, 72}, + {3, 640, 480, 75}, {4, 640, 480, 85}, {5, 640, 480, 100}, {6, 640, 480, 120}, - {7, 640, 480, 160}, {8, 640, 480, 200}, + {7, 640, 480, 160}, {8, 640, 480, 200}, - {1, 720, 480, 60}, - {1, 720, 576, 58}, - {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85}, - {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75}, - {4, 800, 600, 85}, {5, 800, 600, 100}, - {6, 800, 600, 120}, {7, 800, 600, 160}, + {1, 720, 480, 60}, + {1, 720, 576, 58}, + {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85}, + {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75}, + {4, 800, 600, 85}, {5, 800, 600, 100}, + {6, 800, 600, 120}, {7, 800, 600, 160}, - {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75}, - {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120}, - {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85}, - {1, 1024, 600, 60}, - {1, 1152, 768, 60}, - {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85}, - {1, 1280, 768, 60}, + {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75}, + {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120}, + {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85}, + {1, 1024, 600, 60}, + {1, 1152, 768, 60}, + {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85}, + {1, 1280, 768, 60}, {1, 1280, 1024, 60}, {2, 1280, 1024, 75}, {3, 1280, 1024, 85}, - {1, 1280, 960, 70}, - {1, 1400, 1050, 60}, - {1, 1600, 1200, 60}, {2, 1600, 1200, 65}, + {1, 1280, 960, 70}, + {1, 1400, 1050, 60}, + {1, 1600, 1200, 60}, {2, 1600, 1200, 65}, {3, 1600, 1200, 70}, {4, 1600, 1200, 75}, - {5, 1600, 1200, 85}, {6, 1600, 1200, 100}, + {5, 1600, 1200, 85}, {6, 1600, 1200, 100}, {7, 1600, 1200, 120}, - {1, 1920, 1440, 60}, {2, 1920, 1440, 65}, + {1, 1920, 1440, 60}, {2, 1920, 
1440, 65}, {3, 1920, 1440, 70}, {4, 1920, 1440, 75}, - {5, 1920, 1440, 85}, {6, 1920, 1440, 100}, - {1, 2048, 1536, 60}, {2, 2048, 1536, 65}, + {5, 1920, 1440, 85}, {6, 1920, 1440, 100}, + {1, 2048, 1536, 60}, {2, 2048, 1536, 65}, {3, 2048, 1536, 70}, {4, 2048, 1536, 75}, - {5, 2048, 1536, 85}, - {0, 0, 0, 0} + {5, 2048, 1536, 85}, + {0, 0, 0, 0} }; static const struct _XGI_TV_filter { diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c index 0c78491ff5a1c6f0a42d0d55e51390fb4410bbd2..777cd6e116944b7ac62559d3d86e7e35a95cb5b8 100644 --- a/drivers/staging/xgifb/XGI_main_26.c +++ b/drivers/staging/xgifb/XGI_main_26.c @@ -56,8 +56,8 @@ static inline void dumpVGAReg(struct xgifb_video_info *xgifb_info) /* --------------- Hardware Access Routines -------------------------- */ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr, - struct xgi_hw_device_info *HwDeviceExtension, - unsigned char modeno) + struct xgi_hw_device_info *HwDeviceExtension, + unsigned char modeno) { unsigned short ModeNo = modeno; unsigned short ModeIdIndex = 0, ClockIndex = 0; @@ -68,7 +68,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr, XGI_SearchModeID(ModeNo, &ModeIdIndex); RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, - ModeIdIndex, XGI_Pr); + ModeIdIndex, XGI_Pr); ClockIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK; @@ -76,11 +76,11 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr, } static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr, - struct xgi_hw_device_info *HwDeviceExtension, - unsigned char modeno, - u32 *left_margin, u32 *right_margin, u32 *upper_margin, - u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync, - u32 *vmode) + struct xgi_hw_device_info *HwDeviceExtension, + unsigned char modeno, u32 *left_margin, + u32 *right_margin, u32 *upper_margin, + u32 *lower_margin, u32 *hsync_len, + u32 *vsync_len, u32 *sync, u32 *vmode) { unsigned short ModeNo = modeno; unsigned short ModeIdIndex, index = 0; @@ -95,7 +95,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr, if (!XGI_SearchModeID(ModeNo, &ModeIdIndex)) return 0; RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, - ModeIdIndex, XGI_Pr); + ModeIdIndex, XGI_Pr); index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC; sr_data = XGI_CRT1Table[index].CR[5]; @@ -105,7 +105,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr, cr_data = XGI_CRT1Table[index].CR[3]; /* Horizontal retrace (=sync) start */ - HRS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0xC0) << 2); + HRS = (cr_data & 0xff) | ((unsigned short)(sr_data & 0xC0) << 2); F = HRS - HDE - 3; sr_data = XGI_CRT1Table[index].CR[6]; @@ -115,8 +115,8 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr, cr_data2 = XGI_CRT1Table[index].CR[4]; /* Horizontal blank end */ - HBE = (cr_data & 0x1f) | ((unsigned short) (cr_data2 & 0x80) >> 2) - | ((unsigned short) (sr_data & 0x03) << 6); + HBE = (cr_data & 0x1f) | ((unsigned short)(cr_data2 & 0x80) >> 2) + | ((unsigned short)(sr_data & 0x03) << 6); /* Horizontal retrace (=sync) end */ HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3); @@ -142,15 +142,15 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr, cr_data = XGI_CRT1Table[index].CR[10]; /* Vertical retrace (=sync) start */ - VRS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x04) << 6) - | ((unsigned short) (cr_data2 & 0x80) << 2) - | ((unsigned short) 
(sr_data & 0x08) << 7); + VRS = (cr_data & 0xff) | ((unsigned short)(cr_data2 & 0x04) << 6) + | ((unsigned short)(cr_data2 & 0x80) << 2) + | ((unsigned short)(sr_data & 0x08) << 7); F = VRS + 1 - VDE; cr_data = XGI_CRT1Table[index].CR[13]; /* Vertical blank end */ - VBE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x10) << 4); + VBE = (cr_data & 0xff) | ((unsigned short)(sr_data & 0x10) << 4); temp = VBE - ((VDE - 1) & 511); B = (temp > 0) ? temp : (temp + 512); @@ -231,11 +231,11 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info) { int i = 0; - while ((XGIbios_mode[i].mode_no != 0) - && (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) { - if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) - && (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) - && (XGIbios_mode[i].bpp == 8)) { + while ((XGIbios_mode[i].mode_no != 0) && + (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) { + if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) && + (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) && + (XGIbios_mode[i].bpp == 8)) { return i; } i++; @@ -384,9 +384,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex) return -1; break; case 640: - if ((XGIbios_mode[myindex].yres != 400) - && (XGIbios_mode[myindex].yres - != 480)) + if ((XGIbios_mode[myindex].yres != 400) && + (XGIbios_mode[myindex].yres != 480)) return -1; break; case 800: @@ -518,7 +517,7 @@ static void XGIfb_search_crt2type(const char *name) { int i = 0; - if (name == NULL) + if (!name) return; while (XGI_crt2type[i].type_no != -1) { @@ -562,7 +561,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info, != 1)) { pr_debug("Adjusting rate from %d down to %d\n", rate, - XGIfb_vrate[i-1].refresh); + XGIfb_vrate[i - 1].refresh); xgifb_info->rate_idx = XGIfb_vrate[i - 1].idx; xgifb_info->refresh_rate = @@ -589,7 +588,7 @@ static void XGIfb_search_tvstd(const char *name) { int i = 0; - if (name == NULL) + if (!name) return; while (XGI_tvtype[i].type_no != -1) { @@ -683,7 +682,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info) xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30); xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31); xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33, - (xgifb_info->rate_idx & 0x0F)); + (xgifb_info->rate_idx & 0x0F)); } static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info) @@ -730,7 +729,6 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info) if (xgifb_info->display2 == XGIFB_DISP_TV && xgifb_info->hasVB == HASVB_301) { - reg = xgifb_reg_get(XGIPART4, 0x01); if (reg < 0xB0) { /* Set filter for XGI301 */ @@ -763,16 +761,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info) 0x01); if (xgifb_info->TV_type == TVMODE_NTSC) { - xgifb_reg_and(XGIPART2, 0x3a, 0x1f); if (xgifb_info->TV_plug == TVPLUG_SVIDEO) { - xgifb_reg_and(XGIPART2, 0x30, 0xdf); } else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE) { - xgifb_reg_or(XGIPART2, 0x30, 0x20); switch (xgifb_info->video_width) { @@ -822,16 +817,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info) } } else if (xgifb_info->TV_type == TVMODE_PAL) { - xgifb_reg_and(XGIPART2, 0x3A, 0x1F); if (xgifb_info->TV_plug == TVPLUG_SVIDEO) { - xgifb_reg_and(XGIPART2, 0x30, 0xDF); } else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE) { - xgifb_reg_or(XGIPART2, 0x30, 0x20); switch (xgifb_info->video_width) { @@ -912,7 +904,7 @@ static void XGIfb_post_setmode(struct 
xgifb_video_info *xgifb_info) } static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive, - struct fb_info *info) + struct fb_info *info) { struct xgifb_video_info *xgifb_info = info->par; struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info; @@ -945,17 +937,15 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive, if (var->pixclock) { drate = 1000000000 / var->pixclock; hrate = (drate * 1000) / htotal; - xgifb_info->refresh_rate = (unsigned int) (hrate * 2 + xgifb_info->refresh_rate = (unsigned int)(hrate * 2 / vtotal); } else { xgifb_info->refresh_rate = 60; } pr_debug("Change mode to %dx%dx%d-%dHz\n", - var->xres, - var->yres, - var->bits_per_pixel, - xgifb_info->refresh_rate); + var->xres, var->yres, var->bits_per_pixel, + xgifb_info->refresh_rate); old_mode = xgifb_info->mode_idx; xgifb_info->mode_idx = 0; @@ -992,7 +982,6 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive, } if (isactive) { - XGIfb_pre_setmode(xgifb_info); if (XGISetModeNew(xgifb_info, hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no) @@ -1064,7 +1053,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive, break; } } - XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/ + XGIfb_bpp_to_var(xgifb_info, var); /* update ARGB info */ dumpVGAReg(xgifb_info); return 0; @@ -1150,7 +1139,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red, } break; case 16: - ((u32 *) (info->pseudo_palette))[regno] = ((red & 0xf800)) + ((u32 *)(info->pseudo_palette))[regno] = ((red & 0xf800)) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); break; @@ -1158,7 +1147,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red, red >>= 8; green >>= 8; blue >>= 8; - ((u32 *) (info->pseudo_palette))[regno] = (red << 16) | (green + ((u32 *)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | (blue); break; } @@ -1168,7 +1157,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red, /* ----------- FBDev related routines for all series ---------- */ static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con, - struct fb_info *info) + struct fb_info *info) { struct xgifb_video_info *xgifb_info = info->par; @@ -1250,7 +1239,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) drate = 1000000000 / var->pixclock; hrate = (drate * 1000) / htotal; xgifb_info->refresh_rate = - (unsigned int) (hrate * 2 / vtotal); + (unsigned int)(hrate * 2 / vtotal); pr_debug( "%s: pixclock = %d ,htotal=%d, vtotal=%d\n" "%s: drate=%d, hrate=%d, refresh_rate=%d\n", @@ -1262,10 +1251,10 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) search_idx = 0; while ((XGIbios_mode[search_idx].mode_no != 0) && - (XGIbios_mode[search_idx].xres <= var->xres)) { + (XGIbios_mode[search_idx].xres <= var->xres)) { if ((XGIbios_mode[search_idx].xres == var->xres) && - (XGIbios_mode[search_idx].yres == var->yres) && - (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) { + (XGIbios_mode[search_idx].yres == var->yres) && + (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) { if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) { found_mode = 1; break; @@ -1275,9 +1264,8 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) } if (!found_mode) { - pr_err("%dx%dx%d is no valid mode\n", - var->xres, var->yres, var->bits_per_pixel); + var->xres, var->yres, var->bits_per_pixel); search_idx = 0; while (XGIbios_mode[search_idx].mode_no != 0) { if 
((var->xres <= XGIbios_mode[search_idx].xres) && @@ -1296,11 +1284,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) var->xres = XGIbios_mode[search_idx].xres; var->yres = XGIbios_mode[search_idx].yres; pr_debug("Adapted to mode %dx%dx%d\n", - var->xres, var->yres, var->bits_per_pixel); + var->xres, var->yres, var->bits_per_pixel); } else { pr_err("Failed to find similar mode to %dx%dx%d\n", - var->xres, var->yres, var->bits_per_pixel); + var->xres, var->yres, var->bits_per_pixel); return -EINVAL; } } @@ -1332,7 +1320,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) } static int XGIfb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) + struct fb_info *info) { int err; @@ -1344,9 +1332,8 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var, if (var->vmode & FB_VMODE_YWRAP) { if (var->yoffset >= info->var.yres_virtual || var->xoffset) return -EINVAL; - } else if (var->xoffset + info->var.xres > info->var.xres_virtual - || var->yoffset + info->var.yres - > info->var.yres_virtual) { + } else if (var->xoffset + info->var.xres > info->var.xres_virtual || + var->yoffset + info->var.yres > info->var.yres_virtual) { return -EINVAL; } err = XGIfb_pan_var(var, info); @@ -1401,7 +1388,6 @@ static struct fb_ops XGIfb_ops = { static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info) { - u8 ChannelNum, tmp; u8 reg = 0; @@ -1474,10 +1460,8 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info) xgifb_info->video_size = xgifb_info->video_size * ChannelNum; pr_info("SR14=%x DramSzie %x ChannelNum %x\n", - reg, - xgifb_info->video_size, ChannelNum); + reg, xgifb_info->video_size, ChannelNum); return 0; - } static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info) @@ -1597,7 +1581,6 @@ static int __init XGIfb_setup(char *options) pr_info("Options: %s\n", options); while ((this_opt = strsep(&options, ",")) != NULL) { - if (!*this_opt) continue; @@ -1634,8 +1617,7 @@ static int __init XGIfb_setup(char *options) return 0; } -static int xgifb_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { u8 reg, reg1; u8 CR48, CR38; @@ -1670,7 +1652,7 @@ static int xgifb_probe(struct pci_dev *pdev, xgifb_info->mmio_size = pci_resource_len(pdev, 1); xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30; dev_info(&pdev->dev, "Relocate IO address: %Lx [%08lx]\n", - (u64) pci_resource_start(pdev, 2), + (u64)pci_resource_start(pdev, 2), xgifb_info->vga_base); if (pci_enable_device(pdev)) { @@ -1688,7 +1670,7 @@ static int xgifb_probe(struct pci_dev *pdev, xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD); reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD); - if (reg1 != 0xa1) { /*I/O error */ + if (reg1 != 0xa1) { /* I/O error */ dev_err(&pdev->dev, "I/O error\n"); ret = -EIO; goto error_disable; @@ -1698,7 +1680,7 @@ static int xgifb_probe(struct pci_dev *pdev, case PCI_DEVICE_ID_XGI_20: xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN); CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1); - if (CR48&GPIOG_READ) + if (CR48 & GPIOG_READ) xgifb_info->chip = XG21; else xgifb_info->chip = XG20; @@ -1727,7 +1709,7 @@ static int xgifb_probe(struct pci_dev *pdev, xgifb_info->video_size = video_size_max; } - /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */ + /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */ xgifb_reg_or(XGISR, IND_SIS_PCI_ADDRESS_SET, (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE)); @@ -1740,7 +1722,7 
@@ static int xgifb_probe(struct pci_dev *pdev, xgifb_info->video_size, "XGIfb FB")) { dev_err(&pdev->dev, "Unable request memory size %x\n", - xgifb_info->video_size); + xgifb_info->video_size); dev_err(&pdev->dev, "Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n"); ret = -ENODEV; @@ -1763,13 +1745,13 @@ static int xgifb_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Framebuffer at 0x%Lx, mapped to 0x%p, size %dk\n", - (u64) xgifb_info->video_base, + (u64)xgifb_info->video_base, xgifb_info->video_vbase, xgifb_info->video_size / 1024); dev_info(&pdev->dev, "MMIO at 0x%Lx, mapped to 0x%p, size %ldk\n", - (u64) xgifb_info->mmio_base, xgifb_info->mmio_vbase, + (u64)xgifb_info->mmio_base, xgifb_info->mmio_vbase, xgifb_info->mmio_size / 1024); pci_set_drvdata(pdev, xgifb_info); @@ -1784,9 +1766,9 @@ static int xgifb_probe(struct pci_dev *pdev, xgifb_info->hasVB = HASVB_NONE; } else if (xgifb_info->chip == XG21) { CR38 = xgifb_reg_get(XGICR, 0x38); - if ((CR38&0xE0) == 0xC0) + if ((CR38 & 0xE0) == 0xC0) xgifb_info->display2 = XGIFB_DISP_LCD; - else if ((CR38&0xE0) == 0x60) + else if ((CR38 & 0xE0) == 0x60) xgifb_info->hasVB = HASVB_CHRONTEL; else xgifb_info->hasVB = HASVB_NONE; @@ -1903,8 +1885,7 @@ static int xgifb_probe(struct pci_dev *pdev, xgifb_info->refresh_rate = refresh_rate; if (xgifb_info->refresh_rate == 0) xgifb_info->refresh_rate = 60; - if (XGIfb_search_refresh_rate(xgifb_info, - xgifb_info->refresh_rate) == 0) { + if (XGIfb_search_refresh_rate(xgifb_info, xgifb_info->refresh_rate) == 0) { xgifb_info->rate_idx = 1; xgifb_info->refresh_rate = 60; } @@ -1939,15 +1920,13 @@ static int xgifb_probe(struct pci_dev *pdev, default: xgifb_info->video_cmap_len = 16; pr_info("Unsupported depth %d\n", - xgifb_info->video_bpp); + xgifb_info->video_bpp); break; } pr_info("Default mode is %dx%dx%d (%dHz)\n", - xgifb_info->video_width, - xgifb_info->video_height, - xgifb_info->video_bpp, - xgifb_info->refresh_rate); + xgifb_info->video_width, xgifb_info->video_height, + xgifb_info->video_bpp, xgifb_info->refresh_rate); fb_info->var.red.length = 8; fb_info->var.green.length = 8; @@ -1964,22 +1943,20 @@ static int xgifb_probe(struct pci_dev *pdev, XGIfb_bpp_to_var(xgifb_info, &fb_info->var); - fb_info->var.pixclock = (u32) (1000000000 / - XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info, - hw_info, - XGIbios_mode[xgifb_info->mode_idx].mode_no)); - - if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info, - XGIbios_mode[xgifb_info->mode_idx].mode_no, - &fb_info->var.left_margin, - &fb_info->var.right_margin, - &fb_info->var.upper_margin, - &fb_info->var.lower_margin, - &fb_info->var.hsync_len, - &fb_info->var.vsync_len, - &fb_info->var.sync, - &fb_info->var.vmode)) { - + fb_info->var.pixclock = (u32)(1000000000 / XGIfb_mode_rate_to_dclock + (&xgifb_info->dev_info, hw_info, + XGIbios_mode[xgifb_info->mode_idx].mode_no)); + + if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, + hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no, + &fb_info->var.left_margin, + &fb_info->var.right_margin, + &fb_info->var.upper_margin, + &fb_info->var.lower_margin, + &fb_info->var.hsync_len, + &fb_info->var.vsync_len, + &fb_info->var.sync, + &fb_info->var.vmode)) { if ((fb_info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { fb_info->var.yres <<= 1; @@ -1990,7 +1967,6 @@ static int xgifb_probe(struct pci_dev *pdev, fb_info->var.yres >>= 1; fb_info->var.yres_virtual >>= 1; } - } fb_info->flags = FBINFO_FLAG_DEFAULT; @@ -2028,9 +2004,7 @@ static int 
xgifb_probe(struct pci_dev *pdev, return ret; } -/*****************************************************/ -/* PCI DEVICE HANDLING */ -/*****************************************************/ +/* -------------------- PCI DEVICE HANDLING -------------------- */ static void xgifb_remove(struct pci_dev *pdev) { @@ -2054,25 +2028,23 @@ static struct pci_driver xgifb_driver = { .remove = xgifb_remove }; -/*****************************************************/ -/* MODULE */ -/*****************************************************/ +/* -------------------- MODULE -------------------- */ -module_param(mode, charp, 0); +module_param(mode, charp, 0000); MODULE_PARM_DESC(mode, - "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16)."); + "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16)."); -module_param(forcecrt2type, charp, 0); +module_param(forcecrt2type, charp, 0000); MODULE_PARM_DESC(forcecrt2type, - "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE."); + "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE."); -module_param(vesa, int, 0); +module_param(vesa, int, 0000); MODULE_PARM_DESC(vesa, - "Selects the desired default display mode by VESA mode number (eg. 0x117)."); + "Selects the desired default display mode by VESA mode number (eg. 0x117)."); -module_param(filter, int, 0); +module_param(filter, int, 0000); MODULE_PARM_DESC(filter, - "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter])."); + "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter])."); static int __init xgifb_init(void) { diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c index 062ece22ed84c6aa4436e8c5c0dc109ced2b738f..14af157958cd68b38069ba7718a8a4503d0470ce 100644 --- a/drivers/staging/xgifb/vb_init.c +++ b/drivers/staging/xgifb/vb_init.c @@ -55,8 +55,9 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension, xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */ /* GPIOF 0:DVI 1:DVO */ data = xgifb_reg_get(pVBInfo->P3d4, 0x48); - /* HOTPLUG_SUPPORT */ - /* for current XG20 & XG21, GPIOH is floating, driver will + /* + * HOTPLUG_SUPPORT + * for current XG20 & XG21, GPIOH is floating, driver will * fix DDR temporarily */ /* DVI read GPIOH */ @@ -199,7 +200,8 @@ static void XGINew_DDRII_Bootup_XG27( } static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension, - unsigned long P3c4, struct vb_device_info *pVBInfo) + unsigned long P3c4, + struct vb_device_info *pVBInfo) { unsigned long P3d4 = P3c4 + 0x10; @@ -353,8 +355,8 @@ static void XGINew_DDR2_DefaultRegister( unsigned long Port, struct vb_device_info *pVBInfo) { unsigned long P3d4 = Port, P3c4 = Port - 0x10; - - /* keep following setting sequence, each setting in + /* + * keep following setting sequence, each setting in * the same reg insert idle */ xgifb_reg_set(P3d4, 0x82, 0x77); @@ -387,7 +389,7 @@ static void XGINew_DDR2_DefaultRegister( } static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg, - u8 shift_factor, u8 mask1, u8 mask2) + u8 shift_factor, u8 mask1, u8 mask2) { u8 j; @@ -460,15 +462,15 @@ static void XGINew_SetDRAMDefaultRegister340( for (j = 0; j <= 6; j++) /* CR90 - CR96 */ xgifb_reg_set(P3d4, (0x90 + j), - pVBInfo->CR40[14 + j][pVBInfo->ram_type]); + pVBInfo->CR40[14 + 
j][pVBInfo->ram_type]); for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */ xgifb_reg_set(P3d4, (0xC3 + j), - pVBInfo->CR40[21 + j][pVBInfo->ram_type]); + pVBInfo->CR40[21 + j][pVBInfo->ram_type]); for (j = 0; j < 2; j++) /* CR8A - CR8B */ xgifb_reg_set(P3d4, (0x8A + j), - pVBInfo->CR40[1 + j][pVBInfo->ram_type]); + pVBInfo->CR40[1 + j][pVBInfo->ram_type]); if (HwDeviceExtension->jChipType == XG42) xgifb_reg_set(P3d4, 0x8C, 0x87); @@ -539,7 +541,8 @@ static unsigned short XGINew_SetDRAMSize20Reg( } static int XGINew_ReadWriteRest(unsigned short StopAddr, - unsigned short StartAddr, struct vb_device_info *pVBInfo) + unsigned short StartAddr, + struct vb_device_info *pVBInfo) { int i; unsigned long Position = 0; @@ -583,7 +586,7 @@ static unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo) } static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { unsigned char data; @@ -647,7 +650,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, pVBInfo->ram_bus = 16; /* 16 bits */ /* (0x31:12x8x2) 22bit + 2 rank */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1); - /* 0x41:16Mx16 bit*/ + /* 0x41:16Mx16 bit */ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41); usleep_range(15, 1015); @@ -660,7 +663,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x31); - /* 0x31:8Mx16 bit*/ + /* 0x31:8Mx16 bit */ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x31); @@ -678,7 +681,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, pVBInfo->ram_bus = 8; /* 8 bits */ /* (0x31:12x8x2) 22bit + 2 rank */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1); - /* 0x30:8Mx8 bit*/ + /* 0x30:8Mx8 bit */ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30); usleep_range(15, 1015); @@ -697,7 +700,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, case XG27: pVBInfo->ram_bus = 16; /* 16 bits */ pVBInfo->ram_channel = 1; /* Single channel */ - xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit*/ + xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit */ break; case XG42: /* @@ -785,7 +788,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, } static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { u8 i, size; unsigned short memsize, start_addr; @@ -827,8 +830,8 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension, } static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info, - struct xgi_hw_device_info *HwDeviceExtension, - struct vb_device_info *pVBInfo) + struct xgi_hw_device_info *HwDeviceExtension, + struct vb_device_info *pVBInfo) { unsigned short data; @@ -905,9 +908,9 @@ static bool xgifb_read_vbios(struct pci_dev *pdev) goto error; if (j == 0xff) j = 1; - /* - * Read the LVDS table index scratch register set by the BIOS. - */ + + /* Read the LVDS table index scratch register set by the BIOS. 
*/ + entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36); if (entry >= j) entry = 0; @@ -1039,8 +1042,9 @@ static void XGINew_SetModeScratch(struct vb_device_info *pVBInfo) } tempcl |= SetSimuScanMode; - if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV) - || (temp & ActiveCRT2))) + if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || + (temp & ActiveTV) || + (temp & ActiveCRT2))) tempcl ^= (SetSimuScanMode | SwitchCRT2); if ((temp & ActiveLCD) && (temp & ActiveTV)) tempcl ^= (SetSimuScanMode | SwitchCRT2); @@ -1085,7 +1089,7 @@ static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info } static void XGINew_GetXG21Sense(struct pci_dev *pdev, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev); unsigned char Temp; @@ -1095,7 +1099,7 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev, /* LVDS on chip */ xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0); } else { - /* Enable GPIOA/B read */ + /* Enable GPIOA/B read */ xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03); Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0; if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */ @@ -1119,7 +1123,7 @@ static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo) unsigned char Temp, bCR4A; bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A); - /* Enable GPIOA/B/C read */ + /* Enable GPIOA/B/C read */ xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07); Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07; xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A); diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c index d8010c5c1a700b5cb9b4c656e4fdb4497f92e857..7c7c8c8f1df3798fdd4b240789286f39256fd9cc 100644 --- a/drivers/staging/xgifb/vb_setmode.c +++ b/drivers/staging/xgifb/vb_setmode.c @@ -55,7 +55,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo) pVBInfo->XGINew_CR97 = 0xc1; pVBInfo->SR18 = XG27_SR18; - /*Z11m DDR*/ + /* Z11m DDR */ temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B); /* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */ if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08)) @@ -73,7 +73,7 @@ static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo) /* Get SR1,2,3,4 from file */ /* SR1 is with screen off 0x20 */ SRdata = XGI330_StandTable.SR[i]; - xgifb_reg_set(pVBInfo->P3c4, i+1, SRdata); /* Set SR 1 2 3 4 */ + xgifb_reg_set(pVBInfo->P3c4, i + 1, SRdata); /* Set SR 1 2 3 4 */ } } @@ -167,7 +167,8 @@ static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo) } static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, unsigned short *i, + unsigned short RefreshRateTableIndex, + unsigned short *i, struct vb_device_info *pVBInfo) { unsigned short tempax, tempbx, resinfo, modeflag, infoflag; @@ -244,7 +245,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex, } static void XGI_SetSync(unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { unsigned short sync, temp; @@ -257,7 +258,7 @@ static void XGI_SetSync(unsigned short RefreshRateTableIndex, } static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo, - struct xgi_hw_device_info *HwDeviceExtension) + struct xgi_hw_device_info *HwDeviceExtension) { unsigned char data, data1, pushax; unsigned short i, j; @@ -359,9 +360,9 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex, } static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex, - unsigned short 
RefreshRateTableIndex, - struct vb_device_info *pVBInfo, - struct xgi_hw_device_info *HwDeviceExtension) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo, + struct xgi_hw_device_info *HwDeviceExtension) { unsigned char index, data; unsigned short i; @@ -390,14 +391,14 @@ static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex, xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F); } -/* --------------------------------------------------------------------- */ -/* Function : XGI_SetXG21CRTC */ -/* Input : Stand or enhance CRTC table */ -/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */ -/* Description : Set LCD timing */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_SetXG21CRTC + * Input : Stand or enhance CRTC table + * Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F + * Description : Set LCD timing + */ static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { unsigned char index, Tempax, Tempbx, Tempcx, Tempdx; unsigned short Temp1, Temp2, Temp3; @@ -506,8 +507,8 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex, /* SR0B */ Tempax = XGI_CRT1Table[index].CR[5]; - Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/ - Tempbx |= (Tempax << 2); /* Tempbx: HRS[9:0] */ + Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8] */ + Tempbx |= Tempax << 2; /* Tempbx: HRS[9:0] */ Tempax = XGI_CRT1Table[index].CR[4]; /* CR5 HRE */ Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */ @@ -530,7 +531,7 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex, Tempax = XGI_CRT1Table[index].CR[5]; /* SR0B */ Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/ Tempax >>= 6; /* Tempax[1:0]: HRS[9:8]*/ - Tempax |= ((Tempbx << 2) & 0xFF); /* Tempax[7:2]: HRE[5:0] */ + Tempax |= (Tempbx << 2) & 0xFF; /* Tempax[7:2]: HRE[5:0] */ /* SR2F [7:2][1:0]: HRE[5:0]HRS[9:8] */ xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax); xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00); @@ -548,12 +549,12 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex, Tempax >>= 2; /* Tempax[0]: VRS[8] */ /* SR35[0]: VRS[8] */ xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax); - Tempcx |= (Tempax << 8); /* Tempcx <= VRS[8:0] */ - Tempcx |= ((Tempbx & 0x80) << 2); /* Tempcx <= VRS[9:0] */ + Tempcx |= Tempax << 8; /* Tempcx <= VRS[8:0] */ + Tempcx |= (Tempbx & 0x80) << 2; /* Tempcx <= VRS[9:0] */ /* Tempax: SR0A */ Tempax = XGI_CRT1Table[index].CR[14]; Tempax &= 0x08; /* SR0A[3] VRS[10] */ - Tempcx |= (Tempax << 7); /* Tempcx <= VRS[10:0] */ + Tempcx |= Tempax << 7; /* Tempcx <= VRS[10:0] */ /* Tempax: CR11 VRE */ Tempax = XGI_CRT1Table[index].CR[11]; @@ -636,12 +637,12 @@ static void xgifb_set_lcd(int chip_id, xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80); } -/* --------------------------------------------------------------------- */ -/* Function : XGI_UpdateXG21CRTC */ -/* Input : */ -/* Output : CRT1 CRTC */ -/* Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_UpdateXG21CRTC + * Input : + * Output : CRT1 CRTC + * Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing + */ static void XGI_UpdateXG21CRTC(unsigned short ModeNo, struct vb_device_info *pVBInfo, unsigned short RefreshRateTableIndex) @@ -665,19 +666,19 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo, if (index != -1) { xgifb_reg_set(pVBInfo->P3d4, 0x02, - 
XGI_UpdateCRT1Table[index].CR02); + XGI_UpdateCRT1Table[index].CR02); xgifb_reg_set(pVBInfo->P3d4, 0x03, - XGI_UpdateCRT1Table[index].CR03); + XGI_UpdateCRT1Table[index].CR03); xgifb_reg_set(pVBInfo->P3d4, 0x15, - XGI_UpdateCRT1Table[index].CR15); + XGI_UpdateCRT1Table[index].CR15); xgifb_reg_set(pVBInfo->P3d4, 0x16, - XGI_UpdateCRT1Table[index].CR16); + XGI_UpdateCRT1Table[index].CR16); } } static void XGI_SetCRT1DE(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag; @@ -715,7 +716,7 @@ static void XGI_SetCRT1DE(unsigned short ModeIdIndex, xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */ xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff)); xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c, - (unsigned short)((tempcx & 0x0ff00) >> 10)); + (unsigned short)((tempcx & 0x0ff00) >> 10)); xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff)); tempax = 0; tempbx >>= 8; @@ -796,7 +797,7 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo, i |= temp; xgifb_reg_set(pVBInfo->P3c4, 0x0E, i); - temp = (unsigned char) temp2; + temp = (unsigned char)temp2; temp &= 0xFF; /* al */ xgifb_reg_set(pVBInfo->P3d4, 0x13, temp); @@ -822,15 +823,15 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo, } static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short VCLKIndex, modeflag; /* si+Ext_ResInfo */ modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag; - if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/ + if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /* 301b */ if (pVBInfo->LCDResInfo != Panel_1024x768) /* LCDXlat2VCLK */ VCLKIndex = VCLK108_2_315 + 5; @@ -951,8 +952,8 @@ static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension, } static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short data, data2 = 0; short VCLK; @@ -989,9 +990,9 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension, } static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension, - unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short ModeIdIndex, + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short data, data2, data3, infoflag = 0, modeflag, resindex, xres; @@ -1087,9 +1088,9 @@ static void XGI_WriteDAC(unsigned short dl, else swap(bl, bh); } - outb((unsigned short) dh, pVBInfo->P3c9); - outb((unsigned short) bh, pVBInfo->P3c9); - outb((unsigned short) bl, pVBInfo->P3c9); + outb((unsigned short)dh, pVBInfo->P3c9); + outb((unsigned short)bh, pVBInfo->P3c9); + outb((unsigned short)bl, pVBInfo->P3c9); } static void XGI_LoadDAC(struct vb_device_info *pVBInfo) @@ -1187,8 +1188,8 @@ static void XGI_GetLVDSResInfo(unsigned short ModeIdIndex, } static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table, - unsigned short ModeIdIndex, - struct vb_device_info *pVBInfo) + unsigned short ModeIdIndex, + struct vb_device_info *pVBInfo) { unsigned short i, tempdx, tempbx, 
modeflag; @@ -1201,12 +1202,12 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table, while (table[i].PANELID != 0xff) { tempdx = pVBInfo->LCDResInfo; if (tempbx & 0x0080) { /* OEMUtil */ - tempbx &= (~0x0080); + tempbx &= ~0x0080; tempdx = pVBInfo->LCDTypeInfo; } if (pVBInfo->LCDInfo & EnableScalingLCD) - tempdx &= (~PanelResInfo); + tempdx &= ~PanelResInfo; if (table[i].PANELID == tempdx) { tempbx = table[i].MASK; @@ -1226,8 +1227,8 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table, } static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short i, tempdx, tempal, modeflag; @@ -1441,9 +1442,9 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, tempbx >>= 3; xgifb_reg_set(pVBInfo->Part1Port, 0x16, - (unsigned short) (tempbx & 0xff)); + (unsigned short)(tempbx & 0xff)); xgifb_reg_set(pVBInfo->Part1Port, 0x17, - (unsigned short) (tempcx & 0xff)); + (unsigned short)(tempcx & 0xff)); tempax = pVBInfo->HT; @@ -1469,7 +1470,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax); xgifb_reg_set(pVBInfo->Part1Port, 0x14, - (unsigned short) (tempbx & 0xff)); + (unsigned short)(tempbx & 0xff)); tempax = pVBInfo->VT; tempbx = LCDPtr1->LCDVDES; @@ -1480,17 +1481,14 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, if (tempcx >= tempax) tempcx -= tempax; - xgifb_reg_set(pVBInfo->Part1Port, 0x1b, - (unsigned short) (tempbx & 0xff)); - xgifb_reg_set(pVBInfo->Part1Port, 0x1c, - (unsigned short) (tempcx & 0xff)); + xgifb_reg_set(pVBInfo->Part1Port, 0x1b, (unsigned short)(tempbx & 0xff)); + xgifb_reg_set(pVBInfo->Part1Port, 0x1c, (unsigned short)(tempcx & 0xff)); tempbx = (tempbx >> 8) & 0x07; tempcx = (tempcx >> 8) & 0x07; - xgifb_reg_set(pVBInfo->Part1Port, 0x1d, - (unsigned short) ((tempcx << 3) - | tempbx)); + xgifb_reg_set(pVBInfo->Part1Port, 0x1d, (unsigned short)((tempcx << 3) | + tempbx)); tempax = pVBInfo->VT; tempbx = LCDPtr1->LCDVRS; @@ -1504,10 +1502,8 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, if (tempcx >= tempax) tempcx -= tempax; - xgifb_reg_set(pVBInfo->Part1Port, 0x18, - (unsigned short) (tempbx & 0xff)); - xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f, - (unsigned short) (tempcx & 0x0f)); + xgifb_reg_set(pVBInfo->Part1Port, 0x18, (unsigned short)(tempbx & 0xff)); + xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f, (unsigned short)(tempcx & 0x0f)); tempax = ((tempbx >> 8) & 0x07) << 3; @@ -1518,8 +1514,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA) tempax |= 0x40; - xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07, - tempax); + xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07, tempax); tempbx = pVBInfo->VDE; tempax = pVBInfo->VGAVDE; @@ -1527,7 +1522,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, temp = tempax; /* 0430 ylshieh */ temp1 = (temp << 18) / tempbx; - tempdx = (unsigned short) ((temp << 18) % tempbx); + tempdx = (unsigned short)((temp << 18) % tempbx); if (tempdx != 0) temp1 += 1; @@ -1535,12 +1530,10 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, temp2 = temp1; push3 = temp2; - xgifb_reg_set(pVBInfo->Part1Port, 0x37, - (unsigned short) (temp2 & 0xff)); - xgifb_reg_set(pVBInfo->Part1Port, 0x36, - (unsigned short) ((temp2 >> 8) & 0xff)); + xgifb_reg_set(pVBInfo->Part1Port, 0x37, 
(unsigned short)(temp2 & 0xff)); + xgifb_reg_set(pVBInfo->Part1Port, 0x36, (unsigned short)((temp2 >> 8) & 0xff)); - tempbx = (unsigned short) (temp2 >> 16); + tempbx = (unsigned short)(temp2 >> 16); tempax = tempbx & 0x03; tempbx = pVBInfo->VGAVDE; @@ -1553,24 +1546,20 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, temp2 = push3; xgifb_reg_set(pVBInfo->Part4Port, 0x3c, - (unsigned short) (temp2 & 0xff)); + (unsigned short)(temp2 & 0xff)); xgifb_reg_set(pVBInfo->Part4Port, 0x3b, - (unsigned short) ((temp2 >> 8) & + (unsigned short)((temp2 >> 8) & 0xff)); - tempbx = (unsigned short) (temp2 >> 16); - xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a, - ~0xc0, - (unsigned short) ((tempbx & - 0xff) << 6)); + tempbx = (unsigned short)(temp2 >> 16); + xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a, ~0xc0, + (unsigned short)((tempbx & 0xff) << 6)); tempcx = pVBInfo->VGAVDE; if (tempcx == pVBInfo->VDE) - xgifb_reg_and_or(pVBInfo->Part4Port, - 0x30, ~0x0c, 0x00); + xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x00); else - xgifb_reg_and_or(pVBInfo->Part4Port, - 0x30, ~0x0c, 0x08); + xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x08); } tempcx = pVBInfo->VGAHDE; @@ -1578,7 +1567,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, temp1 = tempcx << 16; - tempax = (unsigned short) (temp1 / tempbx); + tempax = (unsigned short)(temp1 / tempbx); if ((tempbx & 0xffff) == (tempcx & 0xffff)) tempax = 65535; @@ -1592,42 +1581,38 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex, temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff); - tempax = (unsigned short) (temp3 & 0xff); + tempax = (unsigned short)(temp3 & 0xff); xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax); temp1 = pVBInfo->VGAVDE << 18; temp1 = temp1 / push3; - tempbx = (unsigned short) (temp1 & 0xffff); + tempbx = (unsigned short)(temp1 & 0xffff); if (pVBInfo->LCDResInfo == Panel_1024x768) tempbx -= 1; tempax = ((tempbx >> 8) & 0xff) << 3; - tempax |= (unsigned short) ((temp3 >> 8) & 0x07); - xgifb_reg_set(pVBInfo->Part1Port, 0x20, - (unsigned short) (tempax & 0xff)); - xgifb_reg_set(pVBInfo->Part1Port, 0x21, - (unsigned short) (tempbx & 0xff)); + tempax |= (unsigned short)((temp3 >> 8) & 0x07); + xgifb_reg_set(pVBInfo->Part1Port, 0x20, (unsigned short)(tempax & 0xff)); + xgifb_reg_set(pVBInfo->Part1Port, 0x21, (unsigned short)(tempbx & 0xff)); temp3 >>= 16; if (modeflag & HalfDCLK) temp3 >>= 1; - xgifb_reg_set(pVBInfo->Part1Port, 0x22, - (unsigned short) ((temp3 >> 8) & 0xff)); - xgifb_reg_set(pVBInfo->Part1Port, 0x23, - (unsigned short) (temp3 & 0xff)); + xgifb_reg_set(pVBInfo->Part1Port, 0x22, (unsigned short)((temp3 >> 8) & 0xff)); + xgifb_reg_set(pVBInfo->Part1Port, 0x23, (unsigned short)(temp3 & 0xff)); } -/* --------------------------------------------------------------------- */ -/* Function : XGI_GETLCDVCLKPtr */ -/* Input : */ -/* Output : al -> VCLK Index */ -/* Description : */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_GETLCDVCLKPtr + * Input : + * Output : al -> VCLK Index + * Description : + */ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { unsigned short index; @@ -1645,7 +1630,8 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1, } static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex, - unsigned short ModeIdIndex, struct vb_device_info *pVBInfo) + unsigned short ModeIdIndex, + struct vb_device_info *pVBInfo) { unsigned 
short index, modeflag; unsigned char tempal; @@ -1681,15 +1667,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex, return tempal; } - if (pVBInfo->TVInfo & TVSetYPbPr750p) { - tempal = XGI_YPbPr750pVCLK; - return tempal; - } + if (pVBInfo->TVInfo & TVSetYPbPr750p) + return XGI_YPbPr750pVCLK; - if (pVBInfo->TVInfo & TVSetYPbPr525p) { - tempal = YPbPr525pVCLK; - return tempal; - } + if (pVBInfo->TVInfo & TVSetYPbPr525p) + return YPbPr525pVCLK; tempal = NTSC1024VCLK; @@ -1705,12 +1687,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex, } /* {End of VB} */ inb((pVBInfo->P3ca + 0x02)); - tempal = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK; - return tempal; + return XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK; } static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0, - unsigned char *di_1, struct vb_device_info *pVBInfo) + unsigned char *di_1, struct vb_device_info *pVBInfo) { if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B | VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) { @@ -1726,8 +1707,8 @@ static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0, } static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned char di_0, di_1, tempal; int i; @@ -1738,7 +1719,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex, for (i = 0; i < 4; i++) { xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30, - (unsigned short) (0x10 * i)); + (unsigned short)(0x10 * i)); if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) && !(pVBInfo->VBInfo & SetInSlaveMode)) { xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0); @@ -1876,8 +1857,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo) pVBInfo->VBType = tempbx; } -static void XGI_GetVBInfo(unsigned short ModeIdIndex, - struct vb_device_info *pVBInfo) +static void XGI_GetVBInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo) { unsigned short tempax, push, tempbx, temp, modeflag; @@ -1921,7 +1901,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex, tempbx |= SetCRT2ToHiVision; if (temp != YPbPrMode1080i) { - tempbx &= (~SetCRT2ToHiVision); + tempbx &= ~SetCRT2ToHiVision; tempbx |= SetCRT2ToYPbPr525750; } } @@ -2002,8 +1982,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex, pVBInfo->VBInfo = tempbx; } -static void XGI_GetTVInfo(unsigned short ModeIdIndex, - struct vb_device_info *pVBInfo) +static void XGI_GetTVInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo) { unsigned short tempbx = 0, resinfo = 0, modeflag, index1; @@ -2078,7 +2057,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex, pVBInfo->LCDTypeInfo = 0; pVBInfo->LCDInfo = 0; - /* si+Ext_ResInfo // */ + /* si+Ext_ResInfo */ resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO; temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */ tempbx = temp & 0x0F; @@ -2175,12 +2154,12 @@ static unsigned char XG21GPIODataTransfer(unsigned char ujDate) return ujRet; } -/*----------------------------------------------------------------------------*/ -/* output */ -/* bl[5] : LVDS signal */ -/* bl[1] : LVDS backlight */ -/* bl[0] : LVDS VDD */ -/*----------------------------------------------------------------------------*/ +/* + * output + * bl[5] : LVDS signal + * bl[1] : LVDS backlight + * bl[0] : LVDS VDD + */ static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo) { unsigned char CR4A, temp; @@ 
-2196,12 +2175,12 @@ static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo) return temp; } -/*----------------------------------------------------------------------------*/ -/* output */ -/* bl[5] : LVDS signal */ -/* bl[1] : LVDS backlight */ -/* bl[0] : LVDS VDD */ -/*----------------------------------------------------------------------------*/ +/* + * output + * bl[5] : LVDS signal + * bl[1] : LVDS backlight + * bl[0] : LVDS VDD + */ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo) { unsigned char CR4A, CRB4, temp; @@ -2219,17 +2198,17 @@ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo) return temp; } -/*----------------------------------------------------------------------------*/ -/* input */ -/* bl[5] : 1;LVDS signal on */ -/* bl[1] : 1;LVDS backlight on */ -/* bl[0] : 1:LVDS VDD on */ -/* bh: 100000b : clear bit 5, to set bit5 */ -/* 000010b : clear bit 1, to set bit1 */ -/* 000001b : clear bit 0, to set bit0 */ -/*----------------------------------------------------------------------------*/ +/* + * input + * bl[5] : 1;LVDS signal on + * bl[1] : 1;LVDS backlight on + * bl[0] : 1:LVDS VDD on + * bh: 100000b : clear bit 5, to set bit5 + * 000010b : clear bit 1, to set bit1 + * 000001b : clear bit 0, to set bit0 + */ static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { unsigned char CR4A, temp; @@ -2254,7 +2233,7 @@ static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl, } static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { unsigned char CR4A, temp; unsigned short tempbh0, tempbl0; @@ -2284,8 +2263,8 @@ static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl, } static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info, - struct xgi_hw_device_info *pXGIHWDE, - struct vb_device_info *pVBInfo) + struct xgi_hw_device_info *pXGIHWDE, + struct vb_device_info *pVBInfo) { xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00); if (pXGIHWDE->jChipType == XG21) { @@ -2328,8 +2307,8 @@ static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info, } void XGI_DisplayOff(struct xgifb_video_info *xgifb_info, - struct xgi_hw_device_info *pXGIHWDE, - struct vb_device_info *pVBInfo) + struct xgi_hw_device_info *pXGIHWDE, + struct vb_device_info *pVBInfo) { if (pXGIHWDE->jChipType == XG21) { if (pVBInfo->IF_DEF_LVDS == 1) { @@ -2448,7 +2427,7 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeIdIndex, static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo) { if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) && - (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */ + (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */ return 1; return 0; @@ -2466,16 +2445,15 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex, modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag; CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC; CRT1Index &= IndexMask; - temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[0]; - temp2 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[5]; + temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[0]; + temp2 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[5]; tempax = (temp1 & 0xFF) | ((temp2 & 0x03) << 8); - tempbx = (unsigned short) XGI_CRT1Table[CRT1Index].CR[8]; - tempcx = (unsigned short) - XGI_CRT1Table[CRT1Index].CR[14] << 8; + tempbx = 
(unsigned short)XGI_CRT1Table[CRT1Index].CR[8]; + tempcx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[14] << 8; tempcx &= 0x0100; tempcx <<= 2; tempbx |= tempcx; - temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[9]; + temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[9]; if (temp1 & 0x01) tempbx |= 0x0100; @@ -2497,8 +2475,8 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex, } static void XGI_GetCRT2Data(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short tempax = 0, tempbx = 0, modeflag, resinfo; @@ -2667,8 +2645,8 @@ static void XGI_GetCRT2Data(unsigned short ModeIdIndex, } static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned char di_0, di_1, tempal; @@ -2739,9 +2717,9 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo, } static void XGI_SetCRT2Offset(unsigned short ModeNo, - unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short ModeIdIndex, + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short offset; unsigned char temp; @@ -2750,11 +2728,11 @@ static void XGI_SetCRT2Offset(unsigned short ModeNo, return; offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex); - temp = (unsigned char) (offset & 0xFF); + temp = (unsigned char)(offset & 0xFF); xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp); - temp = (unsigned char) ((offset & 0xFF00) >> 8); + temp = (unsigned char)((offset & 0xFF00) >> 8); xgifb_reg_set(pVBInfo->Part1Port, 0x09, temp); - temp = (unsigned char) (((offset >> 3) & 0xFF) + 1); + temp = (unsigned char)(((offset >> 3) & 0xFF) + 1); xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp); } @@ -2767,8 +2745,8 @@ static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo) } static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { u8 tempcx; @@ -2783,8 +2761,8 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex, } static void XGI_SetGroup1(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0, pushbx = 0, CRT1Index, modeflag; @@ -2933,11 +2911,11 @@ static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo) tempax = (pVBInfo->VT - pVBInfo->VDE) * pVBInfo->RVBHCFACT; tempax = (tempax * pVBInfo->HT) / tempbx; - return (unsigned short) tempax; + return (unsigned short)tempax; } static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo, modeflag; @@ -3044,14 +3022,14 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex, if (ModeNo == 0x50) { if (pVBInfo->TVInfo == SetNTSCTV) { xgifb_reg_set(pVBInfo->Part1Port, - 0x07, 0x30); + 0x07, 0x30); xgifb_reg_set(pVBInfo->Part1Port, - 0x08, 0x03); + 0x08, 0x03); } else { xgifb_reg_set(pVBInfo->Part1Port, - 0x07, 0x2f); + 0x07, 0x2f); 
xgifb_reg_set(pVBInfo->Part1Port, - 0x08, 0x02); + 0x08, 0x02); } } } @@ -3064,7 +3042,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex, tempbx = pVBInfo->VGAVT; push1 = tempbx; tempcx = 0x121; - tempbx = pVBInfo->VGAVDE; /* 0x0E Virtical Display End */ + tempbx = pVBInfo->VGAVDE; /* 0x0E Vertical Display End */ if (tempbx == 357) tempbx = 350; @@ -3116,7 +3094,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex, if (tempbx & 0x0400) tempcx |= 0x0600; - /* 0x11 Vertival Blank End */ + /* 0x11 Vertical Blank End */ xgifb_reg_set(pVBInfo->Part1Port, 0x11, 0x00); tempax = push1; @@ -3227,7 +3205,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex, } static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, - struct vb_device_info *pVBInfo) + struct vb_device_info *pVBInfo) { unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2, modeflag; @@ -3315,7 +3293,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, tempax = (tempax & 0x00FF) | ((tempax & 0x00FF) << 8); push1 = tempax; temp = (tempax & 0xFF00) >> 8; - temp += (unsigned short) TimingPoint[0]; + temp += (unsigned short)TimingPoint[0]; if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) { @@ -3526,7 +3504,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, tempcx = 0x0101; - if (pVBInfo->VBInfo & SetCRT2ToTV) { /*301b*/ + if (pVBInfo->VBInfo & SetCRT2ToTV) { /* 301b */ if (pVBInfo->VGAHDE >= 1024) { tempcx = 0x1920; if (pVBInfo->VGAHDE >= 1280) { @@ -3562,7 +3540,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, if (temp2 != 0) tempeax += 1; - tempax = (unsigned short) tempeax; + tempax = (unsigned short)tempeax; /* 301b */ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV @@ -3572,9 +3550,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, /* end 301b */ tempbx = push1; - tempbx = (unsigned short) (((tempeax & 0x0000FF00) & 0x1F00) + tempbx = (unsigned short)(((tempeax & 0x0000FF00) & 0x1F00) | (tempbx & 0x00FF)); - tempax = (unsigned short) (((tempeax & 0x000000FF) << 8) + tempax = (unsigned short)(((tempeax & 0x000000FF) << 8) | (tempax & 0x00FF)); temp = (tempax & 0xFF00) >> 8; } else { @@ -3622,14 +3600,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, xgifb_reg_set(pVBInfo->Part2Port, 0x4d, temp); temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */ - xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3)); + xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short)(temp - 3)); if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) { if (pVBInfo->TVInfo & NTSC1024x768) { TimingPoint = XGI_NTSC1024AdjTime; for (i = 0x1c, j = 0; i <= 0x30; i++, j++) { xgifb_reg_set(pVBInfo->Part2Port, i, - TimingPoint[j]); + TimingPoint[j]); } xgifb_reg_set(pVBInfo->Part2Port, 0x43, 0x72); } @@ -3639,7 +3617,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, if (pVBInfo->VBType & VB_XGI301C) { if (pVBInfo->TVInfo & TVSetPALM) xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08, - 0x08); /* PALM Mode */ + 0x08); /* PALM Mode */ } if (pVBInfo->TVInfo & TVSetPALM) { @@ -3656,8 +3634,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex, } } -static void XGI_SetLCDRegs(unsigned short ModeIdIndex, - struct vb_device_info *pVBInfo) +static void 
XGI_SetLCDRegs(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo) { unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah, tempbh, tempch; @@ -3853,12 +3830,12 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex, } } -/* --------------------------------------------------------------------- */ -/* Function : XGI_GetTap4Ptr */ -/* Input : */ -/* Output : di -> Tap4 Reg. Setting Pointer */ -/* Description : */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_GetTap4Ptr + * Input : + * Output : di -> Tap4 Reg. Setting Pointer + * Description : + */ static struct XGI301C_Tap4TimingStruct const *XGI_GetTap4Ptr(unsigned short tempcx, struct vb_device_info *pVBInfo) { @@ -3882,7 +3859,7 @@ static struct XGI301C_Tap4TimingStruct const if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) { if ((pVBInfo->TVInfo & TVSetYPbPr525i) || - (pVBInfo->TVInfo & TVSetYPbPr525p)) + (pVBInfo->TVInfo & TVSetYPbPr525p)) Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; if (pVBInfo->TVInfo & TVSetYPbPr750p) Tap4TimingPtr = YPbPr750pTap4Timing; @@ -3988,8 +3965,8 @@ static void XGI_SetGroup3(unsigned short ModeIdIndex, } static void XGI_SetGroup4(unsigned short ModeIdIndex, - unsigned short RefreshRateTableIndex, - struct vb_device_info *pVBInfo) + unsigned short RefreshRateTableIndex, + struct vb_device_info *pVBInfo) { unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2; @@ -4080,12 +4057,12 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex, if (templong != 0) tempebx++; - temp = (unsigned short) (tempebx & 0x000000FF); + temp = (unsigned short)(tempebx & 0x000000FF); xgifb_reg_set(pVBInfo->Part4Port, 0x1B, temp); - temp = (unsigned short) ((tempebx & 0x0000FF00) >> 8); + temp = (unsigned short)((tempebx & 0x0000FF00) >> 8); xgifb_reg_set(pVBInfo->Part4Port, 0x1A, temp); - tempbx = (unsigned short) (tempebx >> 16); + tempbx = (unsigned short)(tempebx >> 16); temp = tempbx & 0x00FF; temp <<= 4; temp |= ((tempcx & 0xFF00) >> 8); @@ -4132,8 +4109,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex, | TVSetHiVision))) { temp |= 0x0001; if ((pVBInfo->VBInfo & SetInSlaveMode) && - !(pVBInfo->TVInfo - & TVSimuMode)) + !(pVBInfo->TVInfo & TVSimuMode)) temp &= (~0x0001); } } @@ -4174,7 +4150,8 @@ static void XGI_DisableGatingCRT(struct vb_device_info *pVBInfo) } static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info, - unsigned short ModeNo, unsigned short ModeIdIndex) + unsigned short ModeNo, + unsigned short ModeIdIndex) { unsigned short xres, yres, colordepth, modeflag, resindex; @@ -4221,7 +4198,7 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info, unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE; unsigned short value; - temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability & + temp = (unsigned char)((xgifb_info->lvds_data.LVDS_Capability & (LCDPolarity << 8)) >> 8); temp &= LCDPolarity; Miscdata = inb(pVBInfo->P3cc); @@ -4354,12 +4331,12 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info, if (chip_id == XG27) { /* Panel VRS SR35[2:0] SR34[7:0] */ xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07, - (value & 0x700) >> 8); + (value & 0x700) >> 8); xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF); } else { /* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */ xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03, - (value & 0x600) >> 9); + (value & 0x600) >> 9); xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF); xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01); } @@ -4372,11 +4349,11 
@@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info, /* Panel VRE SR3F[7:2] */ if (chip_id == XG27) xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, - (value << 2) & 0xFC); + (value << 2) & 0xFC); else /* SR3F[7] has to be 0, h/w bug */ xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, - (value << 2) & 0x7C); + (value << 2) & 0x7C); for (temp = 0, value = 0; temp < 3; temp++) { xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value); @@ -4400,13 +4377,13 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info, } } -/* --------------------------------------------------------------------- */ -/* Function : XGI_IsLCDON */ -/* Input : */ -/* Output : 0 : Skip PSC Control */ -/* 1: Disable PSC */ -/* Description : */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_IsLCDON + * Input : + * Output : 0 : Skip PSC Control + * 1: Disable PSC + * Description : + */ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo) { unsigned short tempax; @@ -4421,8 +4398,8 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo) } static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info, - struct xgi_hw_device_info *HwDeviceExtension, - struct vb_device_info *pVBInfo) + struct xgi_hw_device_info *HwDeviceExtension, + struct vb_device_info *pVBInfo) { unsigned short tempah = 0; @@ -4498,23 +4475,23 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info, } } -/* --------------------------------------------------------------------- */ -/* Function : XGI_GetTVPtrIndex */ -/* Input : */ -/* Output : */ -/* Description : bx 0 : ExtNTSC */ -/* 1 : StNTSC */ -/* 2 : ExtPAL */ -/* 3 : StPAL */ -/* 4 : ExtHiTV */ -/* 5 : StHiTV */ -/* 6 : Ext525i */ -/* 7 : St525i */ -/* 8 : Ext525p */ -/* 9 : St525p */ -/* A : Ext750p */ -/* B : St750p */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_GetTVPtrIndex + * Input : + * Output : + * Description : bx 0 : ExtNTSC + * 1 : StNTSC + * 2 : ExtPAL + * 3 : StPAL + * 4 : ExtHiTV + * 5 : StHiTV + * 6 : Ext525i + * 7 : St525i + * 8 : Ext525p + * 9 : St525p + * A : Ext750p + * B : St750p + */ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo) { unsigned short tempbx = 0; @@ -4535,24 +4512,24 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo) return tempbx; } -/* --------------------------------------------------------------------- */ -/* Function : XGI_GetTVPtrIndex2 */ -/* Input : */ -/* Output : bx 0 : NTSC */ -/* 1 : PAL */ -/* 2 : PALM */ -/* 3 : PALN */ -/* 4 : NTSC1024x768 */ -/* 5 : PAL-M 1024x768 */ -/* 6-7: reserved */ -/* cl 0 : YFilter1 */ -/* 1 : YFilter2 */ -/* ch 0 : 301A */ -/* 1 : 301B/302B/301LV/302LV */ -/* Description : */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_GetTVPtrIndex2 + * Input : + * Output : bx 0 : NTSC + * 1 : PAL + * 2 : PALM + * 3 : PALN + * 4 : NTSC1024x768 + * 5 : PAL-M 1024x768 + * 6-7: reserved + * cl 0 : YFilter1 + * 1 : YFilter2 + * ch 0 : 301A + * 1 : 301B/302B/301LV/302LV + * Description : + */ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl, - unsigned char *tempch, struct vb_device_info *pVBInfo) + unsigned char *tempch, struct vb_device_info *pVBInfo) { *tempbx = 0; *tempcl = 0; @@ -4637,33 +4614,32 @@ static void XGI_SetLCDCap_A(unsigned short tempcx, if (temp & LCDRGB18Bit) { xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F, - /* Enable Dither */ - 
(unsigned short) (0x20 | (tempcx & 0x00C0))); + /* Enable Dither */ + (unsigned short)(0x20 | (tempcx & 0x00C0))); xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80); } else { xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F, - (unsigned short) (0x30 | (tempcx & 0x00C0))); + (unsigned short)(0x30 | (tempcx & 0x00C0))); xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00); } } -/* --------------------------------------------------------------------- */ -/* Function : XGI_SetLCDCap_B */ -/* Input : cx -> LCD Capability */ -/* Output : */ -/* Description : */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_SetLCDCap_B + * Input : cx -> LCD Capability + * Output : + * Description : + */ static void XGI_SetLCDCap_B(unsigned short tempcx, struct vb_device_info *pVBInfo) { if (tempcx & EnableLCD24bpp) /* 24bits */ xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0, - (unsigned short) (((tempcx & 0x00ff) >> 6) - | 0x0c)); + (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x0c)); else xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0, - (unsigned short) (((tempcx & 0x00ff) >> 6) - | 0x18)); /* Enable Dither */ + (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x18)); + /* Enable Dither */ } static void XGI_LongWait(struct vb_device_info *pVBInfo) @@ -4698,13 +4674,13 @@ static void SetSpectrum(struct vb_device_info *pVBInfo) XGI_LongWait(pVBInfo); xgifb_reg_set(pVBInfo->Part4Port, 0x31, - pVBInfo->LCDCapList[index].Spectrum_31); + pVBInfo->LCDCapList[index].Spectrum_31); xgifb_reg_set(pVBInfo->Part4Port, 0x32, - pVBInfo->LCDCapList[index].Spectrum_32); + pVBInfo->LCDCapList[index].Spectrum_32); xgifb_reg_set(pVBInfo->Part4Port, 0x33, - pVBInfo->LCDCapList[index].Spectrum_33); + pVBInfo->LCDCapList[index].Spectrum_33); xgifb_reg_set(pVBInfo->Part4Port, 0x34, - pVBInfo->LCDCapList[index].Spectrum_34); + pVBInfo->LCDCapList[index].Spectrum_34); XGI_LongWait(pVBInfo); xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x40); /* enable spectrum */ } @@ -4721,13 +4697,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo) (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) { /* Set 301LV Capability */ xgifb_reg_set(pVBInfo->Part4Port, 0x24, - (unsigned char) (tempcx & 0x1F)); + (unsigned char)(tempcx & 0x1F)); } /* VB Driving */ xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D, - ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8), - (unsigned short) ((tempcx & (EnableVBCLKDRVLOW - | EnablePLLSPLOW)) >> 8)); + ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8), + (unsigned short)((tempcx & (EnableVBCLKDRVLOW | + EnablePLLSPLOW)) >> 8)); if (pVBInfo->VBInfo & SetCRT2ToLCD) XGI_SetLCDCap_B(tempcx, pVBInfo); @@ -4744,12 +4720,12 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo) } } -/* --------------------------------------------------------------------- */ -/* Function : XGI_SetAntiFlicker */ -/* Input : */ -/* Output : */ -/* Description : Set TV Customized Param. */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_SetAntiFlicker + * Input : + * Output : + * Description : Set TV Customized Param. 
+ */ static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo) { unsigned short tempbx; @@ -4792,13 +4768,13 @@ static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo) XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */ tempData = TVPhaseList[tempbx]; - xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short) (tempData + xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short)(tempData & 0x000000FF)); - xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short) ((tempData + xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short)((tempData & 0x0000FF00) >> 8)); - xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short) ((tempData + xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short)((tempData & 0x00FF0000) >> 16)); - xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short) ((tempData + xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short)((tempData & 0xFF000000) >> 24)); } @@ -4866,12 +4842,12 @@ static void XGI_SetYFilter(unsigned short ModeIdIndex, } } -/* --------------------------------------------------------------------- */ -/* Function : XGI_OEM310Setting */ -/* Input : */ -/* Output : */ -/* Description : Customized Param. for 301 */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_OEM310Setting + * Input : + * Output : + * Description : Customized Param. for 301 + */ static void XGI_OEM310Setting(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo) { @@ -4890,12 +4866,12 @@ static void XGI_OEM310Setting(unsigned short ModeIdIndex, } } -/* --------------------------------------------------------------------- */ -/* Function : XGI_SetCRT2ModeRegs */ -/* Input : */ -/* Output : */ -/* Description : Origin code for crt2group */ -/* --------------------------------------------------------------------- */ +/* + * Function : XGI_SetCRT2ModeRegs + * Input : + * Output : + * Description : Origin code for crt2group + */ static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo) { unsigned short tempbl; @@ -4999,8 +4975,8 @@ static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo) tempah |= 0x40; } - if ((pVBInfo->LCDResInfo == Panel_1280x1024) - || (pVBInfo->LCDResInfo == Panel_1280x1024x75)) + if ((pVBInfo->LCDResInfo == Panel_1280x1024) || + (pVBInfo->LCDResInfo == Panel_1280x1024x75)) tempah |= 0x80; if (pVBInfo->LCDResInfo == Panel_1280x960) @@ -5068,8 +5044,9 @@ void XGI_LockCRT2(struct vb_device_info *pVBInfo) } unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE, - unsigned short ModeNo, unsigned short ModeIdIndex, - struct vb_device_info *pVBInfo) + unsigned short ModeNo, + unsigned short ModeIdIndex, + struct vb_device_info *pVBInfo) { const u8 LCDARefreshIndex[] = { 0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 }; @@ -5143,14 +5120,14 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE, } static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex, - struct xgi_hw_device_info *HwDeviceExtension, - struct vb_device_info *pVBInfo) + struct xgi_hw_device_info *HwDeviceExtension, + struct vb_device_info *pVBInfo) { unsigned short RefreshRateTableIndex; pVBInfo->SetFlag |= ProgrammingCRT2; RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, - ModeIdIndex, pVBInfo); + ModeIdIndex, pVBInfo); XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo); XGI_GetLVDSData(ModeIdIndex, pVBInfo); XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo); @@ -5159,8 +5136,8 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, 
unsigned short ModeIdIndex, } static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo, - struct xgi_hw_device_info *HwDeviceExtension, - struct vb_device_info *pVBInfo) + struct xgi_hw_device_info *HwDeviceExtension, + struct vb_device_info *pVBInfo) { unsigned short ModeIdIndex, RefreshRateTableIndex; @@ -5168,7 +5145,7 @@ static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo, XGI_SearchModeID(ModeNo, &ModeIdIndex); pVBInfo->SelectCRT2Rate = 4; RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, - ModeIdIndex, pVBInfo); + ModeIdIndex, pVBInfo); XGI_SaveCRT2Info(ModeNo, pVBInfo); XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo); XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo); @@ -5210,39 +5187,39 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo) CR63 = xgifb_reg_get(pVBInfo->P3d4, 0x63); SR01 = xgifb_reg_get(pVBInfo->P3c4, 0x01); - xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char) (SR01 & 0xDF)); - xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char) (CR63 & 0xBF)); + xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char)(SR01 & 0xDF)); + xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char)(CR63 & 0xBF)); CR17 = xgifb_reg_get(pVBInfo->P3d4, 0x17); - xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char) (CR17 | 0x80)); + xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char)(CR17 | 0x80)); SR1F = xgifb_reg_get(pVBInfo->P3c4, 0x1F); - xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) (SR1F | 0x04)); + xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)(SR1F | 0x04)); SR07 = xgifb_reg_get(pVBInfo->P3c4, 0x07); - xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char) (SR07 & 0xFB)); + xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char)(SR07 & 0xFB)); SR06 = xgifb_reg_get(pVBInfo->P3c4, 0x06); - xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char) (SR06 & 0xC3)); + xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char)(SR06 & 0xC3)); xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00); for (i = 0; i < 8; i++) - xgifb_reg_set(pVBInfo->P3d4, (unsigned short) i, CRTCData[i]); + xgifb_reg_set(pVBInfo->P3d4, (unsigned short)i, CRTCData[i]); for (i = 8; i < 11; i++) - xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 8), - CRTCData[i]); + xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 8), + CRTCData[i]); for (i = 11; i < 13; i++) - xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 4), - CRTCData[i]); + xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 4), + CRTCData[i]); for (i = 13; i < 16; i++) - xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i - 3), - CRTCData[i]); + xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i - 3), + CRTCData[i]); - xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char) (CRTCData[16] - & 0xE0)); + xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char)(CRTCData[16] + & 0xE0)); xgifb_reg_set(pVBInfo->P3c4, 0x31, 0x00); xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B); @@ -5275,12 +5252,12 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo) xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get( pVBInfo->P3d4, 0x53) & 0xFD)); - xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F); + xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)SR1F); } static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info, - struct xgi_hw_device_info *HwDeviceExtension, - struct vb_device_info *pVBInfo) + struct xgi_hw_device_info *HwDeviceExtension, + struct vb_device_info *pVBInfo) { unsigned short tempah; @@ -5310,11 +5287,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info, if (!(pVBInfo->VBInfo & DisableCRT2Display)) { xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0, - 
0x20); /* shampoo 0129 */ + 0x20); /* shampoo 0129 */ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) { if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) - /* LVDS PLL power on */ + /* LVDS PLL power on */ xgifb_reg_and(pVBInfo->Part4Port, 0x2A, 0x7F); /* LVDS Driver power on */ @@ -5358,9 +5335,9 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info, } static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info, - struct xgi_hw_device_info *HwDeviceExtension, - unsigned short ModeNo, unsigned short ModeIdIndex, - struct vb_device_info *pVBInfo) + struct xgi_hw_device_info *HwDeviceExtension, + unsigned short ModeNo, unsigned short ModeIdIndex, + struct vb_device_info *pVBInfo) { unsigned short RefreshRateTableIndex, temp; @@ -5389,14 +5366,14 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info, } RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, - ModeIdIndex, pVBInfo); + ModeIdIndex, pVBInfo); if (RefreshRateTableIndex != 0xFFFF) { XGI_SetSync(RefreshRateTableIndex, pVBInfo); XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex, pVBInfo, HwDeviceExtension); XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo); XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex, - HwDeviceExtension, pVBInfo); + HwDeviceExtension, pVBInfo); XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension, RefreshRateTableIndex, pVBInfo); } @@ -5410,15 +5387,15 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info, XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo); XGI_UpdateXG21CRTC(ModeNo, pVBInfo, - RefreshRateTableIndex); + RefreshRateTableIndex); xgifb_set_lcd(HwDeviceExtension->jChipType, pVBInfo, RefreshRateTableIndex); if (pVBInfo->IF_DEF_LVDS == 1) xgifb_set_lvds(xgifb_info, - HwDeviceExtension->jChipType, - ModeIdIndex, pVBInfo); + HwDeviceExtension->jChipType, + ModeIdIndex, pVBInfo); } } @@ -5430,8 +5407,8 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info, } unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info, - struct xgi_hw_device_info *HwDeviceExtension, - unsigned short ModeNo) + struct xgi_hw_device_info *HwDeviceExtension, + unsigned short ModeNo) { unsigned short ModeIdIndex; struct vb_device_info VBINF; @@ -5440,7 +5417,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info, pVBInfo->IF_DEF_LVDS = 0; if (HwDeviceExtension->jChipType >= XG20) - pVBInfo->VBType = 0; /*set VBType default 0*/ + pVBInfo->VBType = 0; /* set VBType default 0 */ XGIRegInit(pVBInfo, xgifb_info->vga_base); @@ -5473,13 +5450,13 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info, XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo); if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) || - !(pVBInfo->VBInfo & SwitchCRT2)) { + !(pVBInfo->VBInfo & SwitchCRT2)) { XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo, - ModeIdIndex, pVBInfo); + ModeIdIndex, pVBInfo); if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) { XGI_SetLCDAGroup(ModeNo, ModeIdIndex, - HwDeviceExtension, pVBInfo); + HwDeviceExtension, pVBInfo); } } @@ -5488,7 +5465,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info, case VB_CHIP_301: /* fall through */ case VB_CHIP_302: XGI_SetCRT2Group301(ModeNo, HwDeviceExtension, - pVBInfo); /*add for CRT2 */ + pVBInfo); /* add for CRT2 */ break; default: @@ -5497,7 +5474,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info, } XGI_SetCRT2ModeRegs(pVBInfo); - XGI_OEM310Setting(ModeIdIndex, pVBInfo); /*0212*/ + XGI_OEM310Setting(ModeIdIndex, 
pVBInfo); /* 0212 */ XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo); } /* !XG20 */ else { @@ -5515,7 +5492,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info, XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo); XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo, - ModeIdIndex, pVBInfo); + ModeIdIndex, pVBInfo); XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo); } diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h index c801deb142f6e5c8ede0a39e3149498f38e202a2..f9f98e06e6d56c6b059408181f785c6722050c4e 100644 --- a/drivers/staging/xgifb/vb_table.h +++ b/drivers/staging/xgifb/vb_table.h @@ -1701,6 +1701,7 @@ static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[] = { { {0x28, 0xF5, 0x00, 0x84, 0xFF, 0x29, 0x90} },/* ; 04 (x768) */ { {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */ }; + /* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */ static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[] = { { {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} },/* ; 00 (320x) */ @@ -1886,17 +1887,17 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = { 0x6C, 0xC3, 0x35, 0x62, 0x0A, 0xC0, 0x28, 0x10}, /* LCDCap1280x1024 */ - {Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap, + {Panel_1280x1024, XGI_LCDDualLink + DefaultLCDCap, 0x70, 0x03, VCLK108_2_315, 0x70, 0x44, 0xF8, 0x2F, 0x0A, 0xC0, 0x30, 0x10}, /* LCDCap1400x1050 */ - {Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap, + {Panel_1400x1050, XGI_LCDDualLink + DefaultLCDCap, 0x70, 0x03, VCLK108_2_315, 0x70, 0x44, 0xF8, 0x2F, 0x0A, 0xC0, 0x30, 0x10}, /* LCDCap1600x1200 */ - {Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap, + {Panel_1600x1200, XGI_LCDDualLink + DefaultLCDCap, 0xC0, 0x03, VCLK162, 0x43, 0x22, 0x70, 0x24, 0x0A, 0xC0, 0x30, 0x10}, @@ -1905,7 +1906,7 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = { 0x2B, 0x61, 0x2B, 0x61, 0x0A, 0xC0, 0x28, 0x10}, /* LCDCap1280x1024x75 */ - {Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap, + {Panel_1280x1024x75, XGI_LCDDualLink + DefaultLCDCap, 0x90, 0x03, VCLK135_5, 0x54, 0x42, 0x4A, 0x61, 0x0A, 0xC0, 0x30, 0x10}, diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h index 08db58b396b23f6422c487c7b3dbf4a56d05f956..052694e750539a5e650c067a7bb11010d46eee41 100644 --- a/drivers/staging/xgifb/vb_util.h +++ b/drivers/staging/xgifb/vb_util.h @@ -18,7 +18,7 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index, u8 temp; temp = xgifb_reg_get(port, index); - temp = (u8) ((temp & data_and) | data_or); + temp = (u8)((temp & data_and) | data_or); xgifb_reg_set(port, index, temp); } @@ -28,7 +28,7 @@ static inline void xgifb_reg_and(unsigned long port, u8 index, u8 temp; temp = xgifb_reg_get(port, index); - temp = (u8) (temp & data_and); + temp = (u8)(temp & data_and); xgifb_reg_set(port, index, temp); } diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index ad26b9372f1096a1940c405a3f019066b35f8e33..96eedfc49c9428938dfdd9fe483d37a0735b7a14 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -653,6 +653,7 @@ static struct iscsit_transport cxgbit_transport = { static struct cxgb4_uld_info cxgbit_uld_info = { .name = DRV_NAME, .nrxq = MAX_ULD_QSETS, + .ntxq = MAX_ULD_QSETS, .rxq_size = 1024, .lro = true, .add = cxgbit_uld_add, diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 
d02bf58aea6d85bfdfa0473b95ca94794135becc..8bcb9b71f764325d585f659fb1430b393b54214c 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include "cxgbit.h" diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index b7d747e92c7abf589e35154b25482a9dedb57118..da2c73a255dec194bba90826f6b3e95e9a264e32 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -23,7 +23,9 @@ #include #include #include +#include #include +#include #include #include #include diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index 4cf2c0f2ba2f981699499cce77726d20aeee9dc9..e0db2ceb0f87cb170a2ff1b12fcb5a8e7a407cb2 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -1,6 +1,18 @@ #ifndef ISCSI_TARGET_H #define ISCSI_TARGET_H +#include +#include + +struct iscsi_cmd; +struct iscsi_conn; +struct iscsi_np; +struct iscsi_portal_group; +struct iscsi_session; +struct iscsi_tpg_np; +struct kref; +struct sockaddr_storage; + extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *); extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int); extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *); diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index e116f0e845c08c4f7c91bd9d91c891c1fb9683b7..903b667f8e0136d1e1d919c1bc51d2a3c99bd7c3 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -20,8 +20,8 @@ #include #include #include +#include #include - #include #include "iscsi_target_nego.h" #include "iscsi_target_auth.h" diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h index d22f7b96a06ca98aa3bd83f669d92eb686cfcec9..1b91c13cc9657e5661c6ea254e799a068eedc4b7 100644 --- a/drivers/target/iscsi/iscsi_target_auth.h +++ b/drivers/target/iscsi/iscsi_target_auth.h @@ -1,6 +1,8 @@ #ifndef _ISCSI_CHAP_H_ #define _ISCSI_CHAP_H_ +#include + #define CHAP_DIGEST_UNKNOWN 0 #define CHAP_DIGEST_MD5 5 #define CHAP_DIGEST_SHA 6 @@ -18,6 +20,9 @@ #define CHAP_STAGE_CLIENT_NRIC 4 #define CHAP_STAGE_SERVER_NR 5 +struct iscsi_node_auth; +struct iscsi_conn; + extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *, int *, int *); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 923c032f0b95f1efe2d1e12e90cb9086f3fd9346..bf40f03755ddc50697652ccde864d40df840fa0b 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -21,10 +21,11 @@ #include #include #include +#include +#include #include #include #include - #include #include "iscsi_target_parameters.h" #include "iscsi_target_device.h" @@ -100,8 +101,10 @@ static ssize_t lio_target_np_driver_store(struct config_item *item, tpg_np_new = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, tpg_np, type); - if (IS_ERR(tpg_np_new)) + if (IS_ERR(tpg_np_new)) { + rc = PTR_ERR(tpg_np_new); goto out; + } } else { tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type); if (tpg_np_new) { diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c index 647d4a5dca5281838b904ba67fd03d8a0ea80642..173ddd93c75726937dabfb33a11e85479437a207 100644 --- 
a/drivers/target/iscsi/iscsi_target_datain_values.c +++ b/drivers/target/iscsi/iscsi_target_datain_values.c @@ -16,8 +16,8 @@ * GNU General Public License for more details. ******************************************************************************/ +#include #include - #include #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_erl1.h" diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h index 646429ac5a02bf3055f87204e7591d09c530062e..16edeeeb7777b447cd93a4222e4e2cf54a10cfa1 100644 --- a/drivers/target/iscsi/iscsi_target_datain_values.h +++ b/drivers/target/iscsi/iscsi_target_datain_values.h @@ -1,6 +1,9 @@ #ifndef ISCSI_TARGET_DATAIN_VALUES_H #define ISCSI_TARGET_DATAIN_VALUES_H +struct iscsi_cmd; +struct iscsi_datain; + extern struct iscsi_datain_req *iscsit_allocate_datain_req(void); extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *); extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *); diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h index a0e2df9e809032034d80da9b398252365b626b92..06dbff5cd52069af9539d24305d0a2960bf5402f 100644 --- a/drivers/target/iscsi/iscsi_target_device.h +++ b/drivers/target/iscsi/iscsi_target_device.h @@ -1,6 +1,9 @@ #ifndef ISCSI_TARGET_DEVICE_H #define ISCSI_TARGET_DEVICE_H +struct iscsi_cmd; +struct iscsi_session; + extern void iscsit_determine_maxcmdsn(struct iscsi_session *); extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *); diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h index a9e2f9497fb22a1734ae27393e63fa351533f2d8..60e69e2af6eda981efb74e4ac313fb0d031093bd 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.h +++ b/drivers/target/iscsi/iscsi_target_erl0.h @@ -1,6 +1,12 @@ #ifndef ISCSI_TARGET_ERL0_H #define ISCSI_TARGET_ERL0_H +#include + +struct iscsi_cmd; +struct iscsi_conn; +struct iscsi_session; + extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *); extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *); extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8); diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 9214c9dafa2be56082b792eaccd31ca87420b59e..fe9b7f1e44aca5c8bcda8677310351f03e507095 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -17,6 +17,7 @@ ******************************************************************************/ #include +#include #include #include #include diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h index 2a3ebf118a342fb33200810237afef1694c78536..54d36bd25beacdf4da8856b0b97ab5e237d4b2aa 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.h +++ b/drivers/target/iscsi/iscsi_target_erl1.h @@ -1,6 +1,16 @@ #ifndef ISCSI_TARGET_ERL1_H #define ISCSI_TARGET_ERL1_H +#include +#include /* itt_t */ + +struct iscsi_cmd; +struct iscsi_conn; +struct iscsi_datain_req; +struct iscsi_ooo_cmdsn; +struct iscsi_pdu; +struct iscsi_session; + extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int); extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes( struct iscsi_cmd *, struct iscsi_datain_req *); diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 
e24f1c7c5862d4af2f0ae53efb3e981f153080db..faf9ae014b30443583555c1a49f089a3ad7d32c0 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -17,6 +17,7 @@ * GNU General Public License for more details. ******************************************************************************/ +#include #include #include #include diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h index 63f2501f3fe08344ea000d3ae83e414fe4e361de..7965f1e865061ef0ec40ef63fe89d33a5e189379 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.h +++ b/drivers/target/iscsi/iscsi_target_erl2.h @@ -1,6 +1,13 @@ #ifndef ISCSI_TARGET_ERL2_H #define ISCSI_TARGET_ERL2_H +#include + +struct iscsi_cmd; +struct iscsi_conn; +struct iscsi_conn_recovery; +struct iscsi_session; + extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32); extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *); extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry( diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 15f79a2ca34ab6e17fd5fda6f68425b9af1809eb..450f51deb2a2ae18137ede36d4a3e8c188fd7352 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -20,6 +20,8 @@ #include #include #include +#include /* TCP_NODELAY */ +#include /* ipv6_addr_v4mapped() */ #include #include #include diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index b597aa2c61a1c60d2794610796ac156c220e43fe..0e1fd6cedd54cb83ffc576654d5428d47e75efa9 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -1,6 +1,13 @@ #ifndef ISCSI_TARGET_LOGIN_H #define ISCSI_TARGET_LOGIN_H +#include + +struct iscsi_conn; +struct iscsi_login; +struct iscsi_np; +struct sockaddr_storage; + extern int iscsi_login_setup_crypto(struct iscsi_conn *); extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *); extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 89d34bd6d87f94519c26168741b74bcb6478061a..46388c9e08dad3e5de751d3280f8fc829683154d 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -18,6 +18,8 @@ #include #include +#include +#include #include #include #include diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h index f021cbd330e51e0c61d85fc4f909e502073f09e5..53438bfca4c66bee15a05ca56ae4d89686d6a05d 100644 --- a/drivers/target/iscsi/iscsi_target_nego.h +++ b/drivers/target/iscsi/iscsi_target_nego.h @@ -4,6 +4,10 @@ #define DECIMAL 0 #define HEX 1 +struct iscsi_conn; +struct iscsi_login; +struct iscsi_np; + extern void convert_null_to_semi(char *, int); extern int extract_param(const char *, const char *, unsigned int, char *, unsigned char *); diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h index 0c69a46a62ec7f9b082679e7a24f24fb0ffe321e..79cdf06ade48bf8d63336877b40af572778f574a 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.h +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h @@ -1,6 +1,11 @@ #ifndef ISCSI_TARGET_NODEATTRIB_H #define ISCSI_TARGET_NODEATTRIB_H +#include + +struct iscsi_node_acl; +struct iscsi_portal_group; + extern void 
iscsit_set_default_node_attribues(struct iscsi_node_acl *, struct iscsi_portal_group *); extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 0efa80bb89628602598346c0647a958536acdf3b..e65bf78ceef3740fc1923c1b3ed446aa2996b82d 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -17,7 +17,7 @@ ******************************************************************************/ #include - +#include /* struct kvec */ #include #include "iscsi_target_util.h" #include "iscsi_target_parameters.h" diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h index a0751e3f0813429bd5c87c2ecab16780cfbfa1c7..9962ccf0ccd7d923d074923661a48e4b058e5400 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.h +++ b/drivers/target/iscsi/iscsi_target_parameters.h @@ -1,6 +1,7 @@ #ifndef ISCSI_PARAMETERS_H #define ISCSI_PARAMETERS_H +#include #include struct iscsi_extra_response { @@ -23,6 +24,11 @@ struct iscsi_param { struct list_head p_list; } ____cacheline_aligned; +struct iscsi_conn; +struct iscsi_conn_ops; +struct iscsi_param_list; +struct iscsi_sess_ops; + extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int); extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int); extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *); diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h index d5b153751a8d223c9edb21a01490748911a4756d..be1234362271b0b3f672c33a83b26f0407b630c8 100644 --- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h +++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h @@ -1,6 +1,9 @@ #ifndef ISCSI_SEQ_AND_PDU_LIST_H #define ISCSI_SEQ_AND_PDU_LIST_H +#include +#include + /* struct iscsi_pdu->status */ #define DATAOUT_PDU_SENT 1 @@ -78,6 +81,8 @@ struct iscsi_seq { u32 xfer_len; } ____cacheline_aligned; +struct iscsi_cmd; + extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32); extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32); extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *); diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h index 142e992cb097a63cb9a8efbd41c0a725ba719592..64cc5c07e47c2d301cfc66c14082a6a76458542f 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.h +++ b/drivers/target/iscsi/iscsi_target_tmr.h @@ -1,6 +1,12 @@ #ifndef ISCSI_TARGET_TMR_H #define ISCSI_TARGET_TMR_H +#include + +struct iscsi_cmd; +struct iscsi_conn; +struct iscsi_tmr_req; + extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *); extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *, unsigned char *); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 0814e5894a9616ffcc79fb0d0f086f718a971fd7..2e7e08dbda4807ed51c6e886b3399bc95199f3d5 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -16,9 +16,9 @@ * GNU General Public License for more details. 
******************************************************************************/ +#include #include #include - #include #include "iscsi_target_erl0.h" #include "iscsi_target_login.h" @@ -260,7 +260,6 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro iscsi_release_param_list(tpg->param_list); tpg->param_list = NULL; } - kfree(tpg); return -ENOMEM; } diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index 2da211920c186215e740d1aaa47f781999d1fb71..ceba298511677a5cf5ca49d784258f6c111da7dd 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -1,6 +1,15 @@ #ifndef ISCSI_TARGET_TPG_H #define ISCSI_TARGET_TPG_H +#include + +struct iscsi_np; +struct iscsi_session; +struct iscsi_tiqn; +struct iscsi_tpg_np; +struct se_node_acl; +struct sockaddr_storage; + extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16); extern int iscsit_load_discovery_tpg(void); extern void iscsit_release_discovery_tpg(void); diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c index 08217d62fb0d6860e40bcb9fa4b2947e710b3bd4..c4eb141c6435983ea3493159f48cf6c3919a9f7c 100644 --- a/drivers/target/iscsi/iscsi_target_transport.c +++ b/drivers/target/iscsi/iscsi_target_transport.c @@ -1,5 +1,6 @@ #include #include +#include #include static LIST_HEAD(g_transport_list); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 1f38177207e0806b18641766fdd6849ae1555a9b..b5a1b4ccba124d4dbf60fd528ec05d3a7d0dbf32 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -18,6 +18,7 @@ #include #include +#include /* ipv6_addr_equal() */ #include #include #include diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 995f1cb29d0e08268acf9f3547494d23498dc87b..8ff08856516aba68394fc07661ec71b635c8b6a2 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -1,8 +1,16 @@ #ifndef ISCSI_TARGET_UTIL_H #define ISCSI_TARGET_UTIL_H +#include +#include /* itt_t */ + #define MARKER_SIZE 8 +struct iscsi_cmd; +struct iscsi_conn; +struct iscsi_conn_recovery; +struct iscsi_session; + extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32); extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32); extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *); diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 4346462094a1af4e4ce778abd41a0f1da1d0a559..a8a230b4e6b532866becda3b04d70d2fee7c93c2 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -1,3 +1,7 @@ +#include +#include +#include /* struct se_cmd */ + #define TCM_LOOP_VERSION "v2.1-rc2" #define TL_WWN_ADDR_LEN 256 #define TL_TPGS_PER_HBA 32 diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 58bb6ed181853b49370d9fda31f73e7bd24c7fda..e5c3e5f827d0b8f163bbe9f1e78c2da7cfc2bae5 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -928,7 +929,7 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess, struct sbp_target_request *req; int tag; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); + tag = 
percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); if (tag < 0) return ERR_PTR(-ENOMEM); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 4c82bbe19003d083979fac3139ab91e3a15a01a0..f5e330099bfca713f4cb12bd2dc77826fdad1b3b 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -26,8 +26,11 @@ #include #include #include +#include #include +#include #include +#include #include #include diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index 9b250f9b33bfb830ff194e3b0bf028f9e0ff02fb..c69c11baf07f03ab6dae23a52bace4e956b613a2 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -1,6 +1,8 @@ #ifndef TARGET_CORE_ALUA_H #define TARGET_CORE_ALUA_H +#include + /* * INQUIRY response data, TPGS Field * diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 2001005bef45845dc45fdbf1e4bebf8c82d84fc5..54b36c9835be3ae2127cb1f447321eba73b824ac 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -143,13 +143,13 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item, pr_err("db_root: cannot open: %s\n", db_root_stage); return -EINVAL; } - if (!S_ISDIR(fp->f_inode->i_mode)) { - filp_close(fp, 0); + if (!S_ISDIR(file_inode(fp)->i_mode)) { + filp_close(fp, NULL); mutex_unlock(&g_tf_lock); pr_err("db_root: not a directory: %s\n", db_root_stage); return -EINVAL; } - filp_close(fp, 0); + filp_close(fp, NULL); strncpy(db_root, db_root_stage, read_bytes); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 6b423485c5d6b4f6e8e54a332f1dd62eda0325d9..1ebd13ef7bd333c5cbc488f7543eda58e29a2123 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 31a096aa16aba6dc4239cd412f813709709723f8..d8a16ca6baa507b235cbec2fbff56a648874fd2e 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -137,7 +137,7 @@ static int target_fabric_mappedlun_link( return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro); } -static int target_fabric_mappedlun_unlink( +static void target_fabric_mappedlun_unlink( struct config_item *lun_acl_ci, struct config_item *lun_ci) { @@ -146,7 +146,7 @@ static int target_fabric_mappedlun_unlink( struct se_lun *lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); - return core_dev_del_initiator_node_lun_acl(lun, lacl); + core_dev_del_initiator_node_lun_acl(lun, lacl); } static struct se_lun_acl *item_to_lun_acl(struct config_item *item) @@ -669,7 +669,7 @@ static int target_fabric_port_link( return ret; } -static int target_fabric_port_unlink( +static void target_fabric_port_unlink( struct config_item *lun_ci, struct config_item *se_dev_ci) { @@ -688,7 +688,6 @@ static int target_fabric_port_unlink( } core_dev_del_lun(se_tpg, lun); - return 0; } static void target_fabric_port_release(struct config_item *item) diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index d545993df18be9ede3253861e24c25726d9a8e27..87aa376a1a1ae9f9119369725199d7bd5ba22a1e 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -32,6 +32,7 @@ #include #include 
#include +#include #include #include diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 068966fce3089527fb7f14dd7bd3aa0fafdd1041..526595a072de899c618487b4edc909ad0fd64d91 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -1,6 +1,8 @@ #ifndef TARGET_CORE_FILE_H #define TARGET_CORE_FILE_H +#include + #define FD_VERSION "4.0" #define FD_MAX_DEV_NAME 256 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 372d744315f38e971da84d1f4eca26a66666a158..d316ed537d59132a3fde2dfd11eba3194d899bec 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd) bio = bio_alloc(GFP_KERNEL, 0); bio->bi_end_io = iblock_end_io_flush; bio->bi_bdev = ib_dev->ibd_bd; - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; if (!immed) bio->bi_private = cmd; submit_bio(bio); @@ -686,15 +686,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); /* - * Force writethrough using WRITE_FUA if a volatile write cache + * Force writethrough using REQ_FUA if a volatile write cache * is not enabled, or if initiator set the Force Unit Access bit. */ op = REQ_OP_WRITE; if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) { if (cmd->se_cmd_flags & SCF_FUA) - op_flags = WRITE_FUA; + op_flags = REQ_FUA; else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) - op_flags = WRITE_FUA; + op_flags = REQ_FUA; } } else { op = REQ_OP_READ; diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 01c2afd815008d6cfa9d6b6d5490e1c39394db89..718d3fcd3e7cd8d8cacd7057ff85119a836725ca 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -1,6 +1,9 @@ #ifndef TARGET_CORE_IBLOCK_H #define TARGET_CORE_IBLOCK_H +#include +#include + #define IBLOCK_VERSION "4.0" #define IBLOCK_MAX_CDBS 16 diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index e2c970a9d61c32c7a95d034f889992a7560fdb64..9ab7090f7c839c6900cb30ddf7db1b8be4bc78cf 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -1,6 +1,11 @@ #ifndef TARGET_CORE_INTERNAL_H #define TARGET_CORE_INTERNAL_H +#include +#include +#include +#include + #define TARGET_CORE_NAME_MAX_LEN 64 #define TARGET_FABRIC_NAME_SIZE 32 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 47463c99c3181ed8e133b2d39ba9362d0196541a..d761025144f9dc178cc43d4803b4c79b0147815b 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include @@ -253,8 +255,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd) if ((cmd->t_task_cdb[1] & 0x01) && (cmd->t_task_cdb[1] & 0x02)) { - pr_err("LongIO and Obselete Bits set, returning" - " ILLEGAL_REQUEST\n"); + pr_err("LongIO and Obsolete Bits set, returning ILLEGAL_REQUEST\n"); return TCM_UNSUPPORTED_SCSI_OPCODE; } /* diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index e3d26e9126a01fa860693d4ecb6852b0c7de3d36..847bd470339c7ab1e1d498a6ddf37be37c526d57 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -1,5 +1,9 @@ #ifndef TARGET_CORE_PR_H #define TARGET_CORE_PR_H 
+ +#include +#include + /* * PERSISTENT_RESERVE_OUT service action codes * diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 9125d9358dea1d8c58c2bdad7be5ab4483229edd..04d7aa7390d0fcea217daabb05a0db725eac227d 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -935,13 +935,9 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, rc = bio_add_pc_page(pdv->pdv_sd->request_queue, bio, page, bytes, off); - if (rc != bytes) - goto fail; - pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", - bio->bi_vcnt, nr_vecs); - - if (bio->bi_vcnt > nr_vecs) { + bio_segments(bio), nr_vecs); + if (rc != bytes) { pr_debug("PSCSI: Reached bio->bi_vcnt max:" " %d i: %d bio: %p, allocating another" " bio\n", bio->bi_vcnt, i, bio); diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 6d2007e35df65919f1687341499b26076ba25ed1..8a02fa47c7e8e907952f740b490f145f083e8459 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -15,11 +15,12 @@ #define PS_TIMEOUT_DISK (15*HZ) #define PS_TIMEOUT_OTHER (500*HZ) -#include -#include -#include +#include /* ___cacheline_aligned */ +#include /* struct se_device */ +struct block_device; struct scsi_device; +struct Scsi_Host; struct pscsi_plugin_task { unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER]; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 24b36fd785f19a03d4dcd4507890d4ef850f023f..ddc216c9f1f63dcdea780b5be5edbf34d9cc93d4 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -26,7 +26,9 @@ #include #include +#include #include +#include #include #include #include diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index cc46a6a89b38e863a3d7b4c2f13207d251539fd2..91fc1a34791d909a1d68d265321caeaa833657db 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -1,6 +1,10 @@ #ifndef TARGET_CORE_RD_H #define TARGET_CORE_RD_H +#include +#include +#include + #define RD_HBA_VERSION "v4.0" #define RD_MCP_VERSION "4.0" diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 04f616b3ba0a848a80d4a70c084c1b45d406c168..4879e70e2eefb68ddc229effbe4a9822f369ce3f 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h index bd6e78ba153d68bd37b784ba7ebd52290932906c..97402856a8f0e3be40ae8eee5b0f74e74fdb2f9d 100644 --- a/drivers/target/target_core_ua.h +++ b/drivers/target/target_core_ua.h @@ -1,6 +1,8 @@ #ifndef TARGET_CORE_UA_H #define TARGET_CORE_UA_H +#include + /* * From spc4r17, Table D.1: ASC and ASCQ Assignement */ diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 47562509b4892b07b85cb304a3190804c658d6e9..8041710b697298ec7073c4e5910849bd1a154703 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -147,8 +148,8 @@ static const struct genl_multicast_group tcmu_mcgrps[] = { }; /* Our generic netlink family */ -static struct genl_family tcmu_genl_family = { - .id = GENL_ID_GENERATE, +static struct genl_family tcmu_genl_family __ro_after_init = { + .module = THIS_MODULE, .hdrsize = 0, .name = "TCM-USER", .version = 1, @@ 
-537,7 +538,7 @@ tcmu_queue_cmd(struct se_cmd *se_cmd) struct se_device *se_dev = se_cmd->se_dev; struct tcmu_dev *udev = TCMU_DEV(se_dev); struct tcmu_cmd *tcmu_cmd; - int ret; + sense_reason_t ret; tcmu_cmd = tcmu_alloc_cmd(se_cmd); if (!tcmu_cmd) @@ -685,8 +686,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); cmd->se_cmd = NULL; - kmem_cache_free(tcmu_cmd_cache, cmd); - return 0; } diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 094a1440eacb3dccdd9c35a678a2940c3e03216d..37d5caebffa6b593025a28b703a54a71e7d940d3 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h index 700a981c7b415264c40d70058cc3fc6c497b32ed..4d3d4dd060f28366ebd069abb603472ae0275d5b 100644 --- a/drivers/target/target_core_xcopy.h +++ b/drivers/target/target_core_xcopy.h @@ -1,3 +1,5 @@ +#include + #define XCOPY_TARGET_DESC_LEN 32 #define XCOPY_SEGMENT_DESC_LEN 28 #define XCOPY_NAA_IEEE_REGEX_LEN 16 diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index e28209b99b59804de51663afe0a677c91745a827..11d27b93b41392aee08a4034c1e828927521defc 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -17,6 +17,9 @@ #ifndef __TCM_FC_H__ #define __TCM_FC_H__ +#include +#include + #define FT_VERSION "0.4" #define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index ff5de9a96643f9b21e06a14af4bcd5e7277689f9..9af7842b8178e97c66fb8935c0891301b288a78d 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -92,7 +92,7 @@ static void ft_free_cmd(struct ft_cmd *cmd) fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) - lport->tt.seq_release(fr_seq(fp)); + fc_seq_release(fr_seq(fp)); fc_frame_free(fp); percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); ft_sess_put(sess); /* undo get from lookup at recv */ @@ -161,11 +161,11 @@ int ft_queue_status(struct se_cmd *se_cmd) /* * Send response. 
*/ - cmd->seq = lport->tt.seq_start_next(cmd->seq); + cmd->seq = fc_seq_start_next(cmd->seq); fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); - rc = lport->tt.seq_send(lport, cmd->seq, fp); + rc = fc_seq_send(lport, cmd->seq, fp); if (rc) { pr_info_ratelimited("%s: Failed to send response frame %p, " "xid <0x%x>\n", __func__, fp, ep->xid); @@ -177,7 +177,7 @@ int ft_queue_status(struct se_cmd *se_cmd) se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; return -ENOMEM; } - lport->tt.exch_done(cmd->seq); + fc_exch_done(cmd->seq); /* * Drop the extra ACK_KREF reference taken by target_submit_cmd() * ahead of ft_check_stop_free() -> transport_generic_free_cmd() @@ -221,7 +221,7 @@ int ft_write_pending(struct se_cmd *se_cmd) memset(txrdy, 0, sizeof(*txrdy)); txrdy->ft_burst_len = htonl(se_cmd->data_length); - cmd->seq = lport->tt.seq_start_next(cmd->seq); + cmd->seq = fc_seq_start_next(cmd->seq); fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); @@ -242,7 +242,7 @@ int ft_write_pending(struct se_cmd *se_cmd) cmd->was_ddp_setup = 1; } } - lport->tt.seq_send(lport, cmd->seq, fp); + fc_seq_send(lport, cmd->seq, fp); return 0; } @@ -323,8 +323,8 @@ static void ft_send_resp_status(struct fc_lport *lport, fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); sp = fr_seq(fp); if (sp) { - lport->tt.seq_send(lport, sp, fp); - lport->tt.exch_done(sp); + fc_seq_send(lport, sp, fp); + fc_exch_done(sp); } else { lport->tt.frame_send(lport, fp); } @@ -461,7 +461,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) cmd->se_cmd.map_tag = tag; cmd->sess = sess; - cmd->seq = lport->tt.seq_assign(lport, fp); + cmd->seq = fc_seq_assign(lport, fp); if (!cmd->seq) { percpu_ida_free(&se_sess->sess_tag_pool, tag); goto busy; @@ -563,7 +563,7 @@ static void ft_send_work(struct work_struct *work) task_attr = TCM_SIMPLE_TAG; } - fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); + fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd); cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid; /* * Use a single se_cmd->cmd_kref as we expect to release se_cmd diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 6f7c65abfe2af769fdcae37fff4542f82a38be60..1eb1f58e00e494ee6f3388c08d387cc551d1682d 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -82,7 +82,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) ep = fc_seq_exch(cmd->seq); lport = ep->lp; - cmd->seq = lport->tt.seq_start_next(cmd->seq); + cmd->seq = fc_seq_start_next(cmd->seq); remaining = se_cmd->data_length; @@ -174,7 +174,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) f_ctl |= FC_FC_END_SEQ; fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, FC_TYPE_FCP, f_ctl, fh_off); - error = lport->tt.seq_send(lport, seq, fp); + error = fc_seq_send(lport, seq, fp); if (error) { pr_info_ratelimited("%s: Failed to send frame %p, " "xid <0x%x>, remaining %zu, " diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index a13541bdc726899807dd8723c92c21312fbc6439..c2c056cc7ea52e1a7170b35de2832c1b1dcf40b4 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -177,8 +177,10 @@ config THERMAL_EMULATION config HISI_THERMAL tristate "Hisilicon thermal driver" - depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST + depends on ARCH_HISI || COMPILE_TEST depends on HAS_IOMEM + depends on OF + default y help Enable this to plug 
hisilicon's thermal sensor driver into the Linux thermal framework. cpufreq is used as the cooling device to throttle diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index c92eb22a41ff89f3f1c61c61977de7eb9ba516ae..6a3d7b57303655f58b7fec456cc43519fa29dc91 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -3,7 +3,8 @@ # obj-$(CONFIG_THERMAL) += thermal_sys.o -thermal_sys-y += thermal_core.o +thermal_sys-y += thermal_core.o thermal_sysfs.o \ + thermal_helpers.o # interface to/from other layers providing sensors thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c index e776cea80cfcb19bc61c12908b71969a55f3e64e..f491faf165923b7b463fe406f07404e0565939c6 100644 --- a/drivers/thermal/db8500_thermal.c +++ b/drivers/thermal/db8500_thermal.c @@ -512,6 +512,7 @@ static const struct of_device_id db8500_thermal_match[] = { { .compatible = "stericsson,db8500-thermal" }, {}, }; +MODULE_DEVICE_TABLE(of, db8500_thermal_match); #endif static struct platform_driver db8500_thermal_driver = { diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index 81631b110e178f3b186746f32c8a8f4c001c0046..5a737fd5f1aa470104406ccfafbd3e50d03b9a03 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -238,7 +238,7 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq) return 0; } - return dfc->power_ops->get_static_power(voltage); + return dfc->power_ops->get_static_power(df, voltage); } /** @@ -262,7 +262,8 @@ get_dynamic_power(struct devfreq_cooling_device *dfc, unsigned long freq, struct devfreq_cooling_power *dfc_power = dfc->power_ops; if (dfc_power->get_dynamic_power) - return dfc_power->get_dynamic_power(freq, voltage); + return dfc_power->get_dynamic_power(dfc->devfreq, freq, + voltage); freq_mhz = freq / 1000000; power = (u64)dfc_power->dyn_power_coeff * freq_mhz * voltage * voltage; diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index 5836e55544331dad4cbfcf918a29923d3025aba0..9413c4abf0b93cca1681e2e3a46d083219cf5a5c 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -96,7 +96,7 @@ static ssize_t current_uuid_store(struct device *dev, return -EINVAL; } -static DEVICE_ATTR(current_uuid, 0644, current_uuid_show, current_uuid_store); +static DEVICE_ATTR_RW(current_uuid); static DEVICE_ATTR_RO(available_uuids); static struct attribute *uuid_attrs[] = { &dev_attr_available_uuids.attr, diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c index 19bf2028e508437e46c96e1a5df89e763f6cf59c..2b49e8d0fe9eb3befd7a43b2c70e66cceb77bc74 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel_pch_thermal.c @@ -29,6 +29,7 @@ #define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */ #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ #define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */ +#define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */ /* Wildcat Point-LP PCH Thermal registers */ #define WPT_TEMP 0x0000 /* Temperature */ @@ -273,37 +274,44 @@ static struct thermal_zone_device_ops tzd_ops = { .get_trip_temp = pch_get_trip_temp, }; +enum board_ids { + board_hsw, + board_wpt, + board_skl, +}; + +static const struct board_info { + const char *name; + const struct pch_dev_ops *ops; +} board_info[] = { + [board_hsw] = { + .name = 
"pch_haswell", + .ops = &pch_dev_ops_wpt, + }, + [board_wpt] = { + .name = "pch_wildcat_point", + .ops = &pch_dev_ops_wpt, + }, + [board_skl] = { + .name = "pch_skylake", + .ops = &pch_dev_ops_wpt, + }, +}; static int intel_pch_thermal_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + enum board_ids board_id = id->driver_data; + const struct board_info *bi = &board_info[board_id]; struct pch_thermal_device *ptd; int err; int nr_trips; - char *dev_name; ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL); if (!ptd) return -ENOMEM; - switch (pdev->device) { - case PCH_THERMAL_DID_WPT: - ptd->ops = &pch_dev_ops_wpt; - dev_name = "pch_wildcat_point"; - break; - case PCH_THERMAL_DID_SKL: - ptd->ops = &pch_dev_ops_wpt; - dev_name = "pch_skylake"; - break; - case PCH_THERMAL_DID_HSW_1: - case PCH_THERMAL_DID_HSW_2: - ptd->ops = &pch_dev_ops_wpt; - dev_name = "pch_haswell"; - break; - default: - dev_err(&pdev->dev, "unknown pch thermal device\n"); - return -ENODEV; - } + ptd->ops = bi->ops; pci_set_drvdata(pdev, ptd); ptd->pdev = pdev; @@ -331,11 +339,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev, if (err) goto error_cleanup; - ptd->tzd = thermal_zone_device_register(dev_name, nr_trips, 0, ptd, + ptd->tzd = thermal_zone_device_register(bi->name, nr_trips, 0, ptd, &tzd_ops, NULL, 0, 0); if (IS_ERR(ptd->tzd)) { dev_err(&pdev->dev, "Failed to register thermal zone %s\n", - dev_name); + bi->name); err = PTR_ERR(ptd->tzd); goto error_cleanup; } @@ -380,10 +388,16 @@ static int intel_pch_thermal_resume(struct device *device) } static struct pci_device_id intel_pch_thermal_id[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1), + .driver_data = board_hsw, }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2), + .driver_data = board_hsw, }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT), + .driver_data = board_wpt, }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL), + .driver_data = board_skl, }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H), + .driver_data = board_skl, }, { 0, }, }; MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 7a223074df3d4338ede8ad0192ce12d0dcb0c1ce..df64692e9e64a22edf2d43f8b2e6a1f4b01f931b 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -56,7 +55,6 @@ #include #include #include -#include #include #define MAX_TARGET_RATIO (50U) @@ -86,11 +84,26 @@ static unsigned int control_cpu; /* The cpu assigned to collect stat and update */ static bool clamping; +static const struct sched_param sparam = { + .sched_priority = MAX_USER_RT_PRIO / 2, +}; +struct powerclamp_worker_data { + struct kthread_worker *worker; + struct kthread_work balancing_work; + struct kthread_delayed_work idle_injection_work; + unsigned int cpu; + unsigned int count; + unsigned int guard; + unsigned int window_size_now; + unsigned int target_ratio; + unsigned int duration_jiffies; + bool clamping; +}; -static struct task_struct * __percpu *powerclamp_thread; +static struct powerclamp_worker_data * __percpu worker_data; static struct thermal_cooling_device *cooling_dev; static 
unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu - * clamping thread + * clamping kthread worker */ static unsigned int duration; @@ -262,11 +275,6 @@ static u64 pkg_state_counter(void) return count; } -static void noop_timer(unsigned long foo) -{ - /* empty... just the fact that we get the interrupt wakes us up */ -} - static unsigned int get_compensation(int ratio) { unsigned int comp = 0; @@ -368,103 +376,79 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio, return set_target_ratio + guard <= current_ratio; } -static int clamp_thread(void *arg) +static void clamp_balancing_func(struct kthread_work *work) { - int cpunr = (unsigned long)arg; - DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0); - static const struct sched_param param = { - .sched_priority = MAX_USER_RT_PRIO/2, - }; - unsigned int count = 0; - unsigned int target_ratio; + struct powerclamp_worker_data *w_data; + int sleeptime; + unsigned long target_jiffies; + unsigned int compensated_ratio; + int interval; /* jiffies to sleep for each attempt */ - set_bit(cpunr, cpu_clamping_mask); - set_freezable(); - init_timer_on_stack(&wakeup_timer); - sched_setscheduler(current, SCHED_FIFO, ¶m); - - while (true == clamping && !kthread_should_stop() && - cpu_online(cpunr)) { - int sleeptime; - unsigned long target_jiffies; - unsigned int guard; - unsigned int compensated_ratio; - int interval; /* jiffies to sleep for each attempt */ - unsigned int duration_jiffies = msecs_to_jiffies(duration); - unsigned int window_size_now; - - try_to_freeze(); - /* - * make sure user selected ratio does not take effect until - * the next round. adjust target_ratio if user has changed - * target such that we can converge quickly. - */ - target_ratio = set_target_ratio; - guard = 1 + target_ratio/20; - window_size_now = window_size; - count++; - - /* - * systems may have different ability to enter package level - * c-states, thus we need to compensate the injected idle ratio - * to achieve the actual target reported by the HW. - */ - compensated_ratio = target_ratio + - get_compensation(target_ratio); - if (compensated_ratio <= 0) - compensated_ratio = 1; - interval = duration_jiffies * 100 / compensated_ratio; - - /* align idle time */ - target_jiffies = roundup(jiffies, interval); - sleeptime = target_jiffies - jiffies; - if (sleeptime <= 0) - sleeptime = 1; - schedule_timeout_interruptible(sleeptime); - /* - * only elected controlling cpu can collect stats and update - * control parameters. - */ - if (cpunr == control_cpu && !(count%window_size_now)) { - should_skip = - powerclamp_adjust_controls(target_ratio, - guard, window_size_now); - smp_mb(); - } + w_data = container_of(work, struct powerclamp_worker_data, + balancing_work); - if (should_skip) - continue; - - target_jiffies = jiffies + duration_jiffies; - mod_timer(&wakeup_timer, target_jiffies); - if (unlikely(local_softirq_pending())) - continue; - /* - * stop tick sched during idle time, interrupts are still - * allowed. thus jiffies are updated properly. - */ - preempt_disable(); - /* mwait until target jiffies is reached */ - while (time_before(jiffies, target_jiffies)) { - unsigned long ecx = 1; - unsigned long eax = target_mwait; - - /* - * REVISIT: may call enter_idle() to notify drivers who - * can save power during cpu idle. 
same for exit_idle() - */ - local_touch_nmi(); - stop_critical_timings(); - mwait_idle_with_hints(eax, ecx); - start_critical_timings(); - atomic_inc(&idle_wakeup_counter); - } - preempt_enable(); + /* + * make sure user selected ratio does not take effect until + * the next round. adjust target_ratio if user has changed + * target such that we can converge quickly. + */ + w_data->target_ratio = READ_ONCE(set_target_ratio); + w_data->guard = 1 + w_data->target_ratio / 20; + w_data->window_size_now = window_size; + w_data->duration_jiffies = msecs_to_jiffies(duration); + w_data->count++; + + /* + * systems may have different ability to enter package level + * c-states, thus we need to compensate the injected idle ratio + * to achieve the actual target reported by the HW. + */ + compensated_ratio = w_data->target_ratio + + get_compensation(w_data->target_ratio); + if (compensated_ratio <= 0) + compensated_ratio = 1; + interval = w_data->duration_jiffies * 100 / compensated_ratio; + + /* align idle time */ + target_jiffies = roundup(jiffies, interval); + sleeptime = target_jiffies - jiffies; + if (sleeptime <= 0) + sleeptime = 1; + + if (clamping && w_data->clamping && cpu_online(w_data->cpu)) + kthread_queue_delayed_work(w_data->worker, + &w_data->idle_injection_work, + sleeptime); +} + +static void clamp_idle_injection_func(struct kthread_work *work) +{ + struct powerclamp_worker_data *w_data; + + w_data = container_of(work, struct powerclamp_worker_data, + idle_injection_work.work); + + /* + * only elected controlling cpu can collect stats and update + * control parameters. + */ + if (w_data->cpu == control_cpu && + !(w_data->count % w_data->window_size_now)) { + should_skip = + powerclamp_adjust_controls(w_data->target_ratio, + w_data->guard, + w_data->window_size_now); + smp_mb(); } - del_timer_sync(&wakeup_timer); - clear_bit(cpunr, cpu_clamping_mask); - return 0; + if (should_skip) + goto balance; + + play_idle(jiffies_to_msecs(w_data->duration_jiffies)); + +balance: + if (clamping && w_data->clamping && cpu_online(w_data->cpu)) + kthread_queue_work(w_data->worker, &w_data->balancing_work); } /* @@ -508,10 +492,60 @@ static void poll_pkg_cstate(struct work_struct *dummy) schedule_delayed_work(&poll_pkg_cstate_work, HZ); } +static void start_power_clamp_worker(unsigned long cpu) +{ + struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu); + struct kthread_worker *worker; + + worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu); + if (IS_ERR(worker)) + return; + + w_data->worker = worker; + w_data->count = 0; + w_data->cpu = cpu; + w_data->clamping = true; + set_bit(cpu, cpu_clamping_mask); + sched_setscheduler(worker->task, SCHED_FIFO, &sparam); + kthread_init_work(&w_data->balancing_work, clamp_balancing_func); + kthread_init_delayed_work(&w_data->idle_injection_work, + clamp_idle_injection_func); + kthread_queue_work(w_data->worker, &w_data->balancing_work); +} + +static void stop_power_clamp_worker(unsigned long cpu) +{ + struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu); + + if (!w_data->worker) + return; + + w_data->clamping = false; + /* + * Make sure that all works that get queued after this point see + * the clamping disabled. The counter part is not needed because + * there is an implicit memory barrier when the queued work + * is processed. 
+ */ + smp_wmb(); + kthread_cancel_work_sync(&w_data->balancing_work); + kthread_cancel_delayed_work_sync(&w_data->idle_injection_work); + /* + * The balancing work still might be queued here because + * the handling of the "clamping" variable, cancel, and queue + * operations are not synchronized via a lock. But it is not + * a big deal. The balancing work is fast and destroy kthread + * will wait for it. + */ + clear_bit(w_data->cpu, cpu_clamping_mask); + kthread_destroy_worker(w_data->worker); + + w_data->worker = NULL; +} + static int start_power_clamp(void) { unsigned long cpu; - struct task_struct *thread; set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1); /* prevent cpu hotplug */ @@ -525,22 +559,9 @@ static int start_power_clamp(void) clamping = true; schedule_delayed_work(&poll_pkg_cstate_work, 0); - /* start one thread per online cpu */ + /* start one kthread worker per online cpu */ for_each_online_cpu(cpu) { - struct task_struct **p = - per_cpu_ptr(powerclamp_thread, cpu); - - thread = kthread_create_on_node(clamp_thread, - (void *) cpu, - cpu_to_node(cpu), - "kidle_inject/%ld", cpu); - /* bind to cpu here */ - if (likely(!IS_ERR(thread))) { - kthread_bind(thread, cpu); - wake_up_process(thread); - *p = thread; - } - + start_power_clamp_worker(cpu); } put_online_cpus(); @@ -550,71 +571,49 @@ static int start_power_clamp(void) static void end_power_clamp(void) { int i; - struct task_struct *thread; - clamping = false; /* - * make clamping visible to other cpus and give per cpu clamping threads - * sometime to exit, or gets killed later. + * Block requeuing in all the kthread workers. They will flush and + * stop faster. */ - smp_mb(); - msleep(20); + clamping = false; if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) { for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) { - pr_debug("clamping thread for cpu %d alive, kill\n", i); - thread = *per_cpu_ptr(powerclamp_thread, i); - kthread_stop(thread); + pr_debug("clamping worker for cpu %d alive, destroy\n", + i); + stop_power_clamp_worker(i); } } } -static int powerclamp_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int powerclamp_cpu_online(unsigned int cpu) { - unsigned long cpu = (unsigned long)hcpu; - struct task_struct *thread; - struct task_struct **percpu_thread = - per_cpu_ptr(powerclamp_thread, cpu); - - if (false == clamping) - goto exit_ok; - - switch (action) { - case CPU_ONLINE: - thread = kthread_create_on_node(clamp_thread, - (void *) cpu, - cpu_to_node(cpu), - "kidle_inject/%lu", cpu); - if (likely(!IS_ERR(thread))) { - kthread_bind(thread, cpu); - wake_up_process(thread); - *percpu_thread = thread; - } - /* prefer BSP as controlling CPU */ - if (cpu == 0) { - control_cpu = 0; - smp_mb(); - } - break; - case CPU_DEAD: - if (test_bit(cpu, cpu_clamping_mask)) { - pr_err("cpu %lu dead but powerclamping thread is not\n", - cpu); - kthread_stop(*percpu_thread); - } - if (cpu == control_cpu) { - control_cpu = smp_processor_id(); - smp_mb(); - } + if (clamping == false) + return 0; + start_power_clamp_worker(cpu); + /* prefer BSP as controlling CPU */ + if (cpu == 0) { + control_cpu = 0; + smp_mb(); } - -exit_ok: - return NOTIFY_OK; + return 0; } -static struct notifier_block powerclamp_cpu_notifier = { - .notifier_call = powerclamp_cpu_callback, -}; +static int powerclamp_cpu_predown(unsigned int cpu) +{ + if (clamping == false) + return 0; + + stop_power_clamp_worker(cpu); + if (cpu != control_cpu) + return 0; + + control_cpu = 
cpumask_first(cpu_online_mask); + if (control_cpu == cpu) + control_cpu = cpumask_next(cpu, cpu_online_mask); + smp_mb(); + return 0; +} static int powerclamp_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) @@ -669,9 +668,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { .set_cur_state = powerclamp_set_cur_state, }; +static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = { + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); + static int __init powerclamp_probe(void) { - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + + if (!x86_match_cpu(intel_powerclamp_ids)) { pr_err("CPU does not support MWAIT"); return -ENODEV; } @@ -735,6 +741,8 @@ static inline void powerclamp_create_debug_files(void) debugfs_remove_recursive(debug_dir); } +static enum cpuhp_state hp_state; + static int __init powerclamp_init(void) { int retval; @@ -752,10 +760,17 @@ static int __init powerclamp_init(void) /* set default limit, maybe adjusted during runtime based on feedback */ window_size = 2; - register_hotcpu_notifier(&powerclamp_cpu_notifier); + retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "thermal/intel_powerclamp:online", + powerclamp_cpu_online, + powerclamp_cpu_predown); + if (retval < 0) + goto exit_free; + + hp_state = retval; - powerclamp_thread = alloc_percpu(struct task_struct *); - if (!powerclamp_thread) { + worker_data = alloc_percpu(struct powerclamp_worker_data); + if (!worker_data) { retval = -ENOMEM; goto exit_unregister; } @@ -775,9 +790,9 @@ static int __init powerclamp_init(void) return 0; exit_free_thread: - free_percpu(powerclamp_thread); + free_percpu(worker_data); exit_unregister: - unregister_hotcpu_notifier(&powerclamp_cpu_notifier); + cpuhp_remove_state_nocalls(hp_state); exit_free: kfree(cpu_clamping_mask); return retval; @@ -786,9 +801,9 @@ module_init(powerclamp_init); static void __exit powerclamp_exit(void) { - unregister_hotcpu_notifier(&powerclamp_cpu_notifier); end_power_clamp(); - free_percpu(powerclamp_thread); + cpuhp_remove_state_nocalls(hp_state); + free_percpu(worker_data); thermal_cooling_device_unregister(cooling_dev); kfree(cpu_clamping_mask); diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c index 83905ff46e40e14904c1197521455b6d813f55e6..e9a1fe342760efd9d5eff1b05a1c4bbbebfd6148 100644 --- a/drivers/thermal/max77620_thermal.c +++ b/drivers/thermal/max77620_thermal.c @@ -149,6 +149,7 @@ static struct platform_device_id max77620_thermal_devtype[] = { { .name = "max77620-thermal", }, {}, }; +MODULE_DEVICE_TABLE(platform, max77620_thermal_devtype); static struct platform_driver max77620_thermal_driver = { .driver = { diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c index 819c6d5d7aa714914b6611fb306fd74bea48344c..f50241962ad2e064110db695a07d7d242c33c67b 100644 --- a/drivers/thermal/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom-spmi-temp-alarm.c @@ -200,7 +200,7 @@ static int qpnp_tm_probe(struct platform_device *pdev) struct qpnp_tm_chip *chip; struct device_node *node; u8 type, subtype; - u32 res[2]; + u32 res; int ret, irq; node = pdev->dev.of_node; @@ -215,7 +215,7 @@ static int qpnp_tm_probe(struct platform_device *pdev) if (!chip->map) return -ENXIO; - ret = of_property_read_u32_array(node, "reg", res, 2); + ret = of_property_read_u32(node, "reg", &res); if (ret < 0) return ret; @@ -228,7 +228,7 @@ static int qpnp_tm_probe(struct 
platform_device *pdev) if (PTR_ERR(chip->adc) == -EPROBE_DEFER) return PTR_ERR(chip->adc); - chip->base = res[0]; + chip->base = res; ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type); if (ret < 0) { diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index e227a9f0acf71c52917a91b8986c9fc4c4d17e31..b811b0fb61b1381fba45440f963030aba9ee509a 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -524,11 +524,6 @@ static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs, regs + TSADCV2_AUTO_PERIOD_HT); writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT, regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE); - - if (IS_ERR(grf)) { - pr_warn("%s: Missing rockchip,grf property\n", __func__); - return; - } } /** @@ -971,6 +966,8 @@ static int rockchip_configure_from_dt(struct device *dev, * need this property. */ thermal->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); + if (IS_ERR(thermal->grf)) + dev_warn(dev, "Missing rockchip,grf property\n"); return 0; } diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c index 201304aeafebcdde57b6c194b5670726dae109e8..4e67795cb6cea5668cef3b5ff99352e6ed1f9825 100644 --- a/drivers/thermal/tango_thermal.c +++ b/drivers/thermal/tango_thermal.c @@ -107,6 +107,7 @@ static const struct of_device_id tango_sensor_ids[] = { }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, tango_sensor_ids); static struct platform_driver tango_thermal_driver = { .probe = tango_thermal_probe, diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 226b0b4aced6a2e9fd348086259fc517e3f674ee..641faab6e24b50fef4d70d3334edfc49e0ab0ce3 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -5,22 +5,9 @@ * Copyright (C) 2008 Zhang Rui * Copyright (C) 2008 Sujith Thomas * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -64,6 +51,13 @@ static atomic_t in_suspend; static struct thermal_governor *def_governor; +/* + * Governor section: set of functions to handle thermal governors + * + * Functions to help in the life cycle of thermal governors within + * the thermal core and by the thermal governor code. 
+ */ + static struct thermal_governor *__find_governor(const char *name) { struct thermal_governor *pos; @@ -142,11 +136,16 @@ int thermal_register_governor(struct thermal_governor *governor) mutex_lock(&thermal_governor_lock); err = -EBUSY; - if (__find_governor(governor->name) == NULL) { + if (!__find_governor(governor->name)) { + bool match_default; + err = 0; list_add(&governor->governor_list, &thermal_governor_list); - if (!def_governor && !strncmp(governor->name, - DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH)) + match_default = !strncmp(governor->name, + DEFAULT_THERMAL_GOVERNOR, + THERMAL_NAME_LENGTH); + + if (!def_governor && match_default) def_governor = governor; } @@ -188,14 +187,14 @@ void thermal_unregister_governor(struct thermal_governor *governor) mutex_lock(&thermal_governor_lock); - if (__find_governor(governor->name) == NULL) + if (!__find_governor(governor->name)) goto exit; mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) { if (!strncasecmp(pos->governor->name, governor->name, - THERMAL_NAME_LENGTH)) + THERMAL_NAME_LENGTH)) thermal_set_governor(pos, NULL); } @@ -203,195 +202,92 @@ void thermal_unregister_governor(struct thermal_governor *governor) list_del(&governor->governor_list); exit: mutex_unlock(&thermal_governor_lock); - return; -} - -static int get_idr(struct idr *idr, struct mutex *lock, int *id) -{ - int ret; - - if (lock) - mutex_lock(lock); - ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); - if (lock) - mutex_unlock(lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - return 0; -} - -static void release_idr(struct idr *idr, struct mutex *lock, int id) -{ - if (lock) - mutex_lock(lock); - idr_remove(idr, id); - if (lock) - mutex_unlock(lock); -} - -int get_tz_trend(struct thermal_zone_device *tz, int trip) -{ - enum thermal_trend trend; - - if (tz->emul_temperature || !tz->ops->get_trend || - tz->ops->get_trend(tz, trip, &trend)) { - if (tz->temperature > tz->last_temperature) - trend = THERMAL_TREND_RAISING; - else if (tz->temperature < tz->last_temperature) - trend = THERMAL_TREND_DROPPING; - else - trend = THERMAL_TREND_STABLE; - } - - return trend; } -EXPORT_SYMBOL(get_tz_trend); -struct thermal_instance *get_thermal_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int trip) +int thermal_zone_device_set_policy(struct thermal_zone_device *tz, + char *policy) { - struct thermal_instance *pos = NULL; - struct thermal_instance *target_instance = NULL; + struct thermal_governor *gov; + int ret = -EINVAL; + mutex_lock(&thermal_governor_lock); mutex_lock(&tz->lock); - mutex_lock(&cdev->lock); - list_for_each_entry(pos, &tz->thermal_instances, tz_node) { - if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { - target_instance = pos; - break; - } - } + gov = __find_governor(strim(policy)); + if (!gov) + goto exit; - mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); + ret = thermal_set_governor(tz, gov); - return target_instance; -} -EXPORT_SYMBOL(get_thermal_instance); +exit: + mutex_unlock(&tz->lock); + mutex_unlock(&thermal_governor_lock); -static void print_bind_err_msg(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int ret) -{ - dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n", - tz->type, cdev->type, ret); + return ret; } -static void __bind(struct thermal_zone_device *tz, int mask, - struct thermal_cooling_device *cdev, - unsigned long *limits, - unsigned int weight) +int thermal_build_list_of_policies(char *buf) { - int i, ret; + 
struct thermal_governor *pos; + ssize_t count = 0; + ssize_t size = PAGE_SIZE; - for (i = 0; i < tz->trips; i++) { - if (mask & (1 << i)) { - unsigned long upper, lower; + mutex_lock(&thermal_governor_lock); - upper = THERMAL_NO_LIMIT; - lower = THERMAL_NO_LIMIT; - if (limits) { - lower = limits[i * 2]; - upper = limits[i * 2 + 1]; - } - ret = thermal_zone_bind_cooling_device(tz, i, cdev, - upper, lower, - weight); - if (ret) - print_bind_err_msg(tz, cdev, ret); - } + list_for_each_entry(pos, &thermal_governor_list, governor_list) { + size = PAGE_SIZE - count; + count += scnprintf(buf + count, size, "%s ", pos->name); } -} + count += scnprintf(buf + count, size, "\n"); -static void __unbind(struct thermal_zone_device *tz, int mask, - struct thermal_cooling_device *cdev) -{ - int i; + mutex_unlock(&thermal_governor_lock); - for (i = 0; i < tz->trips; i++) - if (mask & (1 << i)) - thermal_zone_unbind_cooling_device(tz, i, cdev); + return count; } -static void bind_cdev(struct thermal_cooling_device *cdev) +static int __init thermal_register_governors(void) { - int i, ret; - const struct thermal_zone_params *tzp; - struct thermal_zone_device *pos = NULL; - - mutex_lock(&thermal_list_lock); + int result; - list_for_each_entry(pos, &thermal_tz_list, node) { - if (!pos->tzp && !pos->ops->bind) - continue; + result = thermal_gov_step_wise_register(); + if (result) + return result; - if (pos->ops->bind) { - ret = pos->ops->bind(pos, cdev); - if (ret) - print_bind_err_msg(pos, cdev, ret); - continue; - } + result = thermal_gov_fair_share_register(); + if (result) + return result; - tzp = pos->tzp; - if (!tzp || !tzp->tbp) - continue; + result = thermal_gov_bang_bang_register(); + if (result) + return result; - for (i = 0; i < tzp->num_tbps; i++) { - if (tzp->tbp[i].cdev || !tzp->tbp[i].match) - continue; - if (tzp->tbp[i].match(pos, cdev)) - continue; - tzp->tbp[i].cdev = cdev; - __bind(pos, tzp->tbp[i].trip_mask, cdev, - tzp->tbp[i].binding_limits, - tzp->tbp[i].weight); - } - } + result = thermal_gov_user_space_register(); + if (result) + return result; - mutex_unlock(&thermal_list_lock); + return thermal_gov_power_allocator_register(); } -static void bind_tz(struct thermal_zone_device *tz) +static void thermal_unregister_governors(void) { - int i, ret; - struct thermal_cooling_device *pos = NULL; - const struct thermal_zone_params *tzp = tz->tzp; - - if (!tzp && !tz->ops->bind) - return; - - mutex_lock(&thermal_list_lock); - - /* If there is ops->bind, try to use ops->bind */ - if (tz->ops->bind) { - list_for_each_entry(pos, &thermal_cdev_list, node) { - ret = tz->ops->bind(tz, pos); - if (ret) - print_bind_err_msg(tz, pos, ret); - } - goto exit; - } - - if (!tzp || !tzp->tbp) - goto exit; - - list_for_each_entry(pos, &thermal_cdev_list, node) { - for (i = 0; i < tzp->num_tbps; i++) { - if (tzp->tbp[i].cdev || !tzp->tbp[i].match) - continue; - if (tzp->tbp[i].match(tz, pos)) - continue; - tzp->tbp[i].cdev = pos; - __bind(tz, tzp->tbp[i].trip_mask, pos, - tzp->tbp[i].binding_limits, - tzp->tbp[i].weight); - } - } -exit: - mutex_unlock(&thermal_list_lock); + thermal_gov_step_wise_unregister(); + thermal_gov_fair_share_unregister(); + thermal_gov_bang_bang_unregister(); + thermal_gov_user_space_unregister(); + thermal_gov_power_allocator_unregister(); } +/* + * Zone update section: main control loop applied to each zone while monitoring + * + * in polling mode. The monitoring is done using a workqueue. + * Same update may be done on a zone by calling thermal_zone_device_update(). 
+ * + * An update means: + * - Non-critical trips will invoke the governor responsible for that zone; + * - Hot trips will produce a notification to userspace; + * - Critical trip point will cause a system shutdown. + */ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, int delay) { @@ -420,14 +316,15 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz) } static void handle_non_critical_trips(struct thermal_zone_device *tz, - int trip, enum thermal_trip_type trip_type) + int trip, + enum thermal_trip_type trip_type) { tz->governor ? tz->governor->throttle(tz, trip) : def_governor->throttle(tz, trip); } static void handle_critical_trips(struct thermal_zone_device *tz, - int trip, enum thermal_trip_type trip_type) + int trip, enum thermal_trip_type trip_type) { int trip_temp; @@ -471,105 +368,6 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) monitor_thermal_zone(tz); } -/** - * thermal_zone_get_temp() - returns the temperature of a thermal zone - * @tz: a valid pointer to a struct thermal_zone_device - * @temp: a valid pointer to where to store the resulting temperature. - * - * When a valid thermal zone reference is passed, it will fetch its - * temperature and fill @temp. - * - * Return: On success returns 0, an error code otherwise - */ -int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) -{ - int ret = -EINVAL; - int count; - int crit_temp = INT_MAX; - enum thermal_trip_type type; - - if (!tz || IS_ERR(tz) || !tz->ops->get_temp) - goto exit; - - mutex_lock(&tz->lock); - - ret = tz->ops->get_temp(tz, temp); - - if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) { - for (count = 0; count < tz->trips; count++) { - ret = tz->ops->get_trip_type(tz, count, &type); - if (!ret && type == THERMAL_TRIP_CRITICAL) { - ret = tz->ops->get_trip_temp(tz, count, - &crit_temp); - break; - } - } - - /* - * Only allow emulating a temperature when the real temperature - * is below the critical temperature so that the emulation code - * cannot hide critical conditions. - */ - if (!ret && *temp < crit_temp) - *temp = tz->emul_temperature; - } - - mutex_unlock(&tz->lock); -exit: - return ret; -} -EXPORT_SYMBOL_GPL(thermal_zone_get_temp); - -void thermal_zone_set_trips(struct thermal_zone_device *tz) -{ - int low = -INT_MAX; - int high = INT_MAX; - int trip_temp, hysteresis; - int i, ret; - - mutex_lock(&tz->lock); - - if (!tz->ops->set_trips || !tz->ops->get_trip_hyst) - goto exit; - - for (i = 0; i < tz->trips; i++) { - int trip_low; - - tz->ops->get_trip_temp(tz, i, &trip_temp); - tz->ops->get_trip_hyst(tz, i, &hysteresis); - - trip_low = trip_temp - hysteresis; - - if (trip_low < tz->temperature && trip_low > low) - low = trip_low; - - if (trip_temp > tz->temperature && trip_temp < high) - high = trip_temp; - } - - /* No need to change trip points */ - if (tz->prev_low_trip == low && tz->prev_high_trip == high) - goto exit; - - tz->prev_low_trip = low; - tz->prev_high_trip = high; - - dev_dbg(&tz->device, - "new temperature boundaries: %d < x < %d\n", low, high); - - /* - * Set a temperature window. When this window is left the driver - * must inform the thermal core via thermal_zone_device_update. 
- */ - ret = tz->ops->set_trips(tz, low, high); - if (ret) - dev_err(&tz->device, "Failed to set trips: %d\n", ret); - -exit: - mutex_unlock(&tz->lock); -} -EXPORT_SYMBOL_GPL(thermal_zone_set_trips); - static void update_temperature(struct thermal_zone_device *tz) { int temp, ret; @@ -629,6 +427,24 @@ void thermal_zone_device_update(struct thermal_zone_device *tz, } EXPORT_SYMBOL_GPL(thermal_zone_device_update); +/** + * thermal_notify_framework - Sensor drivers use this API to notify framework + * @tz: thermal zone device + * @trip: indicates which trip point has been crossed + * + * This function handles the trip events from sensor drivers. It starts + * throttling the cooling devices according to the policy configured. + * For CRITICAL and HOT trip points, this notifies the respective drivers, + * and does actual throttling for other trip points i.e ACTIVE and PASSIVE. + * The throttling policy is based on the configured platform data; if no + * platform data is provided, this uses the step_wise throttling policy. + */ +void thermal_notify_framework(struct thermal_zone_device *tz, int trip) +{ + handle_thermal_trip(tz, trip); +} +EXPORT_SYMBOL_GPL(thermal_notify_framework); + static void thermal_zone_device_check(struct work_struct *work) { struct thermal_zone_device *tz = container_of(work, struct @@ -637,445 +453,12 @@ static void thermal_zone_device_check(struct work_struct *work) thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); } -/* sys I/F for thermal zone */ - -#define to_thermal_zone(_dev) \ - container_of(_dev, struct thermal_zone_device, device) - -static ssize_t -type_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - - return sprintf(buf, "%s\n", tz->type); -} - -static ssize_t -temp_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int temperature, ret; - - ret = thermal_zone_get_temp(tz, &temperature); - - if (ret) - return ret; - - return sprintf(buf, "%d\n", temperature); -} - -static ssize_t -mode_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - enum thermal_device_mode mode; - int result; - - if (!tz->ops->get_mode) - return -EPERM; - - result = tz->ops->get_mode(tz, &mode); - if (result) - return result; - - return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? 
"enabled" - : "disabled"); -} - -static ssize_t -mode_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int result; - - if (!tz->ops->set_mode) - return -EPERM; - - if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) - result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED); - else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) - result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED); - else - result = -EINVAL; - - if (result) - return result; - - return count; -} - -static ssize_t -trip_point_type_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - enum thermal_trip_type type; - int trip, result; - - if (!tz->ops->get_trip_type) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip)) - return -EINVAL; - - result = tz->ops->get_trip_type(tz, trip, &type); - if (result) - return result; - - switch (type) { - case THERMAL_TRIP_CRITICAL: - return sprintf(buf, "critical\n"); - case THERMAL_TRIP_HOT: - return sprintf(buf, "hot\n"); - case THERMAL_TRIP_PASSIVE: - return sprintf(buf, "passive\n"); - case THERMAL_TRIP_ACTIVE: - return sprintf(buf, "active\n"); - default: - return sprintf(buf, "unknown\n"); - } -} - -static ssize_t -trip_point_temp_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip, ret; - int temperature; - - if (!tz->ops->set_trip_temp) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) - return -EINVAL; - - if (kstrtoint(buf, 10, &temperature)) - return -EINVAL; - - ret = tz->ops->set_trip_temp(tz, trip, temperature); - if (ret) - return ret; - - thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); - - return count; -} - -static ssize_t -trip_point_temp_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip, ret; - int temperature; - - if (!tz->ops->get_trip_temp) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) - return -EINVAL; - - ret = tz->ops->get_trip_temp(tz, trip, &temperature); - - if (ret) - return ret; - - return sprintf(buf, "%d\n", temperature); -} - -static ssize_t -trip_point_hyst_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip, ret; - int temperature; - - if (!tz->ops->set_trip_hyst) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip)) - return -EINVAL; - - if (kstrtoint(buf, 10, &temperature)) - return -EINVAL; - - /* - * We are not doing any check on the 'temperature' value - * here. The driver implementing 'set_trip_hyst' has to - * take care of this. - */ - ret = tz->ops->set_trip_hyst(tz, trip, temperature); - - if (!ret) - thermal_zone_set_trips(tz); - - return ret ? ret : count; -} - -static ssize_t -trip_point_hyst_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip, ret; - int temperature; - - if (!tz->ops->get_trip_hyst) - return -EPERM; - - if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip)) - return -EINVAL; - - ret = tz->ops->get_trip_hyst(tz, trip, &temperature); - - return ret ? 
ret : sprintf(buf, "%d\n", temperature); -} - -static ssize_t -passive_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_cooling_device *cdev = NULL; - int state; - - if (!sscanf(buf, "%d\n", &state)) - return -EINVAL; - - /* sanity check: values below 1000 millicelcius don't make sense - * and can cause the system to go into a thermal heart attack - */ - if (state && state < 1000) - return -EINVAL; - - if (state && !tz->forced_passive) { - mutex_lock(&thermal_list_lock); - list_for_each_entry(cdev, &thermal_cdev_list, node) { - if (!strncmp("Processor", cdev->type, - sizeof("Processor"))) - thermal_zone_bind_cooling_device(tz, - THERMAL_TRIPS_NONE, cdev, - THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT, - THERMAL_WEIGHT_DEFAULT); - } - mutex_unlock(&thermal_list_lock); - if (!tz->passive_delay) - tz->passive_delay = 1000; - } else if (!state && tz->forced_passive) { - mutex_lock(&thermal_list_lock); - list_for_each_entry(cdev, &thermal_cdev_list, node) { - if (!strncmp("Processor", cdev->type, - sizeof("Processor"))) - thermal_zone_unbind_cooling_device(tz, - THERMAL_TRIPS_NONE, - cdev); - } - mutex_unlock(&thermal_list_lock); - tz->passive_delay = 0; - } - - tz->forced_passive = state; - - thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); - - return count; -} - -static ssize_t -passive_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - - return sprintf(buf, "%d\n", tz->forced_passive); -} - -static ssize_t -policy_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int ret = -EINVAL; - struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_governor *gov; - char name[THERMAL_NAME_LENGTH]; - - snprintf(name, sizeof(name), "%s", buf); - - mutex_lock(&thermal_governor_lock); - mutex_lock(&tz->lock); - - gov = __find_governor(strim(name)); - if (!gov) - goto exit; - - ret = thermal_set_governor(tz, gov); - if (!ret) - ret = count; - -exit: - mutex_unlock(&tz->lock); - mutex_unlock(&thermal_governor_lock); - return ret; -} - -static ssize_t -policy_show(struct device *dev, struct device_attribute *devattr, char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - - return sprintf(buf, "%s\n", tz->governor->name); -} - -static ssize_t -available_policies_show(struct device *dev, struct device_attribute *devattr, - char *buf) -{ - struct thermal_governor *pos; - ssize_t count = 0; - ssize_t size = PAGE_SIZE; - - mutex_lock(&thermal_governor_lock); - - list_for_each_entry(pos, &thermal_governor_list, governor_list) { - size = PAGE_SIZE - count; - count += scnprintf(buf + count, size, "%s ", pos->name); - } - count += scnprintf(buf + count, size, "\n"); - - mutex_unlock(&thermal_governor_lock); - - return count; -} - -static ssize_t -emul_temp_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - int ret = 0; - int temperature; - - if (kstrtoint(buf, 10, &temperature)) - return -EINVAL; - - if (!tz->ops->set_emul_temp) { - mutex_lock(&tz->lock); - tz->emul_temperature = temperature; - mutex_unlock(&tz->lock); - } else { - ret = tz->ops->set_emul_temp(tz, temperature); - } - - if (!ret) - thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); - - return ret ? 
ret : count; -} -static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); - -static ssize_t -sustainable_power_show(struct device *dev, struct device_attribute *devattr, - char *buf) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - - if (tz->tzp) - return sprintf(buf, "%u\n", tz->tzp->sustainable_power); - else - return -EIO; -} - -static ssize_t -sustainable_power_store(struct device *dev, struct device_attribute *devattr, - const char *buf, size_t count) -{ - struct thermal_zone_device *tz = to_thermal_zone(dev); - u32 sustainable_power; - - if (!tz->tzp) - return -EIO; - - if (kstrtou32(buf, 10, &sustainable_power)) - return -EINVAL; - - tz->tzp->sustainable_power = sustainable_power; - - return count; -} -static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show, - sustainable_power_store); - -#define create_s32_tzp_attr(name) \ - static ssize_t \ - name##_show(struct device *dev, struct device_attribute *devattr, \ - char *buf) \ - { \ - struct thermal_zone_device *tz = to_thermal_zone(dev); \ - \ - if (tz->tzp) \ - return sprintf(buf, "%d\n", tz->tzp->name); \ - else \ - return -EIO; \ - } \ - \ - static ssize_t \ - name##_store(struct device *dev, struct device_attribute *devattr, \ - const char *buf, size_t count) \ - { \ - struct thermal_zone_device *tz = to_thermal_zone(dev); \ - s32 value; \ - \ - if (!tz->tzp) \ - return -EIO; \ - \ - if (kstrtos32(buf, 10, &value)) \ - return -EINVAL; \ - \ - tz->tzp->name = value; \ - \ - return count; \ - } \ - static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store) - -create_s32_tzp_attr(k_po); -create_s32_tzp_attr(k_pu); -create_s32_tzp_attr(k_i); -create_s32_tzp_attr(k_d); -create_s32_tzp_attr(integral_cutoff); -create_s32_tzp_attr(slope); -create_s32_tzp_attr(offset); -#undef create_s32_tzp_attr - -static struct device_attribute *dev_tzp_attrs[] = { - &dev_attr_sustainable_power, - &dev_attr_k_po, - &dev_attr_k_pu, - &dev_attr_k_i, - &dev_attr_k_d, - &dev_attr_integral_cutoff, - &dev_attr_slope, - &dev_attr_offset, -}; - -static int create_tzp_attrs(struct device *dev) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(dev_tzp_attrs); i++) { - int ret; - struct device_attribute *dev_attr = dev_tzp_attrs[i]; - - ret = device_create_file(dev, dev_attr); - if (ret) - return ret; - } - - return 0; -} +/* + * Power actor section: interface to power actors to estimate power + * + * Set of functions used to interact to cooling devices that know + * how to estimate their devices power consumption. + */ /** * power_actor_get_max_power() - get the maximum power that a cdev can consume @@ -1127,12 +510,13 @@ int power_actor_get_min_power(struct thermal_cooling_device *cdev, } /** - * power_actor_set_power() - limit the maximum power that a cooling device can consume + * power_actor_set_power() - limit the maximum power a cooling device consumes * @cdev: pointer to &thermal_cooling_device * @instance: thermal instance to update * @power: the power in milliwatts * - * Set the cooling device to consume at most @power milliwatts. + * Set the cooling device to consume at most @power milliwatts. The limit is + * expected to be a cap at the maximum power consumption. * * Return: 0 on success, -EINVAL if the cooling device does not * implement the power actor API or -E* for other failures. 
@@ -1159,143 +543,75 @@ int power_actor_set_power(struct thermal_cooling_device *cdev, return 0; } -static DEVICE_ATTR(type, 0444, type_show, NULL); -static DEVICE_ATTR(temp, 0444, temp_show, NULL); -static DEVICE_ATTR(mode, 0644, mode_show, mode_store); -static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store); -static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store); -static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL); - -/* sys I/F for cooling device */ -#define to_cooling_device(_dev) \ - container_of(_dev, struct thermal_cooling_device, device) - -static ssize_t -thermal_cooling_device_type_show(struct device *dev, - struct device_attribute *attr, char *buf) +void thermal_zone_device_rebind_exception(struct thermal_zone_device *tz, + const char *cdev_type, size_t size) { - struct thermal_cooling_device *cdev = to_cooling_device(dev); + struct thermal_cooling_device *cdev = NULL; - return sprintf(buf, "%s\n", cdev->type); + mutex_lock(&thermal_list_lock); + list_for_each_entry(cdev, &thermal_cdev_list, node) { + /* skip non matching cdevs */ + if (strncmp(cdev_type, cdev->type, size)) + continue; + + /* re binding the exception matching the type pattern */ + thermal_zone_bind_cooling_device(tz, THERMAL_TRIPS_NONE, cdev, + THERMAL_NO_LIMIT, + THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); + } + mutex_unlock(&thermal_list_lock); } -static ssize_t -thermal_cooling_device_max_state_show(struct device *dev, - struct device_attribute *attr, char *buf) +void thermal_zone_device_unbind_exception(struct thermal_zone_device *tz, + const char *cdev_type, size_t size) { - struct thermal_cooling_device *cdev = to_cooling_device(dev); - unsigned long state; - int ret; + struct thermal_cooling_device *cdev = NULL; - ret = cdev->ops->get_max_state(cdev, &state); - if (ret) - return ret; - return sprintf(buf, "%ld\n", state); + mutex_lock(&thermal_list_lock); + list_for_each_entry(cdev, &thermal_cdev_list, node) { + /* skip non matching cdevs */ + if (strncmp(cdev_type, cdev->type, size)) + continue; + /* unbinding the exception matching the type pattern */ + thermal_zone_unbind_cooling_device(tz, THERMAL_TRIPS_NONE, + cdev); + } + mutex_unlock(&thermal_list_lock); } -static ssize_t -thermal_cooling_device_cur_state_show(struct device *dev, - struct device_attribute *attr, char *buf) +/* + * Device management section: cooling devices, zones devices, and binding + * + * Set of functions provided by the thermal core for: + * - cooling devices lifecycle: registration, unregistration, + * binding, and unbinding. + * - thermal zone devices lifecycle: registration, unregistration, + * binding, and unbinding. 
+ */ +static int get_idr(struct idr *idr, struct mutex *lock, int *id) { - struct thermal_cooling_device *cdev = to_cooling_device(dev); - unsigned long state; int ret; - ret = cdev->ops->get_cur_state(cdev, &state); - if (ret) + if (lock) + mutex_lock(lock); + ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); + if (lock) + mutex_unlock(lock); + if (unlikely(ret < 0)) return ret; - return sprintf(buf, "%ld\n", state); -} - -static ssize_t -thermal_cooling_device_cur_state_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_cooling_device *cdev = to_cooling_device(dev); - unsigned long state; - int result; - - if (!sscanf(buf, "%ld\n", &state)) - return -EINVAL; - - if ((long)state < 0) - return -EINVAL; - - result = cdev->ops->set_cur_state(cdev, state); - if (result) - return result; - return count; -} - -static struct device_attribute dev_attr_cdev_type = -__ATTR(type, 0444, thermal_cooling_device_type_show, NULL); -static DEVICE_ATTR(max_state, 0444, - thermal_cooling_device_max_state_show, NULL); -static DEVICE_ATTR(cur_state, 0644, - thermal_cooling_device_cur_state_show, - thermal_cooling_device_cur_state_store); - -static ssize_t -thermal_cooling_device_trip_point_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct thermal_instance *instance; - - instance = - container_of(attr, struct thermal_instance, attr); - - if (instance->trip == THERMAL_TRIPS_NONE) - return sprintf(buf, "-1\n"); - else - return sprintf(buf, "%d\n", instance->trip); -} - -static struct attribute *cooling_device_attrs[] = { - &dev_attr_cdev_type.attr, - &dev_attr_max_state.attr, - &dev_attr_cur_state.attr, - NULL, -}; - -static const struct attribute_group cooling_device_attr_group = { - .attrs = cooling_device_attrs, -}; - -static const struct attribute_group *cooling_device_attr_groups[] = { - &cooling_device_attr_group, - NULL, -}; - -static ssize_t -thermal_cooling_device_weight_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct thermal_instance *instance; - - instance = container_of(attr, struct thermal_instance, weight_attr); - - return sprintf(buf, "%d\n", instance->weight); + *id = ret; + return 0; } -static ssize_t -thermal_cooling_device_weight_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct thermal_instance *instance; - int ret, weight; - - ret = kstrtoint(buf, 0, &weight); - if (ret) - return ret; - - instance = container_of(attr, struct thermal_instance, weight_attr); - instance->weight = weight; - - return count; +static void release_idr(struct idr *idr, struct mutex *lock, int id) +{ + if (lock) + mutex_lock(lock); + idr_remove(idr, id); + if (lock) + mutex_unlock(lock); } -/* Device management */ /** * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone @@ -1358,8 +674,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (lower > upper || upper > max_state) return -EINVAL; - dev = - kzalloc(sizeof(struct thermal_instance), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->tz = tz; @@ -1402,10 +717,10 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, mutex_lock(&tz->lock); mutex_lock(&cdev->lock); list_for_each_entry(pos, &tz->thermal_instances, tz_node) - if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { - result = -EEXIST; - break; - } + if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { + result 
= -EEXIST; + break; + } if (!result) { list_add_tail(&dev->tz_node, &tz->thermal_instances); list_add_tail(&dev->cdev_node, &cdev->thermal_instances); @@ -1485,8 +800,8 @@ static void thermal_release(struct device *dev) sizeof("thermal_zone") - 1)) { tz = to_thermal_zone(dev); kfree(tz); - } else if(!strncmp(dev_name(dev), "cooling_device", - sizeof("cooling_device") - 1)){ + } else if (!strncmp(dev_name(dev), "cooling_device", + sizeof("cooling_device") - 1)) { cdev = to_cooling_device(dev); kfree(cdev); } @@ -1497,6 +812,78 @@ static struct class thermal_class = { .dev_release = thermal_release, }; +static inline +void print_bind_err_msg(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, int ret) +{ + dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n", + tz->type, cdev->type, ret); +} + +static void __bind(struct thermal_zone_device *tz, int mask, + struct thermal_cooling_device *cdev, + unsigned long *limits, + unsigned int weight) +{ + int i, ret; + + for (i = 0; i < tz->trips; i++) { + if (mask & (1 << i)) { + unsigned long upper, lower; + + upper = THERMAL_NO_LIMIT; + lower = THERMAL_NO_LIMIT; + if (limits) { + lower = limits[i * 2]; + upper = limits[i * 2 + 1]; + } + ret = thermal_zone_bind_cooling_device(tz, i, cdev, + upper, lower, + weight); + if (ret) + print_bind_err_msg(tz, cdev, ret); + } + } +} + +static void bind_cdev(struct thermal_cooling_device *cdev) +{ + int i, ret; + const struct thermal_zone_params *tzp; + struct thermal_zone_device *pos = NULL; + + mutex_lock(&thermal_list_lock); + + list_for_each_entry(pos, &thermal_tz_list, node) { + if (!pos->tzp && !pos->ops->bind) + continue; + + if (pos->ops->bind) { + ret = pos->ops->bind(pos, cdev); + if (ret) + print_bind_err_msg(pos, cdev, ret); + continue; + } + + tzp = pos->tzp; + if (!tzp || !tzp->tbp) + continue; + + for (i = 0; i < tzp->num_tbps; i++) { + if (tzp->tbp[i].cdev || !tzp->tbp[i].match) + continue; + if (tzp->tbp[i].match(pos, cdev)) + continue; + tzp->tbp[i].cdev = cdev; + __bind(pos, tzp->tbp[i].trip_mask, cdev, + tzp->tbp[i].binding_limits, + tzp->tbp[i].weight); + } + } + + mutex_unlock(&thermal_list_lock); +} + /** * __thermal_cooling_device_register() - register a new thermal cooling device * @np: a pointer to a device tree node. @@ -1529,7 +916,7 @@ __thermal_cooling_device_register(struct device_node *np, !ops->set_cur_state) return ERR_PTR(-EINVAL); - cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL); + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) return ERR_PTR(-ENOMEM); @@ -1546,7 +933,7 @@ __thermal_cooling_device_register(struct device_node *np, cdev->ops = ops; cdev->updated = false; cdev->device.class = &thermal_class; - cdev->device.groups = cooling_device_attr_groups; + thermal_cooling_device_setup_sysfs(cdev); cdev->devdata = devdata; dev_set_name(&cdev->device, "cooling_device%d", cdev->id); result = device_register(&cdev->device); @@ -1619,12 +1006,22 @@ thermal_of_cooling_device_register(struct device_node *np, } EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register); +static void __unbind(struct thermal_zone_device *tz, int mask, + struct thermal_cooling_device *cdev) +{ + int i; + + for (i = 0; i < tz->trips; i++) + if (mask & (1 << i)) + thermal_zone_unbind_cooling_device(tz, i, cdev); +} + /** - * thermal_cooling_device_unregister - removes the registered thermal cooling device + * thermal_cooling_device_unregister - removes a thermal cooling device * @cdev: the thermal cooling device to remove. 
* - * thermal_cooling_device_unregister() must be called when the device is no - * longer needed. + * thermal_cooling_device_unregister() must be called when a registered + * thermal cooling device is no longer needed. */ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) { @@ -1638,8 +1035,8 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_cdev_list, node) - if (pos == cdev) - break; + if (pos == cdev) + break; if (pos != cdev) { /* thermal cooling device not found */ mutex_unlock(&thermal_list_lock); @@ -1668,171 +1065,49 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) mutex_unlock(&thermal_list_lock); - if (cdev->type[0]) - device_remove_file(&cdev->device, &dev_attr_cdev_type); - device_remove_file(&cdev->device, &dev_attr_max_state); - device_remove_file(&cdev->device, &dev_attr_cur_state); - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); device_unregister(&cdev->device); - return; } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); -void thermal_cdev_update(struct thermal_cooling_device *cdev) +static void bind_tz(struct thermal_zone_device *tz) { - struct thermal_instance *instance; - unsigned long target = 0; + int i, ret; + struct thermal_cooling_device *pos = NULL; + const struct thermal_zone_params *tzp = tz->tzp; - mutex_lock(&cdev->lock); - /* cooling device is updated*/ - if (cdev->updated) { - mutex_unlock(&cdev->lock); + if (!tzp && !tz->ops->bind) return; - } - - /* Make sure cdev enters the deepest cooling state */ - list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { - dev_dbg(&cdev->device, "zone%d->target=%lu\n", - instance->tz->id, instance->target); - if (instance->target == THERMAL_NO_TARGET) - continue; - if (instance->target > target) - target = instance->target; - } - cdev->ops->set_cur_state(cdev, target); - cdev->updated = true; - mutex_unlock(&cdev->lock); - trace_cdev_update(cdev, target); - dev_dbg(&cdev->device, "set to state %lu\n", target); -} -EXPORT_SYMBOL(thermal_cdev_update); - -/** - * thermal_notify_framework - Sensor drivers use this API to notify framework - * @tz: thermal zone device - * @trip: indicates which trip point has been crossed - * - * This function handles the trip events from sensor drivers. It starts - * throttling the cooling devices according to the policy configured. - * For CRITICAL and HOT trip points, this notifies the respective drivers, - * and does actual throttling for other trip points i.e ACTIVE and PASSIVE. - * The throttling policy is based on the configured platform data; if no - * platform data is provided, this uses the step_wise throttling policy. - */ -void thermal_notify_framework(struct thermal_zone_device *tz, int trip) -{ - handle_thermal_trip(tz, trip); -} -EXPORT_SYMBOL_GPL(thermal_notify_framework); - -/** - * create_trip_attrs() - create attributes for trip points - * @tz: the thermal zone device - * @mask: Writeable trip point bitmap. - * - * helper function to instantiate sysfs entries for every trip - * point and its properties of a struct thermal_zone_device. - * - * Return: 0 on success, the proper error value otherwise. 
- */ -static int create_trip_attrs(struct thermal_zone_device *tz, int mask) -{ - int indx; - int size = sizeof(struct thermal_attr) * tz->trips; - tz->trip_type_attrs = kzalloc(size, GFP_KERNEL); - if (!tz->trip_type_attrs) - return -ENOMEM; - - tz->trip_temp_attrs = kzalloc(size, GFP_KERNEL); - if (!tz->trip_temp_attrs) { - kfree(tz->trip_type_attrs); - return -ENOMEM; - } + mutex_lock(&thermal_list_lock); - if (tz->ops->get_trip_hyst) { - tz->trip_hyst_attrs = kzalloc(size, GFP_KERNEL); - if (!tz->trip_hyst_attrs) { - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - return -ENOMEM; + /* If there is ops->bind, try to use ops->bind */ + if (tz->ops->bind) { + list_for_each_entry(pos, &thermal_cdev_list, node) { + ret = tz->ops->bind(tz, pos); + if (ret) + print_bind_err_msg(tz, pos, ret); } + goto exit; } + if (!tzp || !tzp->tbp) + goto exit; - for (indx = 0; indx < tz->trips; indx++) { - /* create trip type attribute */ - snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_type", indx); - - sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr); - tz->trip_type_attrs[indx].attr.attr.name = - tz->trip_type_attrs[indx].name; - tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_type_attrs[indx].attr.show = trip_point_type_show; - - device_create_file(&tz->device, - &tz->trip_type_attrs[indx].attr); - - /* create trip temp attribute */ - snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_temp", indx); - - sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr); - tz->trip_temp_attrs[indx].attr.attr.name = - tz->trip_temp_attrs[indx].name; - tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; - if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) && - mask & (1 << indx)) { - tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; - tz->trip_temp_attrs[indx].attr.store = - trip_point_temp_store; - } - - device_create_file(&tz->device, - &tz->trip_temp_attrs[indx].attr); - - /* create Optional trip hyst attribute */ - if (!tz->ops->get_trip_hyst) - continue; - snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_hyst", indx); - - sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr); - tz->trip_hyst_attrs[indx].attr.attr.name = - tz->trip_hyst_attrs[indx].name; - tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show; - if (tz->ops->set_trip_hyst) { - tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR; - tz->trip_hyst_attrs[indx].attr.store = - trip_point_hyst_store; + list_for_each_entry(pos, &thermal_cdev_list, node) { + for (i = 0; i < tzp->num_tbps; i++) { + if (tzp->tbp[i].cdev || !tzp->tbp[i].match) + continue; + if (tzp->tbp[i].match(tz, pos)) + continue; + tzp->tbp[i].cdev = pos; + __bind(tz, tzp->tbp[i].trip_mask, pos, + tzp->tbp[i].binding_limits, + tzp->tbp[i].weight); } - - device_create_file(&tz->device, - &tz->trip_hyst_attrs[indx].attr); - } - return 0; -} - -static void remove_trip_attrs(struct thermal_zone_device *tz) -{ - int indx; - - for (indx = 0; indx < tz->trips; indx++) { - device_remove_file(&tz->device, - &tz->trip_type_attrs[indx].attr); - device_remove_file(&tz->device, - &tz->trip_temp_attrs[indx].attr); - if (tz->ops->get_trip_hyst) - device_remove_file(&tz->device, - &tz->trip_hyst_attrs[indx].attr); } - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); +exit: + mutex_unlock(&thermal_list_lock); } /** @@ -1859,20 
+1134,22 @@ static void remove_trip_attrs(struct thermal_zone_device *tz) * in case of error, an ERR_PTR. Caller must check return value with * IS_ERR*() helpers. */ -struct thermal_zone_device *thermal_zone_device_register(const char *type, - int trips, int mask, void *devdata, - struct thermal_zone_device_ops *ops, - struct thermal_zone_params *tzp, - int passive_delay, int polling_delay) +struct thermal_zone_device * +thermal_zone_device_register(const char *type, int trips, int mask, + void *devdata, struct thermal_zone_device_ops *ops, + struct thermal_zone_params *tzp, int passive_delay, + int polling_delay) { struct thermal_zone_device *tz; enum thermal_trip_type trip_type; int trip_temp; int result; int count; - int passive = 0; struct thermal_governor *governor; + if (!type || strlen(type) == 0) + return ERR_PTR(-EINVAL); + if (type && strlen(type) >= THERMAL_NAME_LENGTH) return ERR_PTR(-EINVAL); @@ -1885,7 +1162,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp)) return ERR_PTR(-EINVAL); - tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL); + tz = kzalloc(sizeof(*tz), GFP_KERNEL); if (!tz) return ERR_PTR(-ENOMEM); @@ -1898,7 +1175,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, return ERR_PTR(result); } - strlcpy(tz->type, type ? : "", sizeof(tz->type)); + strlcpy(tz->type, type, sizeof(tz->type)); tz->ops = ops; tz->tzp = tzp; tz->device.class = &thermal_class; @@ -1906,6 +1183,13 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, tz->trips = trips; tz->passive_delay = passive_delay; tz->polling_delay = polling_delay; + + /* sys I/F */ + /* Add nodes that are always present via .groups */ + result = thermal_zone_create_device_groups(tz, mask); + if (result) + goto unregister; + /* A new thermal zone needs to be updated anyway. 
*/ atomic_set(&tz->need_update, 1); @@ -1917,32 +1201,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, return ERR_PTR(result); } - /* sys I/F */ - if (type) { - result = device_create_file(&tz->device, &dev_attr_type); - if (result) - goto unregister; - } - - result = device_create_file(&tz->device, &dev_attr_temp); - if (result) - goto unregister; - - if (ops->get_mode) { - result = device_create_file(&tz->device, &dev_attr_mode); - if (result) - goto unregister; - } - - result = create_trip_attrs(tz, mask); - if (result) - goto unregister; - for (count = 0; count < trips; count++) { if (tz->ops->get_trip_type(tz, count, &trip_type)) set_bit(count, &tz->trips_disabled); - if (trip_type == THERMAL_TRIP_PASSIVE) - passive = 1; if (tz->ops->get_trip_temp(tz, count, &trip_temp)) set_bit(count, &tz->trips_disabled); /* Check for bogus trip points */ @@ -1950,33 +1211,6 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, set_bit(count, &tz->trips_disabled); } - if (!passive) { - result = device_create_file(&tz->device, &dev_attr_passive); - if (result) - goto unregister; - } - - if (IS_ENABLED(CONFIG_THERMAL_EMULATION)) { - result = device_create_file(&tz->device, &dev_attr_emul_temp); - if (result) - goto unregister; - } - - /* Create policy attribute */ - result = device_create_file(&tz->device, &dev_attr_policy); - if (result) - goto unregister; - - /* Add thermal zone params */ - result = create_tzp_attrs(&tz->device); - if (result) - goto unregister; - - /* Create available_policies attribute */ - result = device_create_file(&tz->device, &dev_attr_available_policies); - if (result) - goto unregister; - /* Update 'this' zone's governor information */ mutex_lock(&thermal_governor_lock); @@ -2006,7 +1240,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, /* Bind cooling devices for this zone */ bind_tz(tz); - INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check); + INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check); thermal_zone_device_reset(tz); /* Update the new thermal zone and mark it as already updated. 
*/ @@ -2040,8 +1274,8 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) - if (pos == tz) - break; + if (pos == tz) + break; if (pos != tz) { /* thermal zone device not found */ mutex_unlock(&thermal_list_lock); @@ -2071,14 +1305,10 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) thermal_zone_device_set_polling(tz, 0); - if (tz->type[0]) - device_remove_file(&tz->device, &dev_attr_type); - device_remove_file(&tz->device, &dev_attr_temp); - if (tz->ops->get_mode) - device_remove_file(&tz->device, &dev_attr_mode); - device_remove_file(&tz->device, &dev_attr_policy); - device_remove_file(&tz->device, &dev_attr_available_policies); - remove_trip_attrs(tz); + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + kfree(tz->trip_hyst_attrs); + kfree(tz->trips_attribute_group.attrs); thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); @@ -2086,7 +1316,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) idr_destroy(&tz->idr); mutex_destroy(&tz->lock); device_unregister(&tz->device); - return; + kfree(tz->device.groups); } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); @@ -2128,43 +1358,13 @@ struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name) } EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); -/** - * thermal_zone_get_slope - return the slope attribute of the thermal zone - * @tz: thermal zone device with the slope attribute - * - * Return: If the thermal zone device has a slope attribute, return it, else - * return 1. - */ -int thermal_zone_get_slope(struct thermal_zone_device *tz) -{ - if (tz && tz->tzp) - return tz->tzp->slope; - return 1; -} -EXPORT_SYMBOL_GPL(thermal_zone_get_slope); - -/** - * thermal_zone_get_offset - return the offset attribute of the thermal zone - * @tz: thermal zone device with the offset attribute - * - * Return: If the thermal zone device has a offset attribute, return it, else - * return 0. 
- */ -int thermal_zone_get_offset(struct thermal_zone_device *tz) -{ - if (tz && tz->tzp) - return tz->tzp->offset; - return 0; -} -EXPORT_SYMBOL_GPL(thermal_zone_get_offset); - #ifdef CONFIG_NET static const struct genl_multicast_group thermal_event_mcgrps[] = { { .name = THERMAL_GENL_MCAST_GROUP_NAME, }, }; -static struct genl_family thermal_event_genl_family = { - .id = GENL_ID_GENERATE, +static struct genl_family thermal_event_genl_family __ro_after_init = { + .module = THIS_MODULE, .name = THERMAL_GENL_FAMILY_NAME, .version = THERMAL_GENL_VERSION, .maxattr = THERMAL_GENL_ATTR_MAX, @@ -2173,7 +1373,7 @@ static struct genl_family thermal_event_genl_family = { }; int thermal_generate_netlink_event(struct thermal_zone_device *tz, - enum events event) + enum events event) { struct sk_buff *skb; struct nlattr *attr; @@ -2235,7 +1435,7 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz, } EXPORT_SYMBOL_GPL(thermal_generate_netlink_event); -static int genetlink_init(void) +static int __init genetlink_init(void) { return genl_register_family(&thermal_event_genl_family); } @@ -2249,40 +1449,8 @@ static inline int genetlink_init(void) { return 0; } static inline void genetlink_exit(void) {} #endif /* !CONFIG_NET */ -static int __init thermal_register_governors(void) -{ - int result; - - result = thermal_gov_step_wise_register(); - if (result) - return result; - - result = thermal_gov_fair_share_register(); - if (result) - return result; - - result = thermal_gov_bang_bang_register(); - if (result) - return result; - - result = thermal_gov_user_space_register(); - if (result) - return result; - - return thermal_gov_power_allocator_register(); -} - -static void thermal_unregister_governors(void) -{ - thermal_gov_step_wise_unregister(); - thermal_gov_fair_share_unregister(); - thermal_gov_bang_bang_unregister(); - thermal_gov_user_space_unregister(); - thermal_gov_power_allocator_unregister(); -} - static int thermal_pm_notify(struct notifier_block *nb, - unsigned long mode, void *_unused) + unsigned long mode, void *_unused) { struct thermal_zone_device *tz; diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 749d41abfbabecfc8fc33f54f4bc2e5d680450f8..2412b3759e16c0404b0dd803a9328c23ac6116a5 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -54,8 +54,34 @@ struct thermal_instance { unsigned int weight; /* The weight of the cooling device */ }; +#define to_thermal_zone(_dev) \ + container_of(_dev, struct thermal_zone_device, device) + +#define to_cooling_device(_dev) \ + container_of(_dev, struct thermal_cooling_device, device) + int thermal_register_governor(struct thermal_governor *); void thermal_unregister_governor(struct thermal_governor *); +void thermal_zone_device_rebind_exception(struct thermal_zone_device *, + const char *, size_t); +void thermal_zone_device_unbind_exception(struct thermal_zone_device *, + const char *, size_t); +int thermal_zone_device_set_policy(struct thermal_zone_device *, char *); +int thermal_build_list_of_policies(char *buf); + +/* sysfs I/F */ +int thermal_zone_create_device_groups(struct thermal_zone_device *, int); +void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *); +/* used only at binding time */ +ssize_t +thermal_cooling_device_trip_point_show(struct device *, + struct device_attribute *, char *); +ssize_t thermal_cooling_device_weight_show(struct device *, + struct device_attribute *, char *); + +ssize_t thermal_cooling_device_weight_store(struct device *, 
+ struct device_attribute *, + const char *, size_t); #ifdef CONFIG_THERMAL_GOV_STEP_WISE int thermal_gov_step_wise_register(void); diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c new file mode 100644 index 0000000000000000000000000000000000000000..8cdf75adcce16a56654a1036272008fa6b7a675d --- /dev/null +++ b/drivers/thermal/thermal_helpers.c @@ -0,0 +1,226 @@ +/* + * thermal_helpers.c - helper functions to handle thermal devices + * + * Copyright (C) 2016 Eduardo Valentin + * + * Highly based on original thermal_core.c + * Copyright (C) 2008 Intel Corp + * Copyright (C) 2008 Zhang Rui + * Copyright (C) 2008 Sujith Thomas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include + +#include "thermal_core.h" + +int get_tz_trend(struct thermal_zone_device *tz, int trip) +{ + enum thermal_trend trend; + + if (tz->emul_temperature || !tz->ops->get_trend || + tz->ops->get_trend(tz, trip, &trend)) { + if (tz->temperature > tz->last_temperature) + trend = THERMAL_TREND_RAISING; + else if (tz->temperature < tz->last_temperature) + trend = THERMAL_TREND_DROPPING; + else + trend = THERMAL_TREND_STABLE; + } + + return trend; +} +EXPORT_SYMBOL(get_tz_trend); + +struct thermal_instance * +get_thermal_instance(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, int trip) +{ + struct thermal_instance *pos = NULL; + struct thermal_instance *target_instance = NULL; + + mutex_lock(&tz->lock); + mutex_lock(&cdev->lock); + + list_for_each_entry(pos, &tz->thermal_instances, tz_node) { + if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { + target_instance = pos; + break; + } + } + + mutex_unlock(&cdev->lock); + mutex_unlock(&tz->lock); + + return target_instance; +} +EXPORT_SYMBOL(get_thermal_instance); + +/** + * thermal_zone_get_temp() - returns the temperature of a thermal zone + * @tz: a valid pointer to a struct thermal_zone_device + * @temp: a valid pointer to where to store the resulting temperature. + * + * When a valid thermal zone reference is passed, it will fetch its + * temperature and fill @temp. + * + * Return: On success returns 0, an error code otherwise + */ +int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) +{ + int ret = -EINVAL; + int count; + int crit_temp = INT_MAX; + enum thermal_trip_type type; + + if (!tz || IS_ERR(tz) || !tz->ops->get_temp) + goto exit; + + mutex_lock(&tz->lock); + + ret = tz->ops->get_temp(tz, temp); + + if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) { + for (count = 0; count < tz->trips; count++) { + ret = tz->ops->get_trip_type(tz, count, &type); + if (!ret && type == THERMAL_TRIP_CRITICAL) { + ret = tz->ops->get_trip_temp(tz, count, + &crit_temp); + break; + } + } + + /* + * Only allow emulating a temperature when the real temperature + * is below the critical temperature so that the emulation code + * cannot hide critical conditions. 
+ */ + if (!ret && *temp < crit_temp) + *temp = tz->emul_temperature; + } + + mutex_unlock(&tz->lock); +exit: + return ret; +} +EXPORT_SYMBOL_GPL(thermal_zone_get_temp); + +void thermal_zone_set_trips(struct thermal_zone_device *tz) +{ + int low = -INT_MAX; + int high = INT_MAX; + int trip_temp, hysteresis; + int i, ret; + + mutex_lock(&tz->lock); + + if (!tz->ops->set_trips || !tz->ops->get_trip_hyst) + goto exit; + + for (i = 0; i < tz->trips; i++) { + int trip_low; + + tz->ops->get_trip_temp(tz, i, &trip_temp); + tz->ops->get_trip_hyst(tz, i, &hysteresis); + + trip_low = trip_temp - hysteresis; + + if (trip_low < tz->temperature && trip_low > low) + low = trip_low; + + if (trip_temp > tz->temperature && trip_temp < high) + high = trip_temp; + } + + /* No need to change trip points */ + if (tz->prev_low_trip == low && tz->prev_high_trip == high) + goto exit; + + tz->prev_low_trip = low; + tz->prev_high_trip = high; + + dev_dbg(&tz->device, + "new temperature boundaries: %d < x < %d\n", low, high); + + /* + * Set a temperature window. When this window is left the driver + * must inform the thermal core via thermal_zone_device_update. + */ + ret = tz->ops->set_trips(tz, low, high); + if (ret) + dev_err(&tz->device, "Failed to set trips: %d\n", ret); + +exit: + mutex_unlock(&tz->lock); +} +EXPORT_SYMBOL_GPL(thermal_zone_set_trips); + +void thermal_cdev_update(struct thermal_cooling_device *cdev) +{ + struct thermal_instance *instance; + unsigned long target = 0; + + mutex_lock(&cdev->lock); + /* cooling device is updated*/ + if (cdev->updated) { + mutex_unlock(&cdev->lock); + return; + } + + /* Make sure cdev enters the deepest cooling state */ + list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { + dev_dbg(&cdev->device, "zone%d->target=%lu\n", + instance->tz->id, instance->target); + if (instance->target == THERMAL_NO_TARGET) + continue; + if (instance->target > target) + target = instance->target; + } + cdev->ops->set_cur_state(cdev, target); + cdev->updated = true; + mutex_unlock(&cdev->lock); + trace_cdev_update(cdev, target); + dev_dbg(&cdev->device, "set to state %lu\n", target); +} +EXPORT_SYMBOL(thermal_cdev_update); + +/** + * thermal_zone_get_slope - return the slope attribute of the thermal zone + * @tz: thermal zone device with the slope attribute + * + * Return: If the thermal zone device has a slope attribute, return it, else + * return 1. + */ +int thermal_zone_get_slope(struct thermal_zone_device *tz) +{ + if (tz && tz->tzp) + return tz->tzp->slope; + return 1; +} +EXPORT_SYMBOL_GPL(thermal_zone_get_slope); + +/** + * thermal_zone_get_offset - return the offset attribute of the thermal zone + * @tz: thermal zone device with the offset attribute + * + * Return: If the thermal zone device has a offset attribute, return it, else + * return 0. 
+ */ +int thermal_zone_get_offset(struct thermal_zone_device *tz) +{ + if (tz && tz->tzp) + return tz->tzp->offset; + return 0; +} +EXPORT_SYMBOL_GPL(thermal_zone_get_offset); diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index c41c7742903ab43b2132241574376b85849666b9..541af5946203bf08e9a2c38a269661e1df81d417 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -64,7 +64,7 @@ name_show(struct device *dev, struct device_attribute *attr, char *buf) struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev); return sprintf(buf, "%s\n", hwmon->type); } -static DEVICE_ATTR(name, 0444, name_show, NULL); +static DEVICE_ATTR_RO(name); static ssize_t temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf) int temperature; int ret; - ret = tz->ops->get_trip_temp(tz, 0, &temperature); + ret = tz->ops->get_crit_temp(tz, &temperature); if (ret) return ret; diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..a694de907a266b6c9661545d77de513c0e890365 --- /dev/null +++ b/drivers/thermal/thermal_sysfs.c @@ -0,0 +1,771 @@ +/* + * thermal.c - sysfs interface of thermal devices + * + * Copyright (C) 2016 Eduardo Valentin + * + * Highly based on original thermal_core.c + * Copyright (C) 2008 Intel Corp + * Copyright (C) 2008 Zhang Rui + * Copyright (C) 2008 Sujith Thomas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include "thermal_core.h" + +/* sys I/F for thermal zone */ + +static ssize_t +type_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + + return sprintf(buf, "%s\n", tz->type); +} + +static ssize_t +temp_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int temperature, ret; + + ret = thermal_zone_get_temp(tz, &temperature); + + if (ret) + return ret; + + return sprintf(buf, "%d\n", temperature); +} + +static ssize_t +mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + enum thermal_device_mode mode; + int result; + + if (!tz->ops->get_mode) + return -EPERM; + + result = tz->ops->get_mode(tz, &mode); + if (result) + return result; + + return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? 
"enabled" + : "disabled"); +} + +static ssize_t +mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int result; + + if (!tz->ops->set_mode) + return -EPERM; + + if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) + result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED); + else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) + result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED); + else + result = -EINVAL; + + if (result) + return result; + + return count; +} + +static ssize_t +trip_point_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + enum thermal_trip_type type; + int trip, result; + + if (!tz->ops->get_trip_type) + return -EPERM; + + if (sscanf(attr->attr.name, "trip_point_%d_type", &trip) != 1) + return -EINVAL; + + result = tz->ops->get_trip_type(tz, trip, &type); + if (result) + return result; + + switch (type) { + case THERMAL_TRIP_CRITICAL: + return sprintf(buf, "critical\n"); + case THERMAL_TRIP_HOT: + return sprintf(buf, "hot\n"); + case THERMAL_TRIP_PASSIVE: + return sprintf(buf, "passive\n"); + case THERMAL_TRIP_ACTIVE: + return sprintf(buf, "active\n"); + default: + return sprintf(buf, "unknown\n"); + } +} + +static ssize_t +trip_point_temp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int trip, ret; + int temperature; + + if (!tz->ops->set_trip_temp) + return -EPERM; + + if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1) + return -EINVAL; + + if (kstrtoint(buf, 10, &temperature)) + return -EINVAL; + + ret = tz->ops->set_trip_temp(tz, trip, temperature); + if (ret) + return ret; + + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + + return count; +} + +static ssize_t +trip_point_temp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int trip, ret; + int temperature; + + if (!tz->ops->get_trip_temp) + return -EPERM; + + if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1) + return -EINVAL; + + ret = tz->ops->get_trip_temp(tz, trip, &temperature); + + if (ret) + return ret; + + return sprintf(buf, "%d\n", temperature); +} + +static ssize_t +trip_point_hyst_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int trip, ret; + int temperature; + + if (!tz->ops->set_trip_hyst) + return -EPERM; + + if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip) != 1) + return -EINVAL; + + if (kstrtoint(buf, 10, &temperature)) + return -EINVAL; + + /* + * We are not doing any check on the 'temperature' value + * here. The driver implementing 'set_trip_hyst' has to + * take care of this. + */ + ret = tz->ops->set_trip_hyst(tz, trip, temperature); + + if (!ret) + thermal_zone_set_trips(tz); + + return ret ? ret : count; +} + +static ssize_t +trip_point_hyst_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int trip, ret; + int temperature; + + if (!tz->ops->get_trip_hyst) + return -EPERM; + + if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip) != 1) + return -EINVAL; + + ret = tz->ops->get_trip_hyst(tz, trip, &temperature); + + return ret ? 
ret : sprintf(buf, "%d\n", temperature); +} + +static ssize_t +passive_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int state; + + if (sscanf(buf, "%d\n", &state) != 1) + return -EINVAL; + + /* sanity check: values below 1000 millicelcius don't make sense + * and can cause the system to go into a thermal heart attack + */ + if (state && state < 1000) + return -EINVAL; + + if (state && !tz->forced_passive) { + if (!tz->passive_delay) + tz->passive_delay = 1000; + thermal_zone_device_rebind_exception(tz, "Processor", + sizeof("Processor")); + } else if (!state && tz->forced_passive) { + tz->passive_delay = 0; + thermal_zone_device_unbind_exception(tz, "Processor", + sizeof("Processor")); + } + + tz->forced_passive = state; + + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + + return count; +} + +static ssize_t +passive_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + + return sprintf(buf, "%d\n", tz->forced_passive); +} + +static ssize_t +policy_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + char name[THERMAL_NAME_LENGTH]; + int ret; + + snprintf(name, sizeof(name), "%s", buf); + + ret = thermal_zone_device_set_policy(tz, name); + if (!ret) + ret = count; + + return ret; +} + +static ssize_t +policy_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + + return sprintf(buf, "%s\n", tz->governor->name); +} + +static ssize_t +available_policies_show(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + return thermal_build_list_of_policies(buf); +} + +#if (IS_ENABLED(CONFIG_THERMAL_EMULATION)) +static ssize_t +emul_temp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + int ret = 0; + int temperature; + + if (kstrtoint(buf, 10, &temperature)) + return -EINVAL; + + if (!tz->ops->set_emul_temp) { + mutex_lock(&tz->lock); + tz->emul_temperature = temperature; + mutex_unlock(&tz->lock); + } else { + ret = tz->ops->set_emul_temp(tz, temperature); + } + + if (!ret) + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + + return ret ? 
ret : count; +} +static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); +#endif + +static ssize_t +sustainable_power_show(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + + if (tz->tzp) + return sprintf(buf, "%u\n", tz->tzp->sustainable_power); + else + return -EIO; +} + +static ssize_t +sustainable_power_store(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + u32 sustainable_power; + + if (!tz->tzp) + return -EIO; + + if (kstrtou32(buf, 10, &sustainable_power)) + return -EINVAL; + + tz->tzp->sustainable_power = sustainable_power; + + return count; +} + +#define create_s32_tzp_attr(name) \ + static ssize_t \ + name##_show(struct device *dev, struct device_attribute *devattr, \ + char *buf) \ + { \ + struct thermal_zone_device *tz = to_thermal_zone(dev); \ + \ + if (tz->tzp) \ + return sprintf(buf, "%d\n", tz->tzp->name); \ + else \ + return -EIO; \ + } \ + \ + static ssize_t \ + name##_store(struct device *dev, struct device_attribute *devattr, \ + const char *buf, size_t count) \ + { \ + struct thermal_zone_device *tz = to_thermal_zone(dev); \ + s32 value; \ + \ + if (!tz->tzp) \ + return -EIO; \ + \ + if (kstrtos32(buf, 10, &value)) \ + return -EINVAL; \ + \ + tz->tzp->name = value; \ + \ + return count; \ + } \ + static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store) + +create_s32_tzp_attr(k_po); +create_s32_tzp_attr(k_pu); +create_s32_tzp_attr(k_i); +create_s32_tzp_attr(k_d); +create_s32_tzp_attr(integral_cutoff); +create_s32_tzp_attr(slope); +create_s32_tzp_attr(offset); +#undef create_s32_tzp_attr + +/* + * These are thermal zone device attributes that will always be present. + * All the attributes created for tzp (create_s32_tzp_attr) also are always + * present on the sysfs interface. 
+ */ +static DEVICE_ATTR(type, 0444, type_show, NULL); +static DEVICE_ATTR(temp, 0444, temp_show, NULL); +static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store); +static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL); +static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show, + sustainable_power_store); + +/* These thermal zone device attributes are created based on conditions */ +static DEVICE_ATTR(mode, 0644, mode_show, mode_store); +static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store); + +/* These attributes are unconditionally added to a thermal zone */ +static struct attribute *thermal_zone_dev_attrs[] = { + &dev_attr_type.attr, + &dev_attr_temp.attr, +#if (IS_ENABLED(CONFIG_THERMAL_EMULATION)) + &dev_attr_emul_temp.attr, +#endif + &dev_attr_policy.attr, + &dev_attr_available_policies.attr, + &dev_attr_sustainable_power.attr, + &dev_attr_k_po.attr, + &dev_attr_k_pu.attr, + &dev_attr_k_i.attr, + &dev_attr_k_d.attr, + &dev_attr_integral_cutoff.attr, + &dev_attr_slope.attr, + &dev_attr_offset.attr, + NULL, +}; + +static struct attribute_group thermal_zone_attribute_group = { + .attrs = thermal_zone_dev_attrs, +}; + +/* We expose mode only if .get_mode is present */ +static struct attribute *thermal_zone_mode_attrs[] = { + &dev_attr_mode.attr, + NULL, +}; + +static umode_t thermal_zone_mode_is_visible(struct kobject *kobj, + struct attribute *attr, + int attrno) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct thermal_zone_device *tz; + + tz = container_of(dev, struct thermal_zone_device, device); + + if (tz->ops->get_mode) + return attr->mode; + + return 0; +} + +static struct attribute_group thermal_zone_mode_attribute_group = { + .attrs = thermal_zone_mode_attrs, + .is_visible = thermal_zone_mode_is_visible, +}; + +/* We expose passive only if passive trips are present */ +static struct attribute *thermal_zone_passive_attrs[] = { + &dev_attr_passive.attr, + NULL, +}; + +static umode_t thermal_zone_passive_is_visible(struct kobject *kobj, + struct attribute *attr, + int attrno) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct thermal_zone_device *tz; + enum thermal_trip_type trip_type; + int count, passive = 0; + + tz = container_of(dev, struct thermal_zone_device, device); + + for (count = 0; count < tz->trips && !passive; count++) { + tz->ops->get_trip_type(tz, count, &trip_type); + + if (trip_type == THERMAL_TRIP_PASSIVE) + passive = 1; + } + + if (!passive) + return attr->mode; + + return 0; +} + +static struct attribute_group thermal_zone_passive_attribute_group = { + .attrs = thermal_zone_passive_attrs, + .is_visible = thermal_zone_passive_is_visible, +}; + +static const struct attribute_group *thermal_zone_attribute_groups[] = { + &thermal_zone_attribute_group, + &thermal_zone_mode_attribute_group, + &thermal_zone_passive_attribute_group, + /* This is not NULL terminated as we create the group dynamically */ +}; + +/** + * create_trip_attrs() - create attributes for trip points + * @tz: the thermal zone device + * @mask: Writeable trip point bitmap. + * + * helper function to instantiate sysfs entries for every trip + * point and its properties of a struct thermal_zone_device. + * + * Return: 0 on success, the proper error value otherwise. 
+ */ +static int create_trip_attrs(struct thermal_zone_device *tz, int mask) +{ + struct attribute **attrs; + int indx; + + /* This function works only for zones with at least one trip */ + if (tz->trips <= 0) + return -EINVAL; + + tz->trip_type_attrs = kcalloc(tz->trips, sizeof(*tz->trip_type_attrs), + GFP_KERNEL); + if (!tz->trip_type_attrs) + return -ENOMEM; + + tz->trip_temp_attrs = kcalloc(tz->trips, sizeof(*tz->trip_temp_attrs), + GFP_KERNEL); + if (!tz->trip_temp_attrs) { + kfree(tz->trip_type_attrs); + return -ENOMEM; + } + + if (tz->ops->get_trip_hyst) { + tz->trip_hyst_attrs = kcalloc(tz->trips, + sizeof(*tz->trip_hyst_attrs), + GFP_KERNEL); + if (!tz->trip_hyst_attrs) { + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + return -ENOMEM; + } + } + + attrs = kcalloc(tz->trips * 3 + 1, sizeof(*attrs), GFP_KERNEL); + if (!attrs) { + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + if (tz->ops->get_trip_hyst) + kfree(tz->trip_hyst_attrs); + return -ENOMEM; + } + + for (indx = 0; indx < tz->trips; indx++) { + /* create trip type attribute */ + snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH, + "trip_point_%d_type", indx); + + sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr); + tz->trip_type_attrs[indx].attr.attr.name = + tz->trip_type_attrs[indx].name; + tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO; + tz->trip_type_attrs[indx].attr.show = trip_point_type_show; + attrs[indx] = &tz->trip_type_attrs[indx].attr.attr; + + /* create trip temp attribute */ + snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH, + "trip_point_%d_temp", indx); + + sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr); + tz->trip_temp_attrs[indx].attr.attr.name = + tz->trip_temp_attrs[indx].name; + tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; + tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; + if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) && + mask & (1 << indx)) { + tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; + tz->trip_temp_attrs[indx].attr.store = + trip_point_temp_store; + } + attrs[indx + tz->trips] = &tz->trip_temp_attrs[indx].attr.attr; + + /* create Optional trip hyst attribute */ + if (!tz->ops->get_trip_hyst) + continue; + snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH, + "trip_point_%d_hyst", indx); + + sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr); + tz->trip_hyst_attrs[indx].attr.attr.name = + tz->trip_hyst_attrs[indx].name; + tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO; + tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show; + if (tz->ops->set_trip_hyst) { + tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR; + tz->trip_hyst_attrs[indx].attr.store = + trip_point_hyst_store; + } + attrs[indx + tz->trips * 2] = + &tz->trip_hyst_attrs[indx].attr.attr; + } + attrs[tz->trips * 3] = NULL; + + tz->trips_attribute_group.attrs = attrs; + + return 0; +} + +int thermal_zone_create_device_groups(struct thermal_zone_device *tz, + int mask) +{ + const struct attribute_group **groups; + int i, size, result; + + /* we need one extra for trips and the NULL to terminate the array */ + size = ARRAY_SIZE(thermal_zone_attribute_groups) + 2; + /* This also takes care of API requirement to be NULL terminated */ + groups = kcalloc(size, sizeof(*groups), GFP_KERNEL); + if (!groups) + return -ENOMEM; + + for (i = 0; i < size - 2; i++) + groups[i] = thermal_zone_attribute_groups[i]; + + if (tz->trips) { + result = create_trip_attrs(tz, mask); + if (result) { + kfree(groups); + + return result; + } + + 
groups[size - 2] = &tz->trips_attribute_group; + } + + tz->device.groups = groups; + + return 0; +} + +/* sys I/F for cooling device */ +static ssize_t +thermal_cooling_device_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_cooling_device *cdev = to_cooling_device(dev); + + return sprintf(buf, "%s\n", cdev->type); +} + +static ssize_t +thermal_cooling_device_max_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_cooling_device *cdev = to_cooling_device(dev); + unsigned long state; + int ret; + + ret = cdev->ops->get_max_state(cdev, &state); + if (ret) + return ret; + return sprintf(buf, "%ld\n", state); +} + +static ssize_t +thermal_cooling_device_cur_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_cooling_device *cdev = to_cooling_device(dev); + unsigned long state; + int ret; + + ret = cdev->ops->get_cur_state(cdev, &state); + if (ret) + return ret; + return sprintf(buf, "%ld\n", state); +} + +static ssize_t +thermal_cooling_device_cur_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_cooling_device *cdev = to_cooling_device(dev); + unsigned long state; + int result; + + if (sscanf(buf, "%ld\n", &state) != 1) + return -EINVAL; + + if ((long)state < 0) + return -EINVAL; + + result = cdev->ops->set_cur_state(cdev, state); + if (result) + return result; + return count; +} + +static struct device_attribute dev_attr_cdev_type = +__ATTR(type, 0444, thermal_cooling_device_type_show, NULL); +static DEVICE_ATTR(max_state, 0444, + thermal_cooling_device_max_state_show, NULL); +static DEVICE_ATTR(cur_state, 0644, + thermal_cooling_device_cur_state_show, + thermal_cooling_device_cur_state_store); + +static struct attribute *cooling_device_attrs[] = { + &dev_attr_cdev_type.attr, + &dev_attr_max_state.attr, + &dev_attr_cur_state.attr, + NULL, +}; + +static const struct attribute_group cooling_device_attr_group = { + .attrs = cooling_device_attrs, +}; + +static const struct attribute_group *cooling_device_attr_groups[] = { + &cooling_device_attr_group, + NULL, +}; + +void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *cdev) +{ + cdev->device.groups = cooling_device_attr_groups; +} + +/* these helper will be used only at the time of bindig */ +ssize_t +thermal_cooling_device_trip_point_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_instance *instance; + + instance = + container_of(attr, struct thermal_instance, attr); + + if (instance->trip == THERMAL_TRIPS_NONE) + return sprintf(buf, "-1\n"); + else + return sprintf(buf, "%d\n", instance->trip); +} + +ssize_t +thermal_cooling_device_weight_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_instance *instance; + + instance = container_of(attr, struct thermal_instance, weight_attr); + + return sprintf(buf, "%d\n", instance->weight); +} + +ssize_t +thermal_cooling_device_weight_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_instance *instance; + int ret, weight; + + ret = kstrtoint(buf, 0, &weight); + if (ret) + return ret; + + instance = container_of(attr, struct thermal_instance, weight_attr); + instance->weight = weight; + + return count; +} diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index 
06ea9766a70af431d12e72dfb7c6bfedb5179511..ba9c302454fb398490046be46831c0a0f821788c 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -1298,7 +1298,7 @@ int ti_bandgap_probe(struct platform_device *pdev) if (IS_ERR(bgp->div_clk)) { dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n"); ret = PTR_ERR(bgp->div_clk); - goto free_irqs; + goto put_fclock; } for (i = 0; i < bgp->conf->sensor_count; i++) { @@ -1430,8 +1430,9 @@ int ti_bandgap_probe(struct platform_device *pdev) if (TI_BANDGAP_HAS(bgp, CLK_CTRL)) clk_disable_unprepare(bgp->fclock); put_clks: - clk_put(bgp->fclock); clk_put(bgp->div_clk); +put_fclock: + clk_put(bgp->fclock); free_irqs: if (TI_BANDGAP_HAS(bgp, TSHUT)) { free_irq(gpio_to_irq(bgp->tshut_gpio), NULL); diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 95f4c1bcdb4caf069df9f70dbe63ec3ae64b7908..d93eee2f101b0aeede2049f515154687df9428db 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c @@ -54,37 +54,33 @@ MODULE_PARM_DESC(notify_delay_ms, * is some wrong values returned by cpuid for number of thresholds. */ #define MAX_NUMBER_OF_TRIPS 2 -/* Limit number of package temp zones */ -#define MAX_PKG_TEMP_ZONE_IDS 256 - -struct phy_dev_entry { - struct list_head list; - u16 phys_proc_id; - u16 first_cpu; - u32 tj_max; - int ref_cnt; - u32 start_pkg_therm_low; - u32 start_pkg_therm_high; - struct thermal_zone_device *tzone; + +struct pkg_device { + int cpu; + bool work_scheduled; + u32 tj_max; + u32 msr_pkg_therm_low; + u32 msr_pkg_therm_high; + struct delayed_work work; + struct thermal_zone_device *tzone; + struct cpumask cpumask; }; static struct thermal_zone_params pkg_temp_tz_params = { .no_hwmon = true, }; -/* List maintaining number of package instances */ -static LIST_HEAD(phy_dev_list); -static DEFINE_MUTEX(phy_dev_list_mutex); - -/* Interrupt to work function schedule queue */ -static DEFINE_PER_CPU(struct delayed_work, pkg_temp_thermal_threshold_work); +/* Keep track of how many package pointers we allocated in init() */ +static int max_packages __read_mostly; +/* Array of package pointers */ +static struct pkg_device **packages; +/* Serializes interrupt notification, work and hotplug */ +static DEFINE_SPINLOCK(pkg_temp_lock); +/* Protects zone operation in the work function against hotplug removal */ +static DEFINE_MUTEX(thermal_zone_mutex); -/* To track if the work is already scheduled on a package */ -static u8 *pkg_work_scheduled; - -/* Spin lock to prevent races with pkg_work_scheduled */ -static spinlock_t pkg_work_lock; -static u16 max_phy_id; +/* The dynamically assigned cpu hotplug state for module_exit() */ +static enum cpuhp_state pkg_thermal_hp_state __read_mostly; /* Debug counters to show using debugfs */ static struct dentry *debugfs; @@ -116,22 +112,20 @@ static int pkg_temp_debugfs_init(void) return -ENOENT; } -static struct phy_dev_entry - *pkg_temp_thermal_get_phy_entry(unsigned int cpu) +/* + * Protection: + * + * - cpu hotplug: Read serialized by cpu hotplug lock + * Write must hold pkg_temp_lock + * + * - Other callsites: Must hold pkg_temp_lock + */ +static struct pkg_device *pkg_temp_thermal_get_dev(unsigned int cpu) { - u16 phys_proc_id = topology_physical_package_id(cpu); - struct phy_dev_entry *phy_ptr; - - mutex_lock(&phy_dev_list_mutex); - - list_for_each_entry(phy_ptr, &phy_dev_list, list) - if (phy_ptr->phys_proc_id == phys_proc_id) { - mutex_unlock(&phy_dev_list_mutex); - 
return phy_ptr; - } - - mutex_unlock(&phy_dev_list_mutex); + int pkgid = topology_logical_package_id(cpu); + if (pkgid >= 0 && pkgid < max_packages) + return packages[pkgid]; return NULL; } @@ -141,61 +135,44 @@ static struct phy_dev_entry */ static int get_tj_max(int cpu, u32 *tj_max) { - u32 eax, edx; - u32 val; + u32 eax, edx, val; int err; err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); if (err) - goto err_ret; - else { - val = (eax >> 16) & 0xff; - if (val) - *tj_max = val * 1000; - else { - err = -EINVAL; - goto err_ret; - } - } + return err; - return 0; -err_ret: - *tj_max = 0; - return err; + val = (eax >> 16) & 0xff; + *tj_max = val * 1000; + + return val ? 0 : -EINVAL; } static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) { + struct pkg_device *pkgdev = tzd->devdata; u32 eax, edx; - struct phy_dev_entry *phy_dev_entry; - phy_dev_entry = tzd->devdata; - rdmsr_on_cpu(phy_dev_entry->first_cpu, MSR_IA32_PACKAGE_THERM_STATUS, - &eax, &edx); + rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_STATUS, &eax, &edx); if (eax & 0x80000000) { - *temp = phy_dev_entry->tj_max - - ((eax >> 16) & 0x7f) * 1000; + *temp = pkgdev->tj_max - ((eax >> 16) & 0x7f) * 1000; pr_debug("sys_get_curr_temp %d\n", *temp); return 0; } - return -EINVAL; } static int sys_get_trip_temp(struct thermal_zone_device *tzd, - int trip, int *temp) + int trip, int *temp) { - u32 eax, edx; - struct phy_dev_entry *phy_dev_entry; - u32 mask, shift; + struct pkg_device *pkgdev = tzd->devdata; unsigned long thres_reg_value; + u32 mask, shift, eax, edx; int ret; if (trip >= MAX_NUMBER_OF_TRIPS) return -EINVAL; - phy_dev_entry = tzd->devdata; - if (trip) { mask = THERM_MASK_THRESHOLD1; shift = THERM_SHIFT_THRESHOLD1; @@ -204,14 +181,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd, shift = THERM_SHIFT_THRESHOLD0; } - ret = rdmsr_on_cpu(phy_dev_entry->first_cpu, - MSR_IA32_PACKAGE_THERM_INTERRUPT, &eax, &edx); + ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, + &eax, &edx); if (ret < 0) - return -EINVAL; + return ret; thres_reg_value = (eax & mask) >> shift; if (thres_reg_value) - *temp = phy_dev_entry->tj_max - thres_reg_value * 1000; + *temp = pkgdev->tj_max - thres_reg_value * 1000; else *temp = 0; pr_debug("sys_get_trip_temp %d\n", *temp); @@ -219,24 +196,20 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd, return 0; } -static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, - int temp) +static int +sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) { - u32 l, h; - struct phy_dev_entry *phy_dev_entry; - u32 mask, shift, intr; + struct pkg_device *pkgdev = tzd->devdata; + u32 l, h, mask, shift, intr; int ret; - phy_dev_entry = tzd->devdata; - - if (trip >= MAX_NUMBER_OF_TRIPS || temp >= phy_dev_entry->tj_max) + if (trip >= MAX_NUMBER_OF_TRIPS || temp >= pkgdev->tj_max) return -EINVAL; - ret = rdmsr_on_cpu(phy_dev_entry->first_cpu, - MSR_IA32_PACKAGE_THERM_INTERRUPT, - &l, &h); + ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, + &l, &h); if (ret < 0) - return -EINVAL; + return ret; if (trip) { mask = THERM_MASK_THRESHOLD1; @@ -252,24 +225,20 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, * When users space sets a trip temperature == 0, which is indication * that, it is no longer interested in receiving notifications. 
*/ - if (!temp) + if (!temp) { l &= ~intr; - else { - l |= (phy_dev_entry->tj_max - temp)/1000 << shift; + } else { + l |= (pkgdev->tj_max - temp)/1000 << shift; l |= intr; } - return wrmsr_on_cpu(phy_dev_entry->first_cpu, - MSR_IA32_PACKAGE_THERM_INTERRUPT, - l, h); + return wrmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); } -static int sys_get_trip_type(struct thermal_zone_device *thermal, - int trip, enum thermal_trip_type *type) +static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip, + enum thermal_trip_type *type) { - *type = THERMAL_TRIP_PASSIVE; - return 0; } @@ -281,7 +250,7 @@ static struct thermal_zone_device_ops tzone_ops = { .set_trip_temp = sys_set_trip_temp, }; -static bool pkg_temp_thermal_platform_thermal_rate_control(void) +static bool pkg_thermal_rate_control(void) { return true; } @@ -289,8 +258,8 @@ static bool pkg_temp_thermal_platform_thermal_rate_control(void) /* Enable threshold interrupt on local package/cpu */ static inline void enable_pkg_thres_interrupt(void) { - u32 l, h; u8 thres_0, thres_1; + u32 l, h; rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); /* only enable/disable if it had valid threshold value */ @@ -307,271 +276,232 @@ static inline void enable_pkg_thres_interrupt(void) static inline void disable_pkg_thres_interrupt(void) { u32 l, h; + rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); - wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, - l & (~THERM_INT_THRESHOLD0_ENABLE) & - (~THERM_INT_THRESHOLD1_ENABLE), h); + + l &= ~(THERM_INT_THRESHOLD0_ENABLE | THERM_INT_THRESHOLD1_ENABLE); + wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); } static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) { - __u64 msr_val; + struct thermal_zone_device *tzone = NULL; int cpu = smp_processor_id(); - int phy_id = topology_physical_package_id(cpu); - struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu); - bool notify = false; - unsigned long flags; - - if (!phdev) - return; + struct pkg_device *pkgdev; + u64 msr_val, wr_val; - spin_lock_irqsave(&pkg_work_lock, flags); + mutex_lock(&thermal_zone_mutex); + spin_lock_irq(&pkg_temp_lock); ++pkg_work_cnt; - if (unlikely(phy_id > max_phy_id)) { - spin_unlock_irqrestore(&pkg_work_lock, flags); + + pkgdev = pkg_temp_thermal_get_dev(cpu); + if (!pkgdev) { + spin_unlock_irq(&pkg_temp_lock); + mutex_unlock(&thermal_zone_mutex); return; } - pkg_work_scheduled[phy_id] = 0; - spin_unlock_irqrestore(&pkg_work_lock, flags); + pkgdev->work_scheduled = false; - enable_pkg_thres_interrupt(); rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); - if (msr_val & THERM_LOG_THRESHOLD0) { - wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, - msr_val & ~THERM_LOG_THRESHOLD0); - notify = true; - } - if (msr_val & THERM_LOG_THRESHOLD1) { - wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, - msr_val & ~THERM_LOG_THRESHOLD1); - notify = true; - } - if (notify) { - pr_debug("thermal_zone_device_update\n"); - thermal_zone_device_update(phdev->tzone, - THERMAL_EVENT_UNSPECIFIED); + wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1); + if (wr_val != msr_val) { + wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val); + tzone = pkgdev->tzone; } + + enable_pkg_thres_interrupt(); + spin_unlock_irq(&pkg_temp_lock); + + /* + * If tzone is not NULL, then thermal_zone_mutex will prevent the + * concurrent removal in the cpu offline callback. 
+ */ + if (tzone) + thermal_zone_device_update(tzone, THERMAL_EVENT_UNSPECIFIED); + + mutex_unlock(&thermal_zone_mutex); } -static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) +static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) +{ + unsigned long ms = msecs_to_jiffies(notify_delay_ms); + + schedule_delayed_work_on(cpu, work, ms); +} + +static int pkg_thermal_notify(u64 msr_val) { - unsigned long flags; int cpu = smp_processor_id(); - int phy_id = topology_physical_package_id(cpu); + struct pkg_device *pkgdev; + unsigned long flags; - /* - * When a package is in interrupted state, all CPU's in that package - * are in the same interrupt state. So scheduling on any one CPU in - * the package is enough and simply return for others. - */ - spin_lock_irqsave(&pkg_work_lock, flags); + spin_lock_irqsave(&pkg_temp_lock, flags); ++pkg_interrupt_cnt; - if (unlikely(phy_id > max_phy_id) || unlikely(!pkg_work_scheduled) || - pkg_work_scheduled[phy_id]) { - disable_pkg_thres_interrupt(); - spin_unlock_irqrestore(&pkg_work_lock, flags); - return -EINVAL; - } - pkg_work_scheduled[phy_id] = 1; - spin_unlock_irqrestore(&pkg_work_lock, flags); disable_pkg_thres_interrupt(); - schedule_delayed_work_on(cpu, - &per_cpu(pkg_temp_thermal_threshold_work, cpu), - msecs_to_jiffies(notify_delay_ms)); - return 0; -} -static int find_siblings_cpu(int cpu) -{ - int i; - int id = topology_physical_package_id(cpu); - - for_each_online_cpu(i) - if (i != cpu && topology_physical_package_id(i) == id) - return i; + /* Work is per package, so scheduling it once is enough. */ + pkgdev = pkg_temp_thermal_get_dev(cpu); + if (pkgdev && !pkgdev->work_scheduled) { + pkgdev->work_scheduled = true; + pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work); + } + spin_unlock_irqrestore(&pkg_temp_lock, flags); return 0; } static int pkg_temp_thermal_device_add(unsigned int cpu) { - int err; - u32 tj_max; - struct phy_dev_entry *phy_dev_entry; - int thres_count; - u32 eax, ebx, ecx, edx; - u8 *temp; - unsigned long flags; + int pkgid = topology_logical_package_id(cpu); + u32 tj_max, eax, ebx, ecx, edx; + struct pkg_device *pkgdev; + int thres_count, err; + + if (pkgid >= max_packages) + return -ENOMEM; cpuid(6, &eax, &ebx, &ecx, &edx); thres_count = ebx & 0x07; if (!thres_count) return -ENODEV; - if (topology_physical_package_id(cpu) > MAX_PKG_TEMP_ZONE_IDS) - return -ENODEV; - thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS); err = get_tj_max(cpu, &tj_max); if (err) - goto err_ret; - - mutex_lock(&phy_dev_list_mutex); + return err; - phy_dev_entry = kzalloc(sizeof(*phy_dev_entry), GFP_KERNEL); - if (!phy_dev_entry) { - err = -ENOMEM; - goto err_ret_unlock; - } + pkgdev = kzalloc(sizeof(*pkgdev), GFP_KERNEL); + if (!pkgdev) + return -ENOMEM; - spin_lock_irqsave(&pkg_work_lock, flags); - if (topology_physical_package_id(cpu) > max_phy_id) - max_phy_id = topology_physical_package_id(cpu); - temp = krealloc(pkg_work_scheduled, - (max_phy_id+1) * sizeof(u8), GFP_ATOMIC); - if (!temp) { - spin_unlock_irqrestore(&pkg_work_lock, flags); - err = -ENOMEM; - goto err_ret_free; - } - pkg_work_scheduled = temp; - pkg_work_scheduled[topology_physical_package_id(cpu)] = 0; - spin_unlock_irqrestore(&pkg_work_lock, flags); - - phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu); - phy_dev_entry->first_cpu = cpu; - phy_dev_entry->tj_max = tj_max; - phy_dev_entry->ref_cnt = 1; - phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp", + INIT_DELAYED_WORK(&pkgdev->work, 
pkg_temp_thermal_threshold_work_fn); + pkgdev->cpu = cpu; + pkgdev->tj_max = tj_max; + pkgdev->tzone = thermal_zone_device_register("x86_pkg_temp", thres_count, - (thres_count == MAX_NUMBER_OF_TRIPS) ? - 0x03 : 0x01, - phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0); - if (IS_ERR(phy_dev_entry->tzone)) { - err = PTR_ERR(phy_dev_entry->tzone); - goto err_ret_free; + (thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01, + pkgdev, &tzone_ops, &pkg_temp_tz_params, 0, 0); + if (IS_ERR(pkgdev->tzone)) { + err = PTR_ERR(pkgdev->tzone); + kfree(pkgdev); + return err; } /* Store MSR value for package thermal interrupt, to restore at exit */ - rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, - &phy_dev_entry->start_pkg_therm_low, - &phy_dev_entry->start_pkg_therm_high); - - list_add_tail(&phy_dev_entry->list, &phy_dev_list); - pr_debug("pkg_temp_thermal_device_add :phy_id %d cpu %d\n", - phy_dev_entry->phys_proc_id, cpu); - - mutex_unlock(&phy_dev_list_mutex); + rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, pkgdev->msr_pkg_therm_low, + pkgdev->msr_pkg_therm_high); + cpumask_set_cpu(cpu, &pkgdev->cpumask); + spin_lock_irq(&pkg_temp_lock); + packages[pkgid] = pkgdev; + spin_unlock_irq(&pkg_temp_lock); return 0; - -err_ret_free: - kfree(phy_dev_entry); -err_ret_unlock: - mutex_unlock(&phy_dev_list_mutex); - -err_ret: - return err; } -static int pkg_temp_thermal_device_remove(unsigned int cpu) +static int pkg_thermal_cpu_offline(unsigned int cpu) { - struct phy_dev_entry *n; - u16 phys_proc_id = topology_physical_package_id(cpu); - struct phy_dev_entry *phdev = - pkg_temp_thermal_get_phy_entry(cpu); + struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu); + bool lastcpu, was_target; + int target; - if (!phdev) - return -ENODEV; + if (!pkgdev) + return 0; - mutex_lock(&phy_dev_list_mutex); - /* If we are loosing the first cpu for this package, we need change */ - if (phdev->first_cpu == cpu) { - phdev->first_cpu = find_siblings_cpu(cpu); - pr_debug("thermal_device_remove: first cpu switched %d\n", - phdev->first_cpu); + target = cpumask_any_but(&pkgdev->cpumask, cpu); + cpumask_clear_cpu(cpu, &pkgdev->cpumask); + lastcpu = target >= nr_cpu_ids; + /* + * Remove the sysfs files, if this is the last cpu in the package + * before doing further cleanups. + */ + if (lastcpu) { + struct thermal_zone_device *tzone = pkgdev->tzone; + + /* + * We must protect against a work function calling + * thermal_zone_update, after/while unregister. We null out + * the pointer under the zone mutex, so the worker function + * won't try to call. + */ + mutex_lock(&thermal_zone_mutex); + pkgdev->tzone = NULL; + mutex_unlock(&thermal_zone_mutex); + + thermal_zone_device_unregister(tzone); } + + /* Protect against work and interrupts */ + spin_lock_irq(&pkg_temp_lock); + /* - * It is possible that no siblings left as this was the last cpu - * going offline. We don't need to worry about this assignment - * as the phydev entry will be removed in this case and - * thermal zone is removed. - */ - --phdev->ref_cnt; - pr_debug("thermal_device_remove: pkg: %d cpu %d ref_cnt %d\n", - phys_proc_id, cpu, phdev->ref_cnt); - if (!phdev->ref_cnt) - list_for_each_entry_safe(phdev, n, &phy_dev_list, list) { - if (phdev->phys_proc_id == phys_proc_id) { - thermal_zone_device_unregister(phdev->tzone); - list_del(&phdev->list); - kfree(phdev); - break; - } - } - mutex_unlock(&phy_dev_list_mutex); + * Check whether this cpu was the current target and store the new + * one. 
When we drop the lock, then the interrupt notify function + * will see the new target. + */ + was_target = pkgdev->cpu == cpu; + pkgdev->cpu = target; - return 0; -} + /* + * If this is the last CPU in the package remove the package + * reference from the array and restore the interrupt MSR. When we + * drop the lock neither the interrupt notify function nor the + * worker will see the package anymore. + */ + if (lastcpu) { + packages[topology_logical_package_id(cpu)] = NULL; + /* After this point nothing touches the MSR anymore. */ + wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, + pkgdev->msr_pkg_therm_low, pkgdev->msr_pkg_therm_high); + } -static int get_core_online(unsigned int cpu) -{ - struct cpuinfo_x86 *c = &cpu_data(cpu); - struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu); - - /* Check if there is already an instance for this package */ - if (!phdev) { - if (!cpu_has(c, X86_FEATURE_DTHERM) || - !cpu_has(c, X86_FEATURE_PTS)) - return -ENODEV; - if (pkg_temp_thermal_device_add(cpu)) - return -ENODEV; - } else { - mutex_lock(&phy_dev_list_mutex); - ++phdev->ref_cnt; - pr_debug("get_core_online: cpu %d ref_cnt %d\n", - cpu, phdev->ref_cnt); - mutex_unlock(&phy_dev_list_mutex); + /* + * Check whether there is work scheduled and whether the work is + * targeted at the outgoing CPU. + */ + if (pkgdev->work_scheduled && was_target) { + /* + * To cancel the work we need to drop the lock, otherwise + * we might deadlock if the work needs to be flushed. + */ + spin_unlock_irq(&pkg_temp_lock); + cancel_delayed_work_sync(&pkgdev->work); + spin_lock_irq(&pkg_temp_lock); + /* + * If this is not the last cpu in the package and the work + * did not run after we dropped the lock above, then we + * need to reschedule the work, otherwise the interrupt + * stays disabled forever. 
+ */ + if (!lastcpu && pkgdev->work_scheduled) + pkg_thermal_schedule_work(target, &pkgdev->work); } - INIT_DELAYED_WORK(&per_cpu(pkg_temp_thermal_threshold_work, cpu), - pkg_temp_thermal_threshold_work_fn); - pr_debug("get_core_online: cpu %d successful\n", cpu); + spin_unlock_irq(&pkg_temp_lock); + /* Final cleanup if this is the last cpu */ + if (lastcpu) + kfree(pkgdev); return 0; } -static void put_core_offline(unsigned int cpu) +static int pkg_thermal_cpu_online(unsigned int cpu) { - if (!pkg_temp_thermal_device_remove(cpu)) - cancel_delayed_work_sync( - &per_cpu(pkg_temp_thermal_threshold_work, cpu)); + struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu); + struct cpuinfo_x86 *c = &cpu_data(cpu); - pr_debug("put_core_offline: cpu %d\n", cpu); -} + /* Paranoia check */ + if (!cpu_has(c, X86_FEATURE_DTHERM) || !cpu_has(c, X86_FEATURE_PTS)) + return -ENODEV; -static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long) hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - case CPU_DOWN_FAILED: - get_core_online(cpu); - break; - case CPU_DOWN_PREPARE: - put_core_offline(cpu); - break; + /* If the package exists, nothing to do */ + if (pkgdev) { + cpumask_set_cpu(cpu, &pkgdev->cpumask); + return 0; } - return NOTIFY_OK; + return pkg_temp_thermal_device_add(cpu); } -static struct notifier_block pkg_temp_thermal_notifier __refdata = { - .notifier_call = pkg_temp_thermal_cpu_callback, -}; - static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = { { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS }, {} @@ -580,71 +510,46 @@ MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids); static int __init pkg_temp_thermal_init(void) { - int i; + int ret; if (!x86_match_cpu(pkg_temp_thermal_ids)) return -ENODEV; - spin_lock_init(&pkg_work_lock); - platform_thermal_package_notify = - pkg_temp_thermal_platform_thermal_notify; - platform_thermal_package_rate_control = - pkg_temp_thermal_platform_thermal_rate_control; + max_packages = topology_max_packages(); + packages = kzalloc(max_packages * sizeof(struct pkg_device *), GFP_KERNEL); + if (!packages) + return -ENOMEM; - cpu_notifier_register_begin(); - for_each_online_cpu(i) - if (get_core_online(i)) - goto err_ret; - __register_hotcpu_notifier(&pkg_temp_thermal_notifier); - cpu_notifier_register_done(); + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online", + pkg_thermal_cpu_online, pkg_thermal_cpu_offline); + if (ret < 0) + goto err; - pkg_temp_debugfs_init(); /* Don't care if fails */ + /* Store the state for module exit */ + pkg_thermal_hp_state = ret; - return 0; + platform_thermal_package_notify = pkg_thermal_notify; + platform_thermal_package_rate_control = pkg_thermal_rate_control; -err_ret: - for_each_online_cpu(i) - put_core_offline(i); - cpu_notifier_register_done(); - kfree(pkg_work_scheduled); - platform_thermal_package_notify = NULL; - platform_thermal_package_rate_control = NULL; + /* Don't care if it fails */ + pkg_temp_debugfs_init(); + return 0; - return -ENODEV; +err: + kfree(packages); + return ret; } +module_init(pkg_temp_thermal_init) static void __exit pkg_temp_thermal_exit(void) { - struct phy_dev_entry *phdev, *n; - int i; - - cpu_notifier_register_begin(); - __unregister_hotcpu_notifier(&pkg_temp_thermal_notifier); - mutex_lock(&phy_dev_list_mutex); - list_for_each_entry_safe(phdev, n, &phy_dev_list, list) { - /* Retore old MSR value for package thermal interrupt */ - 
wrmsr_on_cpu(phdev->first_cpu, - MSR_IA32_PACKAGE_THERM_INTERRUPT, - phdev->start_pkg_therm_low, - phdev->start_pkg_therm_high); - thermal_zone_device_unregister(phdev->tzone); - list_del(&phdev->list); - kfree(phdev); - } - mutex_unlock(&phy_dev_list_mutex); platform_thermal_package_notify = NULL; platform_thermal_package_rate_control = NULL; - for_each_online_cpu(i) - cancel_delayed_work_sync( - &per_cpu(pkg_temp_thermal_threshold_work, i)); - cpu_notifier_register_done(); - - kfree(pkg_work_scheduled); + cpuhp_remove_state(pkg_thermal_hp_state); debugfs_remove_recursive(debugfs); + kfree(packages); } - -module_init(pkg_temp_thermal_init) module_exit(pkg_temp_thermal_exit) MODULE_DESCRIPTION("X86 PKG TEMP Thermal Driver"); diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig index c121acc15bfedef164ed3944ffb45a2296334af6..d35db16aa43f5ef2e3979d46c9c91b20c5774b3d 100644 --- a/drivers/thunderbolt/Kconfig +++ b/drivers/thunderbolt/Kconfig @@ -1,6 +1,8 @@ menuconfig THUNDERBOLT tristate "Thunderbolt support for Apple devices" depends on PCI + depends on X86 || COMPILE_TEST + select APPLE_PROPERTIES if EFI_STUB && X86 select CRC32 help Cactus Ridge Thunderbolt Controller driver diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 2b9602c2c35515aa91c53df8687abc9474253cec..6392990c984dd1ca462219b70e515a5d75aca9af 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c @@ -5,6 +5,7 @@ */ #include +#include #include #include "tb.h" @@ -359,6 +360,40 @@ static int tb_drom_parse_entries(struct tb_switch *sw) return 0; } +/** + * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present + */ +static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size) +{ + struct device *dev = &sw->tb->nhi->pdev->dev; + int len, res; + + len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0); + if (len < 0 || len < sizeof(struct tb_drom_header)) + return -EINVAL; + + sw->drom = kmalloc(len, GFP_KERNEL); + if (!sw->drom) + return -ENOMEM; + + res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom, + len); + if (res) + goto err; + + *size = ((struct tb_drom_header *)sw->drom)->data_len + + TB_DROM_DATA_START; + if (*size > len) + goto err; + + return 0; + +err: + kfree(sw->drom); + sw->drom = NULL; + return -EINVAL; +} + /** * tb_drom_read - copy drom to sw->drom and parse it */ @@ -373,6 +408,13 @@ int tb_drom_read(struct tb_switch *sw) return 0; if (tb_route(sw) == 0) { + /* + * Apple's NHI EFI driver supplies a DROM for the root switch + * in a device property. Use it if available. + */ + if (tb_drom_copy_efi(sw, &size) == 0) + goto parse; + /* * The root switch contains only a dummy drom (header only, * no entries). Hardcode the configuration here. 
@@ -418,6 +460,7 @@ int tb_drom_read(struct tb_switch *sw) if (res) goto err; +parse: header = (void *) sw->drom; if (header->data_len + TB_DROM_DATA_START != size) { diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h index 86b996c702a0aac0e263a22f6259e4fd4b58fec4..75cf0691e6c5d13f75df48525332bc252f02511f 100644 --- a/drivers/thunderbolt/nhi_regs.h +++ b/drivers/thunderbolt/nhi_regs.h @@ -1,11 +1,11 @@ /* - * Thunderbolt Cactus Ridge driver - NHI registers + * Thunderbolt driver - NHI registers * * Copyright (c) 2014 Andreas Noever */ -#ifndef DSL3510_REGS_H_ -#define DSL3510_REGS_H_ +#ifndef NHI_REGS_H_ +#define NHI_REGS_H_ #include diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 9840fdecb73b5226f36ae0b61c4761f23d4dfbff..c6f30b1695a959b2dea798e99a5735652b3bb9e4 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -460,7 +460,7 @@ int tb_switch_resume(struct tb_switch *sw) tb_sw_warn(sw, "uid read failed\n"); return err; } - if (sw->uid != uid) { + if (sw != sw->tb->root_switch && sw->uid != uid) { tb_sw_info(sw, "changed while suspended (uid %#llx -> %#llx)\n", sw->uid, uid); diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c index 208f573495dcab3efdad85fcbdcd32809102c192..dea16bb8c46a35b14604472fdf84a795d8fe3b51 100644 --- a/drivers/tty/amiserial.c +++ b/drivers/tty/amiserial.c @@ -127,7 +127,7 @@ static struct serial_state rs_table[1]; #define NR_PORTS ARRAY_SIZE(rs_table) -#include +#include #define serial_isroot() (capable(CAP_SYS_ADMIN)) @@ -1012,8 +1012,6 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state, { struct serial_struct tmp; - if (!retinfo) - return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tty_lock(tty); tmp.line = tty->index; diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index ce864875330e4b0d89c7c6cb2e7207ddab37ff57..9b5c0fb216b58ac86b1f6fc3acca2e5f9aa53fe1 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -42,7 +42,7 @@ #include #include -#include +#include #include "hvc_console.h" diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c index 3c4d7c2b4ade876a06a7bd5939638302178cf39f..7823d6d998cfd8ca70ab32eb2f51f43c34ef9f3c 100644 --- a/drivers/tty/hvc/hvcs.c +++ b/drivers/tty/hvc/hvcs.c @@ -81,7 +81,7 @@ #include #include #include -#include +#include #include /* diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c index 96ce6bd1cc6f699260b5b9613487d352cab57c08..2e578d6433af95a0fe6f757b6cbdb7546d5a13d7 100644 --- a/drivers/tty/hvc/hvsi.c +++ b/drivers/tty/hvc/hvsi.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index 60d37b225589c25a509591f3076af353a19c752f..4caf0c3b1f99569935a221ee92a383b3697aca58 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -47,7 +47,7 @@ #include #include -#include +#include #include "moxa.h" diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 69294ae154be0f17ae0e56c39d6fa048077b3c9c..7b8f383fb090ca8836e0fbc58fff80c190cb516b 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -43,7 +43,7 @@ #include #include -#include +#include #include "mxser.h" diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 54cab59e20edf9e34220354c4c2e870f3c35d027..f3932baed07dc3162ceef88f6c1f831ade5d4cfb 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2711,15 +2711,6 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci, 
return; } -static int gsm_change_mtu(struct net_device *net, int new_mtu) -{ - struct gsm_mux_net *mux_net = netdev_priv(net); - if ((new_mtu < 8) || (new_mtu > mux_net->dlci->gsm->mtu)) - return -EINVAL; - net->mtu = new_mtu; - return 0; -} - static void gsm_mux_net_init(struct net_device *net) { static const struct net_device_ops gsm_netdev_ops = { @@ -2728,7 +2719,6 @@ static void gsm_mux_net_init(struct net_device *net) .ndo_start_xmit = gsm_mux_net_start_xmit, .ndo_tx_timeout = gsm_mux_net_tx_timeout, .ndo_get_stats = gsm_mux_net_get_stats, - .ndo_change_mtu = gsm_change_mtu, }; net->netdev_ops = &gsm_netdev_ops; @@ -2787,6 +2777,8 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc) return -ENOMEM; } net->mtu = dlci->gsm->mtu; + net->min_mtu = 8; + net->max_mtu = dlci->gsm->mtu; mux_net = netdev_priv(net); mux_net->dlci = dlci; kref_init(&mux_net->ref); diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index a7fa016f31ebff79260236e12163d3612549200f..eb278832f5ce52c880f76a8eb9baa5e9e68cf447 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -103,7 +103,7 @@ #include #include -#include +#include /* * Buffers for individual HDLC frames diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c index 345111467b85026ead297b67f27a70cbf67a867d..305b6490d40532e5f10beceac5410817783c016e 100644 --- a/drivers/tty/n_r3964.c +++ b/drivers/tty/n_r3964.c @@ -65,7 +65,7 @@ #include #include #include -#include +#include /*#define DEBUG_QUEUE*/ diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c index d6fd0e802ef512732951bc6e8eaca099ce7bfc1d..39b3723a32a63f1275cfac856bd344d5d8f98344 100644 --- a/drivers/tty/nozomi.c +++ b/drivers/tty/nozomi.c @@ -63,44 +63,23 @@ #define VERSION_STRING DRIVER_DESC " 2.1d" -/* Macros definitions */ - /* Default debug printout level */ #define NOZOMI_DEBUG_LEVEL 0x00 - -#define P_BUF_SIZE 128 -#define NFO(_err_flag_, args...) \ -do { \ - char tmp[P_BUF_SIZE]; \ - snprintf(tmp, sizeof(tmp), ##args); \ - printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \ - __func__, tmp); \ -} while (0) - -#define DBG1(args...) D_(0x01, ##args) -#define DBG2(args...) D_(0x02, ##args) -#define DBG3(args...) D_(0x04, ##args) -#define DBG4(args...) D_(0x08, ##args) -#define DBG5(args...) D_(0x10, ##args) -#define DBG6(args...) D_(0x20, ##args) -#define DBG7(args...) D_(0x40, ##args) -#define DBG8(args...) D_(0x80, ##args) - -#ifdef DEBUG -/* Do we need this settable at runtime? */ static int debug = NOZOMI_DEBUG_LEVEL; +module_param(debug, int, S_IRUGO | S_IWUSR); -#define D(lvl, args...) do \ - {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \ - while (0) -#define D_(lvl, args...) D(lvl, ##args) - -/* These printouts are always printed */ +/* Macros definitions */ +#define DBG_(lvl, fmt, args...) \ +do { \ + if (lvl & debug) \ + pr_debug("[%d] %s(): " fmt "\n", \ + __LINE__, __func__, ##args); \ +} while (0) -#else -static int debug; -#define D_(lvl, args...) -#endif +#define DBG1(args...) DBG_(0x01, ##args) +#define DBG2(args...) DBG_(0x02, ##args) +#define DBG3(args...) DBG_(0x04, ##args) +#define DBG4(args...) DBG_(0x08, ##args) /* TODO: rewrite to optimize macros... 
*/ @@ -1320,7 +1299,7 @@ static ssize_t card_type_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%d\n", dc->card_type); } -static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL); +static DEVICE_ATTR_RO(card_type); static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1329,7 +1308,7 @@ static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%u\n", dc->open_ttys); } -static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL); +static DEVICE_ATTR_RO(open_ttys); static void make_sysfs_files(struct nozomi *dc) { @@ -1943,7 +1922,5 @@ static __exit void nozomi_exit(void) module_init(nozomi_init); module_exit(nozomi_exit); -module_param(debug, int, S_IRUGO | S_IWUSR); - MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index b0cc47c77b40f79a49b637fafce9fa64914f234a..d66c1edd98926d5d410895ba8a39b1ed82d10340 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -1189,8 +1189,6 @@ static int get_config(struct r_port *info, struct rocket_config __user *retinfo) { struct rocket_config tmp; - if (!retinfo) - return -EFAULT; memset(&tmp, 0, sizeof (tmp)); mutex_lock(&info->port.mutex); tmp.line = info->line; @@ -1255,8 +1253,6 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports) struct rocket_ports tmp; int board; - if (!retports) - return -EFAULT; memset(&tmp, 0, sizeof (tmp)); tmp.tty_major = rocket_driver->major; diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index a697a8585ddc346a72de6e30cdb77785a5688e71..ce8d4ffcc425b045377305f176aa8fa85af0068f 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -80,6 +80,7 @@ struct serial8250_config { #define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */ #define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */ #define UART_CAP_RPM (1 << 15) /* Runtime PM is active while idle */ +#define UART_CAP_IRDA (1 << 16) /* UART supports IrDA line discipline */ #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */ #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */ @@ -129,8 +130,13 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value) } struct uart_8250_port *serial8250_get_port(int line); + void serial8250_rpm_get(struct uart_8250_port *p); void serial8250_rpm_put(struct uart_8250_port *p); + +void serial8250_rpm_get_tx(struct uart_8250_port *p); +void serial8250_rpm_put_tx(struct uart_8250_port *p); + int serial8250_em485_init(struct uart_8250_port *p); void serial8250_em485_destroy(struct uart_8250_port *p); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 240a361b674fe72ce657067e5303156b7add6a6f..61569a765d9ee17213052db6f0d3a1c41d4d9293 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -425,10 +425,10 @@ struct uart_8250_port *serial8250_get_port(int line) EXPORT_SYMBOL_GPL(serial8250_get_port); static void (*serial8250_isa_config)(int port, struct uart_port *up, - unsigned short *capabilities); + u32 *capabilities); void serial8250_set_isa_configurator( - void (*v)(int port, struct uart_port *up, unsigned short *capabilities)) + void (*v)(int port, struct uart_port *up, u32 *capabilities)) { serial8250_isa_config = v; } @@ -830,6 +830,7 @@ static int serial8250_probe(struct platform_device *dev) 
uart.port.handle_irq = p->handle_irq; uart.port.handle_break = p->handle_break; uart.port.set_termios = p->set_termios; + uart.port.set_ldisc = p->set_ldisc; uart.port.get_mctrl = p->get_mctrl; uart.port.pm = p->pm; uart.port.dev = &dev->dev; @@ -1023,6 +1024,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up) /* Possibly override set_termios call */ if (up->port.set_termios) uart->port.set_termios = up->port.set_termios; + if (up->port.set_ldisc) + uart->port.set_ldisc = up->port.set_ldisc; if (up->port.get_mctrl) uart->port.get_mctrl = up->port.get_mctrl; if (up->port.set_mctrl) diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index fdbddbc6375da50932d3ce156d4bc2643de67b20..26f17456b0d76143484b46ff3e948ee55e79b3eb 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -72,10 +72,15 @@ int serial8250_tx_dma(struct uart_8250_port *p) struct dma_async_tx_descriptor *desc; int ret; - if (uart_tx_stopped(&p->port) || dma->tx_running || - uart_circ_empty(xmit)) + if (dma->tx_running) return 0; + if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { + /* We have been called from __dma_tx_complete() */ + serial8250_rpm_put_tx(p); + return 0; + } + dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); desc = dmaengine_prep_slave_single(dma->txchan, diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 459d726f9d59bc709a889163964f775866e7adb4..c89fafc972b69f3a43c59b14a58d5806068d75f0 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -53,6 +53,8 @@ /* Helper for fifo size calculation */ #define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16) +/* DesignWare specific register fields */ +#define DW_UART_MCR_SIRE BIT(6) struct dw8250_data { u8 usr_reg; @@ -254,6 +256,22 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, serial8250_do_set_termios(p, termios, old); } +static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int mcr = p->serial_in(p, UART_MCR); + + if (up->capabilities & UART_CAP_IRDA) { + if (termios->c_line == N_IRDA) + mcr |= DW_UART_MCR_SIRE; + else + mcr &= ~DW_UART_MCR_SIRE; + + p->serial_out(p, UART_MCR, mcr); + } + serial8250_do_set_ldisc(p, termios); +} + /* * dw8250_fallback_dma_filter will prevent the UART from getting just any free * channel on platforms that have DMA engines, but don't have any channels @@ -357,6 +375,9 @@ static void dw8250_setup_port(struct uart_port *p) if (reg & DW_UART_CPR_AFCE_MODE) up->capabilities |= UART_CAP_AFE; + + if (reg & DW_UART_CPR_SIR_MODE) + up->capabilities |= UART_CAP_IRDA; } static int dw8250_probe(struct platform_device *pdev) @@ -392,6 +413,7 @@ static int dw8250_probe(struct platform_device *pdev) p->iotype = UPIO_MEM; p->serial_in = dw8250_serial_in; p->serial_out = dw8250_serial_out; + p->set_ldisc = dw8250_set_ldisc; p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); if (!p->membase) diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index 0facc789fe7d4133e1c5e32d0a471db53bb3baa5..b67e7a5449353ca8ac2f3a67e0af132517c4f643 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -21,8 +21,11 @@ #define EXIT_KEY 0xAA #define CHIP_ID1 0x20 #define CHIP_ID2 0x21 -#define CHIP_ID_0 0x1602 -#define CHIP_ID_1 0x0501 +#define CHIP_ID_F81865 0x0407 +#define 
CHIP_ID_F81866 0x1010 +#define CHIP_ID_F81216AD 0x1602 +#define CHIP_ID_F81216H 0x0501 +#define CHIP_ID_F81216 0x0802 #define VENDOR_ID1 0x23 #define VENDOR_ID1_VAL 0x19 #define VENDOR_ID2 0x24 @@ -43,12 +46,60 @@ #define RXW4C_IRA BIT(3) #define TXW4C_IRA BIT(2) +#define FIFO_CTRL 0xF6 +#define FIFO_MODE_MASK (BIT(1) | BIT(0)) +#define FIFO_MODE_128 (BIT(1) | BIT(0)) +#define RXFTHR_MODE_MASK (BIT(5) | BIT(4)) +#define RXFTHR_MODE_4X BIT(5) + +#define F81216_LDN_LOW 0x0 +#define F81216_LDN_HIGH 0x4 + +/* + * F81866 registers + * + * The IRQ setting mode of F81866 is not the same with F81216 series. + * Level/Low: IRQ_MODE0:0, IRQ_MODE1:0 + * Edge/High: IRQ_MODE0:1, IRQ_MODE1:0 + */ +#define F81866_IRQ_MODE 0xf0 +#define F81866_IRQ_SHARE BIT(0) +#define F81866_IRQ_MODE0 BIT(1) + +#define F81866_FIFO_CTRL FIFO_CTRL +#define F81866_IRQ_MODE1 BIT(3) + +#define F81866_LDN_LOW 0x10 +#define F81866_LDN_HIGH 0x16 + struct fintek_8250 { + u16 pid; u16 base_port; u8 index; u8 key; }; +static u8 sio_read_reg(struct fintek_8250 *pdata, u8 reg) +{ + outb(reg, pdata->base_port + ADDR_PORT); + return inb(pdata->base_port + DATA_PORT); +} + +static void sio_write_reg(struct fintek_8250 *pdata, u8 reg, u8 data) +{ + outb(reg, pdata->base_port + ADDR_PORT); + outb(data, pdata->base_port + DATA_PORT); +} + +static void sio_write_mask_reg(struct fintek_8250 *pdata, u8 reg, u8 mask, + u8 data) +{ + u8 tmp; + + tmp = (sio_read_reg(pdata, reg) & ~mask) | (mask & data); + sio_write_reg(pdata, reg, tmp); +} + static int fintek_8250_enter_key(u16 base_port, u8 key) { if (!request_muxed_region(base_port, 2, "8250_fintek")) @@ -66,29 +117,55 @@ static void fintek_8250_exit_key(u16 base_port) release_region(base_port + ADDR_PORT, 2); } -static int fintek_8250_check_id(u16 base_port) +static int fintek_8250_check_id(struct fintek_8250 *pdata) { u16 chip; - outb(VENDOR_ID1, base_port + ADDR_PORT); - if (inb(base_port + DATA_PORT) != VENDOR_ID1_VAL) + if (sio_read_reg(pdata, VENDOR_ID1) != VENDOR_ID1_VAL) return -ENODEV; - outb(VENDOR_ID2, base_port + ADDR_PORT); - if (inb(base_port + DATA_PORT) != VENDOR_ID2_VAL) + if (sio_read_reg(pdata, VENDOR_ID2) != VENDOR_ID2_VAL) return -ENODEV; - outb(CHIP_ID1, base_port + ADDR_PORT); - chip = inb(base_port + DATA_PORT); - outb(CHIP_ID2, base_port + ADDR_PORT); - chip |= inb(base_port + DATA_PORT) << 8; - - if (chip != CHIP_ID_0 && chip != CHIP_ID_1) + chip = sio_read_reg(pdata, CHIP_ID1); + chip |= sio_read_reg(pdata, CHIP_ID2) << 8; + + switch (chip) { + case CHIP_ID_F81865: + case CHIP_ID_F81866: + case CHIP_ID_F81216AD: + case CHIP_ID_F81216H: + case CHIP_ID_F81216: + break; + default: return -ENODEV; + } + pdata->pid = chip; return 0; } +static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min, + int *max) +{ + switch (pdata->pid) { + case CHIP_ID_F81865: + case CHIP_ID_F81866: + *min = F81866_LDN_LOW; + *max = F81866_LDN_HIGH; + return 0; + + case CHIP_ID_F81216AD: + case CHIP_ID_F81216H: + case CHIP_ID_F81216: + *min = F81216_LDN_LOW; + *max = F81216_LDN_HIGH; + return 0; + } + + return -ENODEV; +} + static int fintek_8250_rs485_config(struct uart_port *port, struct serial_rs485 *rs485) { @@ -128,10 +205,8 @@ static int fintek_8250_rs485_config(struct uart_port *port, if (fintek_8250_enter_key(pdata->base_port, pdata->key)) return -EBUSY; - outb(LDN, pdata->base_port + ADDR_PORT); - outb(pdata->index, pdata->base_port + DATA_PORT); - outb(RS485, pdata->base_port + ADDR_PORT); - outb(config, pdata->base_port + DATA_PORT); + sio_write_reg(pdata, LDN, 
pdata->index); + sio_write_reg(pdata, RS485, config); fintek_8250_exit_key(pdata->base_port); port->rs485 = *rs485; @@ -139,40 +214,90 @@ static int fintek_8250_rs485_config(struct uart_port *port, return 0; } -static int find_base_port(struct fintek_8250 *pdata, u16 io_address) +static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level) +{ + sio_write_reg(pdata, LDN, pdata->index); + + switch (pdata->pid) { + case CHIP_ID_F81866: + sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1, + 0); + /* fall through */ + case CHIP_ID_F81865: + sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE, + F81866_IRQ_SHARE); + sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_MODE0, + is_level ? 0 : F81866_IRQ_MODE0); + break; + + case CHIP_ID_F81216AD: + case CHIP_ID_F81216H: + case CHIP_ID_F81216: + sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_SHARE, + IRQ_SHARE); + sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_MODE_MASK, + is_level ? IRQ_LEVEL_LOW : IRQ_EDGE_HIGH); + break; + } +} + +static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata) +{ + switch (pdata->pid) { + case CHIP_ID_F81216H: /* 128Bytes FIFO */ + case CHIP_ID_F81866: + sio_write_mask_reg(pdata, FIFO_CTRL, + FIFO_MODE_MASK | RXFTHR_MODE_MASK, + FIFO_MODE_128 | RXFTHR_MODE_4X); + break; + + default: /* Default 16Bytes FIFO */ + break; + } +} + +static int probe_setup_port(struct fintek_8250 *pdata, u16 io_address, + unsigned int irq) { static const u16 addr[] = {0x4e, 0x2e}; static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67}; - int i, j, k; + struct irq_data *irq_data; + bool level_mode = false; + int i, j, k, min, max; for (i = 0; i < ARRAY_SIZE(addr); i++) { for (j = 0; j < ARRAY_SIZE(keys); j++) { + pdata->base_port = addr[i]; + pdata->key = keys[j]; if (fintek_8250_enter_key(addr[i], keys[j])) continue; - if (fintek_8250_check_id(addr[i])) { + if (fintek_8250_check_id(pdata) || + fintek_8250_get_ldn_range(pdata, &min, &max)) { fintek_8250_exit_key(addr[i]); continue; } - for (k = 0; k < 4; k++) { + for (k = min; k < max; k++) { u16 aux; - outb(LDN, addr[i] + ADDR_PORT); - outb(k, addr[i] + DATA_PORT); - - outb(IO_ADDR1, addr[i] + ADDR_PORT); - aux = inb(addr[i] + DATA_PORT); - outb(IO_ADDR2, addr[i] + ADDR_PORT); - aux |= inb(addr[i] + DATA_PORT) << 8; + sio_write_reg(pdata, LDN, k); + aux = sio_read_reg(pdata, IO_ADDR1); + aux |= sio_read_reg(pdata, IO_ADDR2) << 8; if (aux != io_address) continue; - fintek_8250_exit_key(addr[i]); - pdata->key = keys[j]; - pdata->base_port = addr[i]; pdata->index = k; + irq_data = irq_get_irq_data(irq); + if (irq_data) + level_mode = + irqd_is_level_type(irq_data); + + fintek_8250_set_irq_mode(pdata, level_mode); + fintek_8250_set_max_fifo(pdata); + fintek_8250_exit_key(addr[i]); + return 0; } @@ -183,39 +308,29 @@ static int find_base_port(struct fintek_8250 *pdata, u16 io_address) return -ENODEV; } -static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode) +static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart) { - int status; - u8 tmp; - - status = fintek_8250_enter_key(pdata->base_port, pdata->key); - if (status) - return status; - - outb(LDN, pdata->base_port + ADDR_PORT); - outb(pdata->index, pdata->base_port + DATA_PORT); - - outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT); - tmp = inb(pdata->base_port + DATA_PORT); - - tmp &= ~IRQ_MODE_MASK; - tmp |= IRQ_SHARE; - if (!level_mode) - tmp |= IRQ_EDGE_HIGH; - - outb(tmp, pdata->base_port + DATA_PORT); - fintek_8250_exit_key(pdata->base_port); - return 0; + 
struct fintek_8250 *pdata = uart->port.private_data; + + switch (pdata->pid) { + case CHIP_ID_F81216AD: + case CHIP_ID_F81216H: + case CHIP_ID_F81866: + case CHIP_ID_F81865: + uart->port.rs485_config = fintek_8250_rs485_config; + break; + + default: /* No RS485 Auto direction functional */ + break; + } } int fintek_8250_probe(struct uart_8250_port *uart) { struct fintek_8250 *pdata; struct fintek_8250 probe_data; - struct irq_data *irq_data = irq_get_irq_data(uart->port.irq); - bool level_mode = irqd_is_level_type(irq_data); - if (find_base_port(&probe_data, uart->port.iobase)) + if (probe_setup_port(&probe_data, uart->port.iobase, uart->port.irq)) return -ENODEV; pdata = devm_kzalloc(uart->port.dev, sizeof(*pdata), GFP_KERNEL); @@ -223,8 +338,8 @@ int fintek_8250_probe(struct uart_8250_port *uart) return -ENOMEM; memcpy(pdata, &probe_data, sizeof(probe_data)); - uart->port.rs485_config = fintek_8250_rs485_config; uart->port.private_data = pdata; + fintek_8250_set_rs485_handler(uart); - return fintek_8250_set_irq_mode(pdata, level_mode); + return 0; } diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index b9923464599f6fc18b825d902fe8f14b5555e981..58cbb30a940178edb483a865ad2c2cb29268f96f 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -157,12 +157,12 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) static const struct dw_dma_platform_data qrk_serial_dma_pdata = { .nr_channels = 2, .is_private = true, - .is_nollp = true, .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, .chan_priority = CHAN_PRIORITY_ASCENDING, .block_size = 4095, .nr_masters = 1, .data_width = {4}, + .multi_block = {0}, }; static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) @@ -174,7 +174,7 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) int ret; chip->dev = &pdev->dev; - chip->irq = pdev->irq; + chip->irq = pci_irq_vector(pdev, 0); chip->regs = pci_ioremap_bar(pdev, 1); chip->pdata = &qrk_serial_dma_pdata; @@ -183,6 +183,9 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) if (ret) return; + pci_set_master(pdev); + pci_try_set_mwi(pdev); + /* Special DMA address for UART */ dma->rx_dma_addr = 0xfffff000; dma->tx_dma_addr = 0xfffff000; @@ -280,8 +283,6 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) return ret; - pci_set_master(pdev); - lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL); if (!lpss) return -ENOMEM; diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index 39c2324484ddf05edf9d241ec7f0d7b5ec4fd40c..ac013edf4992e74362f62ba34789f57b63ba9128 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c @@ -303,10 +303,10 @@ static void mid8250_remove(struct pci_dev *pdev) { struct mid8250 *mid = pci_get_drvdata(pdev); + serial8250_unregister_port(mid->line); + if (mid->board->exit) mid->board->exit(mid); - - serial8250_unregister_port(mid->line); } static const struct mid8250_board pnw_board = { diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index 7a8b5fc81a192f081add58d5a1a212f9c5a41093..d25ab1cd42958cb8f24c00af1d0def1fd0711278 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -332,8 +332,6 @@ static const struct of_device_id of_platform_serial_table[] = { .data = (void *)PORT_ALTR_16550_F128, }, { .compatible = 
"mrvl,mmp-uart", .data = (void *)PORT_XSCALE, }, - { .compatible = "mrvl,pxa-uart", - .data = (void *)PORT_XSCALE, }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, of_platform_serial_table); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index b98c1578f45adbd86420f63683bb904e2b9dc92f..aa0166b6d450dcadc85b89d3939c7ae3dd4f0cbc 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -52,6 +52,7 @@ struct serial_private { struct pci_dev *dev; unsigned int nr; struct pci_serial_quirk *quirk; + const struct pciserial_board *board; int line[0]; }; @@ -1329,6 +1330,30 @@ static int pci_default_setup(struct serial_private *priv, return setup_port(priv, port, bar, offset, board->reg_shift); } +static int pci_pericom_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + unsigned int bar, offset = board->first_offset, maxnr; + + bar = FL_GET_BASE(board->flags); + if (board->flags & FL_BASE_BARS) + bar += idx; + else + offset += idx * board->uart_offset; + + if (idx==3) + offset = 0x38; + + maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >> + (board->reg_shift + 3); + + if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr) + return 1; + + return setup_port(priv, port, bar, offset, board->reg_shift); +} + static int ce4100_serial_setup(struct serial_private *priv, const struct pciserial_board *board, @@ -2095,6 +2120,16 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .setup = pci_default_setup, .exit = pci_plx9050_exit, }, + /* + * Pericom (Only 7954 - It have a offset jump for port 4) + */ + { + .vendor = PCI_VENDOR_ID_PERICOM, + .device = PCI_DEVICE_ID_PERICOM_PI7C9X7954, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, /* * PLX */ @@ -3862,6 +3897,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) } } priv->nr = i; + priv->board = board; return priv; err_deinit: @@ -3872,7 +3908,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) } EXPORT_SYMBOL_GPL(pciserial_init_ports); -void pciserial_remove_ports(struct serial_private *priv) +void pciserial_detach_ports(struct serial_private *priv) { struct pci_serial_quirk *quirk; int i; @@ -3886,7 +3922,11 @@ void pciserial_remove_ports(struct serial_private *priv) quirk = find_quirk(priv->dev); if (quirk->exit) quirk->exit(priv->dev); +} +void pciserial_remove_ports(struct serial_private *priv) +{ + pciserial_detach_ports(priv); kfree(priv); } EXPORT_SYMBOL_GPL(pciserial_remove_ports); @@ -5577,7 +5617,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, return PCI_ERS_RESULT_DISCONNECT; if (priv) - pciserial_suspend_ports(priv); + pciserial_detach_ports(priv); pci_disable_device(dev); @@ -5602,9 +5642,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) static void serial8250_io_resume(struct pci_dev *dev) { struct serial_private *priv = pci_get_drvdata(dev); + const struct pciserial_board *board; - if (priv) - pciserial_resume_ports(priv); + if (!priv) + return; + + board = priv->board; + kfree(priv); + priv = pciserial_init_ports(dev, board); + + if (!IS_ERR(priv)) { + pci_set_drvdata(dev, priv); + } } static const struct pci_error_handlers serial8250_err_handler = { diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 
1731b98d2471077c762806b63f88993b0a475fc3..fe4399b41df6cea4904edcee4749bb67532f7e18 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -636,7 +636,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_destroy); * once and disable_runtime_pm_tx() will still disable RPM because the fifo is * empty and the HW can idle again. */ -static void serial8250_rpm_get_tx(struct uart_8250_port *p) +void serial8250_rpm_get_tx(struct uart_8250_port *p) { unsigned char rpm_active; @@ -648,8 +648,9 @@ static void serial8250_rpm_get_tx(struct uart_8250_port *p) return; pm_runtime_get_sync(p->port.dev); } +EXPORT_SYMBOL_GPL(serial8250_rpm_get_tx); -static void serial8250_rpm_put_tx(struct uart_8250_port *p) +void serial8250_rpm_put_tx(struct uart_8250_port *p) { unsigned char rpm_active; @@ -662,6 +663,7 @@ static void serial8250_rpm_put_tx(struct uart_8250_port *p) pm_runtime_mark_last_busy(p->port.dev); pm_runtime_put_autosuspend(p->port.dev); } +EXPORT_SYMBOL_GPL(serial8250_rpm_put_tx); /* * IER sleep support. UARTs which have EFRs need the "extended @@ -2691,8 +2693,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios, serial8250_do_set_termios(port, termios, old); } -static void -serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios) +void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios) { if (termios->c_line == N_PPS) { port->flags |= UPF_HARDPPS_CD; @@ -2708,7 +2709,16 @@ serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios) } } } +EXPORT_SYMBOL_GPL(serial8250_do_set_ldisc); +static void +serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios) +{ + if (port->set_ldisc) + port->set_ldisc(port, termios); + else + serial8250_do_set_ldisc(port, termios); +} void serial8250_do_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c new file mode 100644 index 0000000000000000000000000000000000000000..4d68731af5340e70ef9e337a199e4e65fd6eaa54 --- /dev/null +++ b/drivers/tty/serial/8250/8250_pxa.c @@ -0,0 +1,190 @@ +/* + * drivers/tty/serial/8250/8250_pxa.c -- driver for PXA on-board UARTS + * Copyright: (C) 2013 Sergei Ianovich + * + * replaces drivers/serial/pxa.c by Nicolas Pitre + * Created: Feb 20, 2003 + * Copyright: (C) 2003 Monta Vista Software, Inc. + * + * Based on drivers/serial/8250.c by Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "8250.h" + +struct pxa8250_data { + int line; + struct clk *clk; +}; + +static int __maybe_unused serial_pxa_suspend(struct device *dev) +{ + struct pxa8250_data *data = dev_get_drvdata(dev); + + serial8250_suspend_port(data->line); + + return 0; +} + +static int __maybe_unused serial_pxa_resume(struct device *dev) +{ + struct pxa8250_data *data = dev_get_drvdata(dev); + + serial8250_resume_port(data->line); + + return 0; +} + +static const struct dev_pm_ops serial_pxa_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(serial_pxa_suspend, serial_pxa_resume) +}; + +static const struct of_device_id serial_pxa_dt_ids[] = { + { .compatible = "mrvl,pxa-uart", }, + { .compatible = "mrvl,mmp-uart", }, + {} +}; +MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids); + +/* Uart divisor latch write */ +static void serial_pxa_dl_write(struct uart_8250_port *up, int value) +{ + unsigned int dll; + + serial_out(up, UART_DLL, value & 0xff); + /* + * work around Erratum #74 according to Marvel(R) PXA270M Processor + * Specification Update (April 19, 2010) + */ + dll = serial_in(up, UART_DLL); + WARN_ON(dll != (value & 0xff)); + + serial_out(up, UART_DLM, value >> 8 & 0xff); +} + + +static void serial_pxa_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct pxa8250_data *data = port->private_data; + + if (!state) + clk_prepare_enable(data->clk); + else + clk_disable_unprepare(data->clk); +} + +static int serial_pxa_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}; + struct pxa8250_data *data; + struct resource *mmres, *irqres; + int ret; + + mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!mmres || !irqres) + return -ENODEV; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(data->clk)) + return PTR_ERR(data->clk); + + ret = clk_prepare(data->clk); + if (ret) + return ret; + + uart.port.type = PORT_XSCALE; + uart.port.iotype = UPIO_MEM32; + uart.port.mapbase = mmres->start; + uart.port.regshift = 2; + uart.port.irq = irqres->start; + uart.port.fifosize = 64; + uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST; + uart.port.dev = &pdev->dev; + uart.port.uartclk = clk_get_rate(data->clk); + uart.port.pm = serial_pxa_pm; + uart.port.private_data = data; + uart.dl_write = serial_pxa_dl_write; + + ret = serial8250_register_8250_port(&uart); + if (ret < 0) + goto err_clk; + + data->line = ret; + + platform_set_drvdata(pdev, data); + + return 0; + + err_clk: + clk_unprepare(data->clk); + return ret; +} + +static int serial_pxa_remove(struct platform_device *pdev) +{ + struct pxa8250_data *data = platform_get_drvdata(pdev); + + serial8250_unregister_port(data->line); + + clk_unprepare(data->clk); + + return 0; +} + +static struct platform_driver serial_pxa_driver = { + .probe = serial_pxa_probe, + .remove = serial_pxa_remove, + + .driver = { + .name = "pxa2xx-uart", + .pm = &serial_pxa_pm_ops, + .of_match_table = serial_pxa_dt_ids, + }, +}; + +module_platform_driver(serial_pxa_driver); + +#ifdef CONFIG_SERIAL_8250_CONSOLE +static int __init early_serial_pxa_setup(struct earlycon_device *device, + const char *options) +{ + struct uart_port *port = &device->port; + + if (!(device->port.membase || device->port.iobase)) + return -ENODEV; + + port->regshift = 
2; + return early_serial8250_setup(device, NULL); +} +OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup); +#endif + +MODULE_AUTHOR("Sergei Ianovich"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pxa2xx-uart"); diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index 417d9e7038e1aa53ea3da50569d158f7b88d4c78..746680ebf90c572b6e01ec3080555454477ac8d8 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -24,10 +24,22 @@ /* Most (but not all) of UniPhier UART devices have 64-depth FIFO. */ #define UNIPHIER_UART_DEFAULT_FIFO_SIZE 64 -#define UNIPHIER_UART_CHAR_FCR 3 /* Character / FIFO Control Register */ -#define UNIPHIER_UART_LCR_MCR 4 /* Line/Modem Control Register */ -#define UNIPHIER_UART_LCR_SHIFT 8 -#define UNIPHIER_UART_DLR 9 /* Divisor Latch Register */ +/* + * This hardware is similar to 8250, but its register map is a bit different: + * - MMIO32 (regshift = 2) + * - FCR is not at 2, but 3 + * - LCR and MCR are not at 3 and 4, they share 4 + * - Divisor latch at 9, no divisor latch access bit + */ + +#define UNIPHIER_UART_REGSHIFT 2 + +/* bit[15:8] = CHAR (not used), bit[7:0] = FCR */ +#define UNIPHIER_UART_CHAR_FCR (3 << (UNIPHIER_UART_REGSHIFT)) +/* bit[15:8] = LCR, bit[7:0] = MCR */ +#define UNIPHIER_UART_LCR_MCR (4 << (UNIPHIER_UART_REGSHIFT)) +/* Divisor Latch Register */ +#define UNIPHIER_UART_DLR (9 << (UNIPHIER_UART_REGSHIFT)) struct uniphier8250_priv { int line; @@ -44,7 +56,7 @@ static int __init uniphier_early_console_setup(struct earlycon_device *device, /* This hardware always expects MMIO32 register interface. */ device->port.iotype = UPIO_MEM32; - device->port.regshift = 2; + device->port.regshift = UNIPHIER_UART_REGSHIFT; /* * Do not touch the divisor register in early_serial8250_setup(); @@ -68,17 +80,16 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset) switch (offset) { case UART_LCR: - valshift = UNIPHIER_UART_LCR_SHIFT; + valshift = 8; /* fall through */ case UART_MCR: offset = UNIPHIER_UART_LCR_MCR; break; default: + offset <<= UNIPHIER_UART_REGSHIFT; break; } - offset <<= p->regshift; - /* * The return value must be masked with 0xff because LCR and MCR reside * in the same register that must be accessed by 32-bit write/read. @@ -90,27 +101,26 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset) static void uniphier_serial_out(struct uart_port *p, int offset, int value) { unsigned int valshift = 0; - bool normal = false; + bool normal = true; switch (offset) { case UART_FCR: offset = UNIPHIER_UART_CHAR_FCR; break; case UART_LCR: - valshift = UNIPHIER_UART_LCR_SHIFT; + valshift = 8; /* Divisor latch access bit does not exist. 
*/ value &= ~UART_LCR_DLAB; /* fall through */ case UART_MCR: offset = UNIPHIER_UART_LCR_MCR; + normal = false; break; default: - normal = true; + offset <<= UNIPHIER_UART_REGSHIFT; break; } - offset <<= p->regshift; - if (normal) { writel(value, p->membase + offset); } else { @@ -139,16 +149,12 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value) */ static int uniphier_serial_dl_read(struct uart_8250_port *up) { - int offset = UNIPHIER_UART_DLR << up->port.regshift; - - return readl(up->port.membase + offset); + return readl(up->port.membase + UNIPHIER_UART_DLR); } static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) { - int offset = UNIPHIER_UART_DLR << up->port.regshift; - - writel(value, up->port.membase + offset); + writel(value, up->port.membase + UNIPHIER_UART_DLR); } static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port, @@ -234,7 +240,7 @@ static int uniphier_uart_probe(struct platform_device *pdev) up.port.type = PORT_16550A; up.port.iotype = UPIO_MEM32; - up.port.regshift = 2; + up.port.regshift = UNIPHIER_UART_REGSHIFT; up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE; up.capabilities = UART_CAP_FIFO; diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index 899834776b36001809f9ec0429b0d1208d48502d..0b8b6740ba436a20c9d1a3447aa0a43245e27709 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -439,6 +439,16 @@ config SERIAL_8250_MOXA This driver can also be built as a module. The module will be called 8250_moxa. If you want to do that, say M here. +config SERIAL_8250_PXA + tristate "PXA serial port support" + depends on SERIAL_8250 + depends on ARCH_PXA || ARCH_MMP + help + If you have a machine based on an Intel XScale PXA2xx CPU you can + enable its onboard serial ports by enabling this option. The option is + applicable to both devicetree and legacy boards, and early console is + part of its support. + config SERIAL_OF_PLATFORM tristate "Devicetree based probing for 8250 ports" depends on SERIAL_8250 && OF diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index 276c6fb6033795a4f451840fadc4a282c8d3f5dc..850e721877a93da2edad8aac4100d9d275bd27c0 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o obj-$(CONFIG_SERIAL_8250_MOXA) += 8250_moxa.o +obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 25c1d7bc010043b15f1676660bef7577150252cb..e9cf5b67f1b793e6b21f96a0c3371134a7eb5b90 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -438,17 +438,27 @@ config SERIAL_MPSC_CONSOLE Say Y here if you want to support a serial console on a Marvell MPSC. config SERIAL_PXA - bool "PXA serial port support" + bool "PXA serial port support (DEPRECATED)" depends on ARCH_PXA || ARCH_MMP select SERIAL_CORE + select SERIAL_8250_PXA if SERIAL_8250=y + select SERIAL_PXA_NON8250 if !SERIAL_8250=y help If you have a machine based on an Intel XScale PXA2xx CPU you can enable its onboard serial ports by enabling this option. + Unless you have a specific need, you should use SERIAL_8250_PXA + instead of this. 
+ +config SERIAL_PXA_NON8250 + bool + depends on !SERIAL_8250 + config SERIAL_PXA_CONSOLE - bool "Console on PXA serial port" + bool "Console on PXA serial port (DEPRECATED)" depends on SERIAL_PXA select SERIAL_CORE_CONSOLE + select SERIAL_8250_CONSOLE if SERIAL_8250=y help If you have enabled the serial port on the Intel XScale PXA CPU you can make it the console by answering Y to this option. @@ -460,6 +470,9 @@ config SERIAL_PXA_CONSOLE your boot loader (lilo or loadlin) about how to pass options to the kernel at boot time.) + Unless you have a specific need, you should use SERIAL_8250_PXA + and SERIAL_8250_CONSOLE instead of this. + config SERIAL_SA1100 bool "SA1100 serial port support" depends on ARCH_SA1100 @@ -1626,7 +1639,7 @@ config SERIAL_STM32 tristate "STMicroelectronics STM32 serial port support" select SERIAL_CORE depends on HAS_DMA - depends on ARM || COMPILE_TEST + depends on ARCH_STM32 || COMPILE_TEST help This driver is for the on-chip Serial Controller on STMicroelectronics STM32 MCUs. diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 1278d376da507d59dbb6a15313864f36e65a5d6d..2d6288bc45543f4d68ada0e44538564230db79c6 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -23,7 +23,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250/ obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o -obj-$(CONFIG_SERIAL_PXA) += pxa.o +obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o obj-$(CONFIG_SERIAL_SA1100) += sa1100.o obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o @@ -62,13 +62,11 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o obj-$(CONFIG_SERIAL_NETX) += netx-serial.o -obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o obj-$(CONFIG_SERIAL_TILEGX) += tilegx.o -obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o obj-$(CONFIG_SERIAL_QE) += ucc_uart.o obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o @@ -96,3 +94,6 @@ obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o # GPIOLIB helpers for modem control lines obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o + +obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o +obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index e2c33b9528d82ed7a2c27d083d7b1d222da68178..d4171d71a258f2696ab2ab2109376ee5a516bfba 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2315,12 +2315,67 @@ static int __init pl011_console_setup(struct console *co, char *options) return uart_set_options(&uap->port, co, baud, parity, bits, flow); } +/** + * pl011_console_match - non-standard console matching + * @co: registering console + * @name: name from console command line + * @idx: index from console command line + * @options: ptr to option string from console command line + * + * Only attempts to match console command lines of the form: + * console=pl011,mmio|mmio32,[,] + * console=pl011,0x[,] + * This form is used to register an initial earlycon boot console and + * replace it with the amba_console at pl011 driver init. 
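For context on the pl011_console_match() addition in this hunk: an early boot console registered via earlycon is meant to be replaced by the full amba-pl011 console once the driver probes the port at the same MMIO address. A hypothetical command line exercising that handover (the address and baud settings below are made up for illustration, not taken from this patch) could look like:

	earlycon=pl011,mmio32,0x9000000 console=pl011,mmio32,0x9000000,115200n8

The match callback parses the address from the console= options, compares it against each registered amba port's mapbase, and on a hit performs normal console setup for that index; otherwise it returns non-zero so the default name/index matching applies.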
+ * + * Performs console setup for a match (as required by interface) + * If no are specified, then assume the h/w is already setup. + * + * Returns 0 if console matches; otherwise non-zero to use default matching + */ +static int __init pl011_console_match(struct console *co, char *name, int idx, + char *options) +{ + unsigned char iotype; + resource_size_t addr; + int i; + + if (strcmp(name, "pl011") != 0) + return -ENODEV; + + if (uart_parse_earlycon(options, &iotype, &addr, &options)) + return -ENODEV; + + if (iotype != UPIO_MEM && iotype != UPIO_MEM32) + return -ENODEV; + + /* try to match the port specified on the command line */ + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { + struct uart_port *port; + + if (!amba_ports[i]) + continue; + + port = &amba_ports[i]->port; + + if (port->mapbase != addr) + continue; + + co->index = i; + port->cons = co; + return pl011_console_setup(co, options); + } + + return -ENODEV; +} + static struct uart_driver amba_reg; static struct console amba_console = { .name = "ttyAMA", .write = pl011_console_write, .device = uart_console_device, .setup = pl011_console_setup, + .match = pl011_console_match, .flags = CON_PRINTBUFFER, .index = -1, .data = &amba_reg, @@ -2357,6 +2412,7 @@ static int __init pl011_early_console_setup(struct earlycon_device *device, return 0; } OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); +OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); #else #define AMBA_CONSOLE NULL diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index 315c84979b18adda3d693ca3d9d8ee927cdd6981..e92c23470e519f839693ca2d486e3aaf2d368854 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -28,7 +28,6 @@ static char *serial_version = "$Revision: 1.25 $"; #include #include #include -#include #include #include @@ -3214,8 +3213,6 @@ get_serial_info(struct e100_serial * info, * should set them to something else than 0. 
*/ - if (!retinfo) - return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.type = info->type; tmp.line = info->line; @@ -4098,7 +4095,7 @@ static void show_serial_version(void) &serial_version[11]); /* "$Revision: x.yy" */ } -/* rs_init inits the driver at boot (using the module_init chain) */ +/* rs_init inits the driver at boot (using the initcall chain) */ static const struct tty_operations rs_ops = { .open = rs_open, @@ -4247,5 +4244,4 @@ static int __init rs_init(void) } /* this makes sure that rs_init is called during kernel boot */ - -module_init(rs_init); +device_initcall(rs_init); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 76103f2c4a8001e36e10fffc7581ee2ee4c885b6..a1c6519837a40d4e562e9bea834728e48511b733 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -430,6 +430,65 @@ static void lpuart_flush_buffer(struct uart_port *port) } } +#if defined(CONFIG_CONSOLE_POLL) + +static int lpuart_poll_init(struct uart_port *port) +{ + struct lpuart_port *sport = container_of(port, + struct lpuart_port, port); + unsigned long flags; + unsigned char temp; + + sport->port.fifosize = 0; + + spin_lock_irqsave(&sport->port.lock, flags); + /* Disable Rx & Tx */ + writeb(0, sport->port.membase + UARTCR2); + + temp = readb(sport->port.membase + UARTPFIFO); + /* Enable Rx and Tx FIFO */ + writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE, + sport->port.membase + UARTPFIFO); + + /* flush Tx and Rx FIFO */ + writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, + sport->port.membase + UARTCFIFO); + + /* explicitly clear RDRF */ + if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) { + readb(sport->port.membase + UARTDR); + writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO); + } + + writeb(0, sport->port.membase + UARTTWFIFO); + writeb(1, sport->port.membase + UARTRWFIFO); + + /* Enable Rx and Tx */ + writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2); + spin_unlock_irqrestore(&sport->port.lock, flags); + + return 0; +} + +static void lpuart_poll_put_char(struct uart_port *port, unsigned char c) +{ + /* drain */ + while (!(readb(port->membase + UARTSR1) & UARTSR1_TDRE)) + barrier(); + + writeb(c, port->membase + UARTDR); +} + +static int lpuart_poll_get_char(struct uart_port *port) +{ + if (!(readb(port->membase + UARTSR1) & UARTSR1_RDRF)) + return NO_POLL_CHAR; + + return readb(port->membase + UARTDR); +} + +#endif + static inline void lpuart_transmit_buffer(struct lpuart_port *sport) { struct circ_buf *xmit = &sport->port.state->xmit; @@ -1595,6 +1654,11 @@ static const struct uart_ops lpuart_pops = { .config_port = lpuart_config_port, .verify_port = lpuart_verify_port, .flush_buffer = lpuart_flush_buffer, +#if defined(CONFIG_CONSOLE_POLL) + .poll_init = lpuart_poll_init, + .poll_get_char = lpuart_poll_get_char, + .poll_put_char = lpuart_poll_put_char, +#endif }; static const struct uart_ops lpuart32_pops = { diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c index c60a8d5e40201c535ac70a7ccfae67b813a9b790..d83783cfbade63bfb945b2d1f7d3afdd0f9c7149 100644 --- a/drivers/tty/serial/icom.c +++ b/drivers/tty/serial/icom.c @@ -53,7 +53,7 @@ #include #include -#include +#include #include "icom.h" diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index d386346248deba5026224d590b6be60fe5a2af91..157883653256da9ebeae89d7dabead3285656c82 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -1042,6 +1042,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi) ret = 
spi_setup(spi); if (ret) { dev_err(&spi->dev, "SPI setup wasn't successful %d", ret); + kfree(ifx_dev); return -ENODEV; } diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c index e5c42fef69d26254e1250aa0b0e00bdd894bc1fc..3be051abb2a26a2b0608fc02f478ce91d6a2f8ac 100644 --- a/drivers/tty/serial/ioc4_serial.c +++ b/drivers/tty/serial/ioc4_serial.c @@ -1082,7 +1082,7 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd) if (!port) { printk(KERN_WARNING "IOC4 serial memory not available for port\n"); - return -ENOMEM; + goto free; } spin_lock_init(&port->ip_lock); @@ -1190,6 +1190,11 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd) handle_dma_error_intr, port); } return 0; + +free: + while (port_number) + kfree(ports[--port_number]); + return -ENOMEM; } /** diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 770454e0dfa3b5556524f25827bd84a9801c9cf4..8c1c9112b3fdd0cff5f5bee9a3912bef2c46a2f6 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1016,7 +1016,7 @@ static void mxs_auart_settermios(struct uart_port *u, ctrl |= AUART_LINECTRL_EPS; } - u->read_status_mask = 0; + u->read_status_mask = AUART_STAT_OERR; if (termios->c_iflag & INPCK) u->read_status_mask |= AUART_STAT_PERR; diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c index cd9d9e87847555ee486e4e0d8ff85ceab5349a7e..75952811c0daf77b259f6ebad8e07a4653a9ba50 100644 --- a/drivers/tty/serial/pxa.c +++ b/drivers/tty/serial/pxa.c @@ -925,6 +925,8 @@ static struct platform_driver serial_pxa_driver = { }, }; + +/* 8250 driver for PXA serial ports should be used */ static int __init serial_pxa_init(void) { int ret; diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index fb0672554123a196d75fd9eedca35a915ddc12e6..793395451982d8e65d495345b9f8bfc17bd24e6c 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1264,7 +1264,7 @@ static int sc16is7xx_probe(struct device *dev, /* Setup interrupt */ ret = devm_request_irq(dev, irq, sc16is7xx_irq, - IRQF_ONESHOT | flags, dev_name(dev), s); + flags, dev_name(dev), s); if (!ret) return 0; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index f2303f390345e14664f31976662f7c804fbbb9d9..9939c3d9912b3563d107e3cd4439212e291377c3 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -36,7 +36,7 @@ #include #include -#include +#include /* * This is used to lock changes in serial line configuration. 
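The serial_core.c hunks below tighten the pairing between uart_port_ref() and uart_port_deref(): the NULL check moves out of the deref helper, so callers (and the uart_port_unlock() macro) must only drop a reference they actually took. The macro bug being fixed is the classic "only the first statement is guarded" pitfall; a tiny stand-alone sketch of that pitfall, purely illustrative and not from the patch:

	#include <stdio.h>

	/* Buggy form: only the unlock is guarded, the deref runs even for NULL. */
	#define RELEASE_BUGGY(p)  do { if (p) puts("unlock"); puts("deref"); } while (0)

	/* Fixed form: both statements sit inside the guarded block. */
	#define RELEASE_FIXED(p)  do { if (p) { puts("unlock"); puts("deref"); } } while (0)

	int main(void)
	{
		void *port = NULL;

		RELEASE_BUGGY(port);	/* still prints "deref" for a NULL port */
		RELEASE_FIXED(port);	/* prints nothing, as intended */
		return 0;
	}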
@@ -73,7 +73,7 @@ static inline struct uart_port *uart_port_ref(struct uart_state *state) static inline void uart_port_deref(struct uart_port *uport) { - if (uport && atomic_dec_and_test(&uport->state->refcount)) + if (atomic_dec_and_test(&uport->state->refcount)) wake_up(&uport->state->remove_wait); } @@ -88,9 +88,10 @@ static inline void uart_port_deref(struct uart_port *uport) #define uart_port_unlock(uport, flags) \ ({ \ struct uart_port *__uport = uport; \ - if (__uport) \ + if (__uport) { \ spin_unlock_irqrestore(&__uport->lock, flags); \ - uart_port_deref(__uport); \ + uart_port_deref(__uport); \ + } \ }) static inline struct uart_port *uart_port_check(struct uart_state *state) @@ -1515,7 +1516,10 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout) unsigned long char_time, expire; port = uart_port_ref(state); - if (!port || port->type == PORT_UNKNOWN || port->fifosize == 0) { + if (!port) + return; + + if (port->type == PORT_UNKNOWN || port->fifosize == 0) { uart_port_deref(port); return; } @@ -2365,9 +2369,10 @@ static int uart_poll_get_char(struct tty_driver *driver, int line) if (state) { port = uart_port_ref(state); - if (port) + if (port) { ret = port->ops->poll_get_char(port); - uart_port_deref(port); + uart_port_deref(port); + } } return ret; } diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 4b26252c288597a569d4740bc490f56e499bfcfa..91e7dddbf72cd3de61844c85bf0d01efe8e6db56 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1142,11 +1142,8 @@ static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count) int copied; copied = tty_insert_flip_string(tport, buf, count); - if (copied < count) { - dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n", - count - copied); + if (copied < count) port->icount.buf_overrun++; - } port->icount.rx += copied; @@ -1161,8 +1158,6 @@ static int sci_dma_rx_find_active(struct sci_port *s) if (s->active_rx == s->cookie_rx[i]) return i; - dev_err(s->port.dev, "%s: Rx cookie %d not found!\n", __func__, - s->active_rx); return -1; } @@ -1223,9 +1218,9 @@ static void sci_dma_rx_complete(void *arg) dma_async_issue_pending(chan); + spin_unlock_irqrestore(&port->lock, flags); dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n", __func__, s->cookie_rx[active], active, s->active_rx); - spin_unlock_irqrestore(&port->lock, flags); return; fail: @@ -1273,8 +1268,6 @@ static void sci_submit_rx(struct sci_port *s) if (dma_submit_error(s->cookie_rx[i])) goto fail; - dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__, - s->cookie_rx[i], i); } s->active_rx = s->cookie_rx[0]; @@ -1288,7 +1281,6 @@ static void sci_submit_rx(struct sci_port *s) for (i = 0; i < 2; i++) s->cookie_rx[i] = -EINVAL; s->active_rx = -EINVAL; - dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n"); sci_rx_dma_release(s, true); } @@ -1358,10 +1350,10 @@ static void rx_timer_fn(unsigned long arg) int active, count; u16 scr; - spin_lock_irqsave(&port->lock, flags); - dev_dbg(port->dev, "DMA Rx timed out\n"); + spin_lock_irqsave(&port->lock, flags); + active = sci_dma_rx_find_active(s); if (active < 0) { spin_unlock_irqrestore(&port->lock, flags); @@ -1370,9 +1362,9 @@ static void rx_timer_fn(unsigned long arg) status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state); if (status == DMA_COMPLETE) { + spin_unlock_irqrestore(&port->lock, flags); dev_dbg(port->dev, "Cookie %d #%d has already completed\n", s->active_rx, active); - spin_unlock_irqrestore(&port->lock, flags); /* Let 
packet complete handler take care of the packet */ return; @@ -1396,8 +1388,6 @@ static void rx_timer_fn(unsigned long arg) /* Handle incomplete DMA receive */ dmaengine_terminate_all(s->chan_rx); read = sg_dma_len(&s->sg_rx[active]) - state.residue; - dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read, - s->active_rx); if (read) { count = sci_dma_rx_push(s, s->rx_buf[active], read); diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index 4e603d060e80c91b36aabb3961dc22c3596d37ec..99ef5c6e47669a4c32a19ce614c52189bf251f62 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -598,7 +598,8 @@ static int hv_remove(struct platform_device *dev) uart_remove_one_port(&sunhv_reg, port); sunserial_unregister_minors(&sunhv_reg, 1); - + kfree(con_read_page); + kfree(con_write_page); kfree(port); sunhv_port = NULL; diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 9ad98eaa35bf86cf9ce2e8ce1a94233989c75ed2..72df2e1b88afe209c8e9420a28f46eeb865b987a 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -1500,6 +1500,7 @@ static int su_probe(struct platform_device *op) out_unmap: of_iounmap(&op->resource[0], up->port.membase, up->reg_size); + kfree(up); return err; } diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c index c13e27ecb0b7f441379dd6cec89ed084892fa057..657eed82eeb372d96e5f480bae3eb7c92dee06fa 100644 --- a/drivers/tty/synclink.c +++ b/drivers/tty/synclink.c @@ -107,7 +107,7 @@ #define PUT_USER(error,value,addr) error = put_user(value,addr) #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0 -#include +#include #define RCLRVALUE 0xffff @@ -7973,7 +7973,6 @@ static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size) static const struct net_device_ops hdlcdev_ops = { .ndo_open = hdlcdev_open, .ndo_stop = hdlcdev_close, - .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = hdlcdev_ioctl, .ndo_tx_timeout = hdlcdev_tx_timeout, diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 7aca2d4670e4a3aaf3636acc5f8dd1c3f288bfd3..31885f20fc1583178f9d3108bce20009eca99cb8 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -77,7 +77,7 @@ #include #include #include -#include +#include #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE)) #define SYNCLINK_GENERIC_HDLC 1 @@ -1768,7 +1768,6 @@ static void hdlcdev_rx(struct slgt_info *info, char *buf, int size) static const struct net_device_ops hdlcdev_ops = { .ndo_open = hdlcdev_open, .ndo_stop = hdlcdev_close, - .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = hdlcdev_ioctl, .ndo_tx_timeout = hdlcdev_tx_timeout, diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c index dec156586de1bdfcf9ed1439523a884267ff02df..51e8846cd68fce28f0544c2f16424560dfaf787b 100644 --- a/drivers/tty/synclinkmp.c +++ b/drivers/tty/synclinkmp.c @@ -79,7 +79,7 @@ #define PUT_USER(error,value,addr) error = put_user(value,addr) #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? 
-EFAULT : 0 -#include +#include static MGSL_PARAMS default_params = { MGSL_MODE_HDLC, /* unsigned long mode */ @@ -1887,7 +1887,6 @@ static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size) static const struct net_device_ops hdlcdev_ops = { .ndo_open = hdlcdev_open, .ndo_stop = hdlcdev_close, - .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = hdlcdev_ioctl, .ndo_tx_timeout = hdlcdev_tx_timeout, diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index bf36ac9aee41cdb7aa1172e65c55d5bd5e20beda..f27fc0f14c11f2a0858a50094f3bd0b6e47aae8d 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -22,7 +22,7 @@ #include #include -#include +#include #undef TTY_DEBUG_WAIT_UNTIL_SENT diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c index 9d7ab7b66a8a18999c9a0afb94b9a1bfed8bce4a..1f6e17fc3fb041f91b4d06cbe6a64bf3aa206be4 100644 --- a/drivers/tty/vt/consolemap.c +++ b/drivers/tty/vt/consolemap.c @@ -9,6 +9,17 @@ * Support for multiple unimaps by Jakub Jelinek , July 1998 * * Fix bug in inverse translation. Stanislav Voronyi , Dec 1998 + * + * In order to prevent the following circular lock dependency: + * &mm->mmap_sem --> cpu_hotplug.lock --> console_lock --> &mm->mmap_sem + * + * We cannot allow page fault to happen while holding the console_lock. + * Therefore, all the userspace copy operations have to be done outside + * the console_lock critical sections. + * + * As all the affected functions are all called directly from vt_ioctl(), we + * can allocate some small buffers directly on stack without worrying about + * stack overflow. */ #include @@ -18,10 +29,11 @@ #include #include #include -#include +#include #include #include #include +#include static unsigned short translations[][256] = { /* 8-bit Latin-1 mapped to Unicode -- trivial mapping */ @@ -309,18 +321,19 @@ static void update_user_maps(void) int con_set_trans_old(unsigned char __user * arg) { int i; - unsigned short *p = translations[USER_MAP]; + unsigned short inbuf[E_TABSZ]; if (!access_ok(VERIFY_READ, arg, E_TABSZ)) return -EFAULT; - console_lock(); - for (i=0; iunicode, &list->unicode); + __get_user(plist->fontpos, &list->fontpos); + } + console_lock(); /* Save original vc_unipagdir_loc in case we allocate a new one */ @@ -557,8 +583,8 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) err1 = con_do_clear_unimap(vc); if (err1) { - console_unlock(); - return err1; + err = err1; + goto out_unlock; } /* @@ -592,8 +618,8 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) *vc->vc_uni_pagedir_loc = p; con_release_unimap(q); kfree(q); - console_unlock(); - return err1; + err = err1; + goto out_unlock; } } } else { @@ -617,22 +643,17 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) /* * Insert user specified unicode pairs into new table. */ - while (ct--) { - unsigned short unicode, fontpos; - __get_user(unicode, &list->unicode); - __get_user(fontpos, &list->fontpos); - if ((err1 = con_insert_unipair(p, unicode,fontpos)) != 0) + for (plist = unilist; ct; ct--, plist++) { + err1 = con_insert_unipair(p, plist->unicode, plist->fontpos); + if (err1) err = err1; - list++; } /* * Merge with fontmaps of any other virtual consoles. 
*/ - if (con_unify_unimap(vc, p)) { - console_unlock(); - return err; - } + if (con_unify_unimap(vc, p)) + goto out_unlock; for (i = 0; i <= 3; i++) set_inverse_transl(vc, p, i); /* Update inverse translations */ @@ -640,6 +661,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) out_unlock: console_unlock(); + kfree(unilist); return err; } @@ -735,9 +757,15 @@ EXPORT_SYMBOL(con_copy_unimap); */ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list) { - int i, j, k, ect; + int i, j, k; + ushort ect; u16 **p1, *p2; struct uni_pagedir *p; + struct unipair *unilist, *plist; + + unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL); + if (!unilist) + return -ENOMEM; console_lock(); @@ -750,21 +778,26 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni for (j = 0; j < 32; j++) { p2 = *(p1++); if (p2) - for (k = 0; k < 64; k++) { - if (*p2 < MAX_GLYPH && ect++ < ct) { - __put_user((u_short)((i<<11)+(j<<6)+k), - &list->unicode); - __put_user((u_short) *p2, - &list->fontpos); - list++; + for (k = 0; k < 64; k++, p2++) { + if (*p2 >= MAX_GLYPH) + continue; + if (ect < ct) { + unilist[ect].unicode = + (i<<11)+(j<<6)+k; + unilist[ect].fontpos = *p2; } - p2++; + ect++; } } } } - __put_user(ect, uct); console_unlock(); + for (i = min(ect, ct), plist = unilist; i; i--, list++, plist++) { + __put_user(plist->unicode, &list->unicode); + __put_user(plist->fontpos, &list->fontpos); + } + __put_user(ect, uct); + kfree(unilist); return ((ect <= ct) ? 0 : -ENOMEM); } diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 0f8caae4267df8b3a19e39011730478f2c63c9d3..3dd6a491cdba536520014d5861306cfd7dac42c2 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -982,7 +982,7 @@ static void kbd_led_trigger_activate(struct led_classdev *cdev) KBD_LED_TRIGGER((_led_bit) + 8, _name) static struct kbd_led_trigger kbd_led_triggers[] = { - KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrollock"), + KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrolllock"), KBD_LED_TRIGGER(VC_NUMLOCK, "kbd-numlock"), KBD_LED_TRIGGER(VC_CAPSLOCK, "kbd-capslock"), KBD_LED_TRIGGER(VC_KANALOCK, "kbd-kanalock"), @@ -1256,7 +1256,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode, case KEY_SYSRQ: /* * Real AT keyboards (that's what we're trying - * to emulate here emit 0xe0 0x2a 0xe0 0x37 when + * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when * pressing PrtSc/SysRq alone, but simply 0x54 * when pressing Alt+PrtSc/SysRq. 
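The consolemap rework above follows one pattern throughout: copy the user-supplied table into a temporary buffer (on the stack or via kmalloc_array) before console_lock is taken, and touch only that local copy inside the critical section, so no page fault can happen while the lock is held. A minimal user-space sketch of the same staging idea, with a pthread mutex standing in for console_lock and memcpy standing in for the __get_user/copy path (the names here are illustrative only, not the kernel API):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define E_TABSZ 256

static unsigned short user_map[E_TABSZ];                  /* protected by map_lock */
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stage the caller's data first, then update the shared table under the lock. */
static int set_trans_map(const unsigned short *arg)
{
        unsigned short inbuf[E_TABSZ];

        memcpy(inbuf, arg, sizeof(inbuf));        /* "copy from user", no lock held */

        pthread_mutex_lock(&map_lock);
        memcpy(user_map, inbuf, sizeof(inbuf));   /* critical section: local memory only */
        pthread_mutex_unlock(&map_lock);
        return 0;
}

int main(void)
{
        unsigned short tbl[E_TABSZ];

        for (int i = 0; i < E_TABSZ; i++)
                tbl[i] = (unsigned short)i;
        set_trans_map(tbl);
        printf("entry 65 = %u\n", (unsigned)user_map[65]);
        return 0;
}

con_get_unimap goes the other way with the same idea: collect the entries into a kmalloc'ed array while holding the lock, drop the lock, and only then copy the array out to user space.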
*/ diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index 368ce1803e8f229e3cfc0be9ddd405d52b34e6f1..36e1b8c7680f1d8fc5a64ba16e2fc52a0a379b40 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 14a2b5f11bcab9739ea6bc23c5bb4134f1b37f1c..56dcff6059d3db965343739eea731921994b6073 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -39,7 +39,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8c3bf3d613c061615bbcf15a607f371e9208c67b..4c10a9df3b91f3ea2b1143c41cb530a737a48366 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -315,38 +315,27 @@ void schedule_console_callback(void) schedule_work(&console_work); } -static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr) +static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + enum con_scroll dir, unsigned int nr) { - unsigned short *d, *s; + u16 *clear, *d, *s; - if (t+nr >= b) + if (t + nr >= b) nr = b - t - 1; if (b > vc->vc_rows || t >= b || nr < 1) return; - if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr)) + if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, dir, nr)) return; - d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); - s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr)); - scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row); - scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char, - vc->vc_size_row * nr); -} -static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr) -{ - unsigned short *s; - unsigned int step; + s = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * t); + d = (u16 *)(vc->vc_origin + vc->vc_size_row * (t + nr)); - if (t+nr >= b) - nr = b - t - 1; - if (b > vc->vc_rows || t >= b || nr < 1) - return; - if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr)) - return; - s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); - step = vc->vc_cols * nr; - scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row); - scr_memsetw(s, vc->vc_video_erase_char, 2 * step); + if (dir == SM_UP) { + clear = s + (b - t - nr) * vc->vc_cols; + swap(s, d); + } + scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row); + scr_memsetw(clear, vc->vc_video_erase_char, vc->vc_size_row * nr); } static void do_update_region(struct vc_data *vc, unsigned long start, int count) @@ -1120,7 +1109,7 @@ static void lf(struct vc_data *vc) * if below scrolling region */ if (vc->vc_y + 1 == vc->vc_bottom) - scrup(vc, vc->vc_top, vc->vc_bottom, 1); + con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1); else if (vc->vc_y < vc->vc_rows - 1) { vc->vc_y++; vc->vc_pos += vc->vc_size_row; @@ -1135,7 +1124,7 @@ static void ri(struct vc_data *vc) * if above scrolling region */ if (vc->vc_y == vc->vc_top) - scrdown(vc, vc->vc_top, vc->vc_bottom, 1); + con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1); else if (vc->vc_y > 0) { vc->vc_y--; vc->vc_pos -= vc->vc_size_row; @@ -1631,7 +1620,7 @@ static void csi_L(struct vc_data *vc, unsigned int nr) nr = vc->vc_rows - vc->vc_y; else if (!nr) nr = 1; - scrdown(vc, vc->vc_y, vc->vc_bottom, nr); + con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_DOWN, nr); vc->vc_need_wrap = 0; } @@ -1652,7 +1641,7 @@ static void csi_M(struct vc_data *vc, unsigned int nr) nr = vc->vc_rows - vc->vc_y; else if 
(!nr) nr=1; - scrup(vc, vc->vc_y, vc->vc_bottom, nr); + con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_UP, nr); vc->vc_need_wrap = 0; } @@ -3934,10 +3923,6 @@ void unblank_screen(void) */ static void blank_screen_t(unsigned long dummy) { - if (unlikely(!keventd_up())) { - mod_timer(&console_timer, jiffies + (blankinterval * HZ)); - return; - } blank_timer_expired = 1; schedule_work(&console_work); } @@ -4295,6 +4280,46 @@ void vcs_scr_updated(struct vc_data *vc) notify_update(vc); } +void vc_scrolldelta_helper(struct vc_data *c, int lines, + unsigned int rolled_over, void *base, unsigned int size) +{ + unsigned long ubase = (unsigned long)base; + ptrdiff_t scr_end = (void *)c->vc_scr_end - base; + ptrdiff_t vorigin = (void *)c->vc_visible_origin - base; + ptrdiff_t origin = (void *)c->vc_origin - base; + int margin = c->vc_size_row * 4; + int from, wrap, from_off, avail; + + /* Turn scrollback off */ + if (!lines) { + c->vc_visible_origin = c->vc_origin; + return; + } + + /* Do we have already enough to allow jumping from 0 to the end? */ + if (rolled_over > scr_end + margin) { + from = scr_end; + wrap = rolled_over + c->vc_size_row; + } else { + from = 0; + wrap = size; + } + + from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row; + avail = (origin - from + wrap) % wrap; + + /* Only a little piece would be left? Show all incl. the piece! */ + if (avail < 2 * margin) + margin = 0; + if (from_off < margin) + from_off = 0; + if (from_off > avail - margin) + from_off = avail; + + c->vc_visible_origin = ubase + (from + from_off) % wrap; +} +EXPORT_SYMBOL_GPL(vc_scrolldelta_helper); + /* * Visible symbols for modules */ diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index f62c598810ff4ca64ec2df66b769ea0b4e6541e0..a56edf2d58eb267570222a5d741760d3e118f137 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -29,7 +29,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig index 52c98ce1b6febf02df27ae4be8c031db8d14acc3..7e8dc78a97961459e05dc3d7cfe413323f31e6de 100644 --- a/drivers/uio/Kconfig +++ b/drivers/uio/Kconfig @@ -155,4 +155,13 @@ config UIO_MF624 If you compile this as a module, it will be called uio_mf624. +config UIO_HV_GENERIC + tristate "Generic driver for Hyper-V VMBus" + depends on HYPERV + help + Generic driver that you can bind, dynamically, to any + Hyper-V VMBus device. It is useful to provide direct access + to network and storage devices from userspace. + + If you compile this as a module, it will be called uio_hv_generic. endif diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile index 8560dad52d0f2934d4c67b7aac8d96e929de458a..e9663bb8a4c732c0a1a18d6d3d61129c6c428ca5 100644 --- a/drivers/uio/Makefile +++ b/drivers/uio/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_UIO_NETX) += uio_netx.o obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o obj-$(CONFIG_UIO_MF624) += uio_mf624.o obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o +obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c new file mode 100644 index 0000000000000000000000000000000000000000..50958f167305317f3de1a361673cd3caf4f8f47b --- /dev/null +++ b/drivers/uio/uio_hv_generic.c @@ -0,0 +1,218 @@ +/* + * uio_hv_generic - generic UIO driver for VMBus + * + * Copyright (c) 2013-2016 Brocade Communications Systems, Inc. + * Copyright (c) 2016, Microsoft Corporation. + * + * + * This work is licensed under the terms of the GNU GPL, version 2. 
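The con_scroll() rework earlier in this patch folds scrup() and scrdown() into one helper: pick source, destination and clear regions from the scroll direction, then do a single move plus a single erase. A self-contained sketch of that selection logic on a flat array of 16-bit screen cells (memmove and a plain loop replace scr_memmovew/scr_memsetw; this is an illustration, not the console code itself):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum scroll_dir { DIR_UP, DIR_DOWN };

/* Scroll rows [t, b) of a cols-wide screen by nr lines, erasing what scrolls in. */
static void scroll(uint16_t *screen, unsigned cols, unsigned rows,
                   unsigned t, unsigned b, enum scroll_dir dir, unsigned nr,
                   uint16_t erase)
{
        uint16_t *clear, *s, *d, *tmp;

        if (t + nr >= b)
                nr = b - t - 1;
        if (b > rows || t >= b || nr < 1)
                return;

        s = clear = screen + cols * t;
        d = screen + cols * (t + nr);

        if (dir == DIR_UP) {
                clear = s + (b - t - nr) * cols;  /* bottom nr rows of the region */
                tmp = s; s = d; d = tmp;          /* swap(): copy towards the top */
        }
        memmove(d, s, (b - t - nr) * cols * sizeof(*screen));
        for (unsigned i = 0; i < nr * cols; i++)
                clear[i] = erase;
}

int main(void)
{
        uint16_t scr[4 * 8];
        unsigned r, c;

        for (r = 0; r < 4; r++)
                for (c = 0; c < 8; c++)
                        scr[r * 8 + c] = (uint16_t)('0' + r);
        scroll(scr, 8, 4, 0, 4, DIR_UP, 1, ' ');
        for (r = 0; r < 4; r++)
                printf("row %u starts with '%c'\n", r, (char)scr[r * 8]);
        return 0;
}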
+ * + * Since the driver does not declare any device ids, you must allocate + * id and bind the device to the driver yourself. For example: + * + * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \ + * > /sys/bus/vmbus/drivers/uio_hv_generic + * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \ + * > /sys/bus/vmbus/drivers/hv_netvsc/unbind + * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \ + * > /sys/bus/vmbus/drivers/uio_hv_generic/bind + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../hv/hyperv_vmbus.h" + +#define DRIVER_VERSION "0.02.0" +#define DRIVER_AUTHOR "Stephen Hemminger " +#define DRIVER_DESC "Generic UIO driver for VMBus devices" + +/* + * List of resources to be mapped to user space + * can be extended up to MAX_UIO_MAPS(5) items + */ +enum hv_uio_map { + TXRX_RING_MAP = 0, + INT_PAGE_MAP, + MON_PAGE_MAP, +}; + +#define HV_RING_SIZE 512 + +struct hv_uio_private_data { + struct uio_info info; + struct hv_device *device; +}; + +static int +hv_uio_mmap(struct uio_info *info, struct vm_area_struct *vma) +{ + int mi; + + if (vma->vm_pgoff >= MAX_UIO_MAPS) + return -EINVAL; + + if (info->mem[vma->vm_pgoff].size == 0) + return -EINVAL; + + mi = (int)vma->vm_pgoff; + + return remap_pfn_range(vma, vma->vm_start, + info->mem[mi].addr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, vma->vm_page_prot); +} + +/* + * This is the irqcontrol callback to be registered to uio_info. + * It can be used to disable/enable interrupt from user space processes. + * + * @param info + * pointer to uio_info. + * @param irq_state + * state value. 1 to enable interrupt, 0 to disable interrupt. + */ +static int +hv_uio_irqcontrol(struct uio_info *info, s32 irq_state) +{ + struct hv_uio_private_data *pdata = info->priv; + struct hv_device *dev = pdata->device; + + dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state; + virt_mb(); + + return 0; +} + +/* + * Callback from vmbus_event when something is in inbound ring. 
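The irqcontrol and channel callback above plug into the standard UIO character-device interface: user space writes a 32-bit value to /dev/uioX to enable (1) or disable (0) the interrupt, and a blocking read() returns the event count each time the driver calls uio_event_notify(). A minimal consumer might look like the following ("/dev/uio0" is an assumption, the actual index depends on probe order, and error handling is trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int32_t enable = 1;
        uint32_t count;
        int fd = open("/dev/uio0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* ends up in hv_uio_irqcontrol(), which unmasks the ring-buffer interrupt */
        if (write(fd, &enable, sizeof(enable)) != sizeof(enable))
                perror("write");

        /* blocks until hv_uio_channel_cb() calls uio_event_notify() */
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("interrupt count: %u\n", count);

        close(fd);
        return 0;
}

The memory regions registered in hv_uio_probe() (txrx_rings, int_page, monitor_pages) are then reachable with mmap() at offsets of 0, 1 and 2 page sizes, following the usual UIO mapping convention.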
+ */ +static void hv_uio_channel_cb(void *context) +{ + struct hv_uio_private_data *pdata = context; + struct hv_device *dev = pdata->device; + + dev->channel->inbound.ring_buffer->interrupt_mask = 1; + virt_mb(); + + uio_event_notify(&pdata->info); +} + +static int +hv_uio_probe(struct hv_device *dev, + const struct hv_vmbus_device_id *dev_id) +{ + struct hv_uio_private_data *pdata; + int ret; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE, + HV_RING_SIZE * PAGE_SIZE, NULL, 0, + hv_uio_channel_cb, pdata); + if (ret) + goto fail; + + dev->channel->inbound.ring_buffer->interrupt_mask = 1; + dev->channel->batched_reading = false; + + /* Fill general uio info */ + pdata->info.name = "uio_hv_generic"; + pdata->info.version = DRIVER_VERSION; + pdata->info.irqcontrol = hv_uio_irqcontrol; + pdata->info.mmap = hv_uio_mmap; + pdata->info.irq = UIO_IRQ_CUSTOM; + + /* mem resources */ + pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings"; + pdata->info.mem[TXRX_RING_MAP].addr + = virt_to_phys(dev->channel->ringbuffer_pages); + pdata->info.mem[TXRX_RING_MAP].size + = dev->channel->ringbuffer_pagecount * PAGE_SIZE; + pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL; + + pdata->info.mem[INT_PAGE_MAP].name = "int_page"; + pdata->info.mem[INT_PAGE_MAP].addr = + virt_to_phys(vmbus_connection.int_page); + pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE; + pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL; + + pdata->info.mem[MON_PAGE_MAP].name = "monitor_pages"; + pdata->info.mem[MON_PAGE_MAP].addr = + virt_to_phys(vmbus_connection.monitor_pages[1]); + pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE; + pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL; + + pdata->info.priv = pdata; + pdata->device = dev; + + ret = uio_register_device(&dev->device, &pdata->info); + if (ret) { + dev_err(&dev->device, "hv_uio register failed\n"); + goto fail_close; + } + + hv_set_drvdata(dev, pdata); + + return 0; + +fail_close: + vmbus_close(dev->channel); +fail: + kfree(pdata); + + return ret; +} + +static int +hv_uio_remove(struct hv_device *dev) +{ + struct hv_uio_private_data *pdata = hv_get_drvdata(dev); + + if (!pdata) + return 0; + + uio_unregister_device(&pdata->info); + hv_set_drvdata(dev, NULL); + vmbus_close(dev->channel); + kfree(pdata); + return 0; +} + +static struct hv_driver hv_uio_drv = { + .name = "uio_hv_generic", + .id_table = NULL, /* only dynamic id's */ + .probe = hv_uio_probe, + .remove = hv_uio_remove, +}; + +static int __init +hyperv_module_init(void) +{ + return vmbus_driver_register(&hv_uio_drv); +} + +static void __exit +hyperv_module_exit(void) +{ + vmbus_driver_unregister(&hv_uio_drv); +} + +module_init(hyperv_module_init); +module_exit(hyperv_module_exit); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c index ca9e2fafb0b6c05571b2652e5dcb45b12257b7a9..31d5b1d3b5af69a7344653f83c4b07cc99293f12 100644 --- a/drivers/uio/uio_pruss.c +++ b/drivers/uio/uio_pruss.c @@ -111,6 +111,7 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev) gdev->sram_vaddr, sram_pool_sz); kfree(gdev->info); + clk_disable(gdev->pruss_clk); clk_put(gdev->pruss_clk); kfree(gdev); } @@ -143,7 +144,14 @@ static int pruss_probe(struct platform_device *pdev) kfree(gdev); return ret; } else { - clk_enable(gdev->pruss_clk); + ret = clk_enable(gdev->pruss_clk); + 
if (ret) { + dev_err(dev, "Failed to enable clock\n"); + clk_put(gdev->pruss_clk); + kfree(gdev->info); + kfree(gdev); + return ret; + } } regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 644e978cbd3e3026e245b79b2affe69761d59336..fbe493d44e816970fa367cfc71a986fbd1a27909 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -95,6 +95,8 @@ source "drivers/usb/usbip/Kconfig" endif +source "drivers/usb/mtu3/Kconfig" + source "drivers/usb/musb/Kconfig" source "drivers/usb/dwc3/Kconfig" diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index dca78565eb5500263051e9e1c889d84d6afd8fd9..7791af6c102c7f4ea2c8ecdeda5cbc08fb03a55c 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_USB_DWC2) += dwc2/ obj-$(CONFIG_USB_ISP1760) += isp1760/ obj-$(CONFIG_USB_MON) += mon/ +obj-$(CONFIG_USB_MTU3) += mtu3/ obj-$(CONFIG_PCI) += host/ obj-$(CONFIG_USB_EHCI_HCD) += host/ diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 4dec9df8764b9abc3544b81e4e06da1d7aec5a8f..5a59da0dc98a1798c36bce8c9f5c754632eb121d 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -64,7 +64,7 @@ #include "usbatm.h" -#include +#include #include #include #include diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 099179457f60c0247af8c49c4084b7f77d30e98f..5f4a8157fad84e41f1c8e0f614739937e4fb97e7 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "ci.h" @@ -146,6 +147,9 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) if (of_find_property(np, "external-vbus-divider", NULL)) data->evdo = 1; + if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) + data->ulpi = 1; + return data; } diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h index 409aa5ca8dda1405aaf9c94ca16d363be386c810..d666c9f036bae6fe8c90483b0308e6bba1a0a949 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.h +++ b/drivers/usb/chipidea/ci_hdrc_imx.h @@ -19,6 +19,7 @@ struct imx_usbmisc_data { unsigned int disable_oc:1; /* over current detect disabled */ unsigned int oc_polarity:1; /* over current polarity if oc enabled */ unsigned int evdo:1; /* set external vbus divider option */ + unsigned int ulpi:1; /* connected to an ULPI phy */ }; int imx_usbmisc_init(struct imx_usbmisc_data *); diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 69426e644d17019b1c6e2d672b2e1a04cbffc3fd..3dbb4a21ab44c8a6555f5e2ad2a321a61526e23b 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) if (!ci) return -ENOMEM; + spin_lock_init(&ci->lock); ci->dev = dev; ci->platdata = dev_get_platdata(dev); ci->imx28_write_fix = !!(ci->platdata->flags & diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index de8e22ec39024edf0cf7d5d04e5ab55547e2658e..93e24ce61a3ac6c3701ed117df2770287f63572a 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c @@ -234,8 +234,8 @@ static void ci_otg_add_timer(struct ci_hdrc *ci, enum otg_fsm_timer t) ktime_set(timer_sec, timer_nsec)); ci->enabled_otg_timer_bits |= (1 << t); if ((ci->next_otg_timer == NUM_OTG_FSM_TIMERS) || - (ci->hr_timeouts[ci->next_otg_timer].tv64 > - ci->hr_timeouts[t].tv64)) { + (ci->hr_timeouts[ci->next_otg_timer] > + 
ci->hr_timeouts[t])) { ci->next_otg_timer = t; hrtimer_start_range_ns(&ci->otg_fsm_hrtimer, ci->hr_timeouts[t], NSEC_PER_MSEC, @@ -269,8 +269,8 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t) for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) { if ((next_timer == NUM_OTG_FSM_TIMERS) || - (ci->hr_timeouts[next_timer].tv64 < - ci->hr_timeouts[cur_timer].tv64)) + (ci->hr_timeouts[next_timer] < + ci->hr_timeouts[cur_timer])) next_timer = cur_timer; } } @@ -397,14 +397,14 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t) now = ktime_get(); for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) { - if (now.tv64 >= ci->hr_timeouts[cur_timer].tv64) { + if (now >= ci->hr_timeouts[cur_timer]) { ci->enabled_otg_timer_bits &= ~(1 << cur_timer); if (otg_timer_handlers[cur_timer]) ret = otg_timer_handlers[cur_timer](ci); } else { if ((next_timer == NUM_OTG_FSM_TIMERS) || - (ci->hr_timeouts[cur_timer].tv64 < - ci->hr_timeouts[next_timer].tv64)) + (ci->hr_timeouts[cur_timer] < + ci->hr_timeouts[next_timer])) next_timer = cur_timer; } } diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 661f43fe0f9e9e8f9d9be29eaed5bb1307b8df54..cf132f0571375c8dfcdc7681ef718516620d38cb 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -365,7 +365,7 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq, if (hwreq->req.length == 0 || hwreq->req.length % hwep->ep.maxpacket) mul++; - node->ptr->token |= mul << __ffs(TD_MULTO); + node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO)); } temp = (u32) (hwreq->req.dma + hwreq->req.actual); @@ -504,7 +504,7 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) if (hwreq->req.length == 0 || hwreq->req.length % hwep->ep.maxpacket) mul++; - hwep->qh.ptr->cap |= mul << __ffs(QH_MULT); + hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT)); } ret = hw_ep_prime(ci, hwep->num, hwep->dir, @@ -529,7 +529,7 @@ static void free_pending_td(struct ci_hw_ep *hwep) static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep, struct td_node *node) { - hwep->qh.ptr->td.next = node->dma; + hwep->qh.ptr->td.next = cpu_to_le32(node->dma); hwep->qh.ptr->td.token &= cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE)); @@ -821,7 +821,7 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req, } if (usb_endpoint_xfer_isoc(hwep->ep.desc) && - hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) { + hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) { dev_err(hwep->ci->dev, "request length too big for isochronous\n"); return -EMSGSIZE; } @@ -1253,8 +1253,8 @@ static int ep_enable(struct usb_ep *ep, hwep->num = usb_endpoint_num(desc); hwep->type = usb_endpoint_type(desc); - hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff; - hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc)); + hwep->ep.maxpacket = usb_endpoint_maxp(desc); + hwep->ep.mult = usb_endpoint_maxp_mult(desc); if (hwep->type == USB_ENDPOINT_XFER_CONTROL) cap |= QH_IOS; @@ -1889,8 +1889,6 @@ static int udc_start(struct ci_hdrc *ci) struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; int retval = 0; - spin_lock_init(&ci->lock); - ci->gadget.ops = &usb_gadget_ops; ci->gadget.speed = USB_SPEED_UNKNOWN; ci->gadget.max_speed = USB_SPEED_HIGH; diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h index e66df0020bd456aadf1a0a036ab8084a098d119d..2ecd1174d66ca145abe03014f572d2f70142920d 100644 --- 
a/drivers/usb/chipidea/udc.h +++ b/drivers/usb/chipidea/udc.h @@ -22,11 +22,11 @@ /* DMA layout of transfer descriptors */ struct ci_hw_td { /* 0 */ - u32 next; + __le32 next; #define TD_TERMINATE BIT(0) #define TD_ADDR_MASK (0xFFFFFFEUL << 5) /* 1 */ - u32 token; + __le32 token; #define TD_STATUS (0x00FFUL << 0) #define TD_STATUS_TR_ERR BIT(3) #define TD_STATUS_DT_ERR BIT(5) @@ -36,7 +36,7 @@ struct ci_hw_td { #define TD_IOC BIT(15) #define TD_TOTAL_BYTES (0x7FFFUL << 16) /* 2 */ - u32 page[5]; + __le32 page[5]; #define TD_CURR_OFFSET (0x0FFFUL << 0) #define TD_FRAME_NUM (0x07FFUL << 0) #define TD_RESERVED_MASK (0x0FFFUL << 0) @@ -45,18 +45,18 @@ struct ci_hw_td { /* DMA layout of queue heads */ struct ci_hw_qh { /* 0 */ - u32 cap; + __le32 cap; #define QH_IOS BIT(15) #define QH_MAX_PKT (0x07FFUL << 16) #define QH_ZLT BIT(29) #define QH_MULT (0x0003UL << 30) #define QH_ISO_MULT(x) ((x >> 11) & 0x03) /* 1 */ - u32 curr; + __le32 curr; /* 2 - 8 */ struct ci_hw_td td; /* 9 */ - u32 RESERVED; + __le32 RESERVED; struct usb_ctrlrequest setup; } __attribute__ ((packed, aligned(4))); diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 20d02a5e418d936231fcb091e183c6f939d16701..e77a4ed4f021da17f5c1f366e22b8447c366474a 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -46,11 +46,23 @@ #define MX53_USB_OTG_PHY_CTRL_0_OFFSET 0x08 #define MX53_USB_OTG_PHY_CTRL_1_OFFSET 0x0c +#define MX53_USB_CTRL_1_OFFSET 0x10 +#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK (0x11 << 2) +#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI BIT(2) +#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK (0x11 << 6) +#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI BIT(6) #define MX53_USB_UH2_CTRL_OFFSET 0x14 #define MX53_USB_UH3_CTRL_OFFSET 0x18 +#define MX53_USB_CLKONOFF_CTRL_OFFSET 0x24 +#define MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF BIT(21) +#define MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF BIT(22) #define MX53_BM_OVER_CUR_DIS_H1 BIT(5) #define MX53_BM_OVER_CUR_DIS_OTG BIT(8) #define MX53_BM_OVER_CUR_DIS_UHx BIT(30) +#define MX53_USB_CTRL_1_UH2_ULPI_EN BIT(26) +#define MX53_USB_CTRL_1_UH3_ULPI_EN BIT(27) +#define MX53_USB_UHx_CTRL_WAKE_UP_EN BIT(7) +#define MX53_USB_UHx_CTRL_ULPI_INT_EN BIT(8) #define MX53_USB_PHYCTRL1_PLLDIV_MASK 0x3 #define MX53_USB_PLL_DIV_24_MHZ 0x01 @@ -199,31 +211,77 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data) val |= MX53_USB_PLL_DIV_24_MHZ; writel(val, usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET); - if (data->disable_oc) { - spin_lock_irqsave(&usbmisc->lock, flags); - switch (data->index) { - case 0: + spin_lock_irqsave(&usbmisc->lock, flags); + + switch (data->index) { + case 0: + if (data->disable_oc) { reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET; val = readl(reg) | MX53_BM_OVER_CUR_DIS_OTG; - break; - case 1: + writel(val, reg); + } + break; + case 1: + if (data->disable_oc) { reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET; val = readl(reg) | MX53_BM_OVER_CUR_DIS_H1; - break; - case 2: + writel(val, reg); + } + break; + case 2: + if (data->ulpi) { + /* set USBH2 into ULPI-mode. 
*/ + reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET; + val = readl(reg) | MX53_USB_CTRL_1_UH2_ULPI_EN; + /* select ULPI clock */ + val &= ~MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK; + val |= MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI; + writel(val, reg); + /* Set interrupt wake up enable */ + reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET; + val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN + | MX53_USB_UHx_CTRL_ULPI_INT_EN; + writel(val, reg); + /* Disable internal 60Mhz clock */ + reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; + val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF; + writel(val, reg); + } + if (data->disable_oc) { reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET; val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx; - break; - case 3: + writel(val, reg); + } + break; + case 3: + if (data->ulpi) { + /* set USBH3 into ULPI-mode. */ + reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET; + val = readl(reg) | MX53_USB_CTRL_1_UH3_ULPI_EN; + /* select ULPI clock */ + val &= ~MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK; + val |= MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI; + writel(val, reg); + /* Set interrupt wake up enable */ reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET; - val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx; - break; + val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN + | MX53_USB_UHx_CTRL_ULPI_INT_EN; + writel(val, reg); + /* Disable internal 60Mhz clock */ + reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; + val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF; + writel(val, reg); } - if (reg && val) + if (data->disable_oc) { + reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET; + val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx; writel(val, reg); - spin_unlock_irqrestore(&usbmisc->lock, flags); + } + break; } + spin_unlock_irqrestore(&usbmisc->lock, flags); + return 0; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 78f0f85bebdc25ef971405175feb621865f4cad1..e35b1508d3eb0ce0ba1b54ee50bc610cee703427 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -133,8 +133,8 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value, buf, len, 5000); dev_dbg(&acm->control->dev, - "%s - rq 0x%02x, val %#x, len %#x, result %d\n", - __func__, request, value, len, retval); + "%s - rq 0x%02x, val %#x, len %#x, result %d\n", + __func__, request, value, len, retval); usb_autopm_put_interface(acm->control); @@ -158,6 +158,17 @@ static inline int acm_set_control(struct acm *acm, int control) #define acm_send_break(acm, ms) \ acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0) +static void acm_kill_urbs(struct acm *acm) +{ + int i; + + usb_kill_urb(acm->ctrlurb); + for (i = 0; i < ACM_NW; i++) + usb_kill_urb(acm->wb[i].urb); + for (i = 0; i < acm->rx_buflimit; i++) + usb_kill_urb(acm->read_urbs[i]); +} + /* * Write buffer management. * All of these assume proper locks taken by the caller. 
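Earlier in this patch the chipidea transfer descriptors and queue heads (struct ci_hw_td, struct ci_hw_qh) become __le32 and every update goes through cpu_to_le32(), so the values the DMA engine reads are little-endian even on a big-endian CPU; OR-ing in a host-order constant, as the old code did, would set the wrong bits there. A user-space sketch of why the conversion has to happen before the OR (le32_encode/le32_decode are simplified stand-ins for the kernel helpers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Produce a value whose in-memory byte order is little-endian, and back. */
static uint32_t le32_encode(uint32_t v)
{
        uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, (v >> 24) & 0xff };
        uint32_t out;

        memcpy(&out, b, sizeof(out));
        return out;
}

static uint32_t le32_decode(uint32_t v)
{
        uint8_t b[4];

        memcpy(b, &v, sizeof(b));
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

struct td {                     /* toy descriptor shared with a little-endian device */
        uint32_t next;
        uint32_t token;
};

int main(void)
{
        struct td td = { .next = le32_encode(0x1000), .token = le32_encode(0x8000) };

        /* set a flag the way the fixed driver does: convert first, then OR */
        td.token |= le32_encode(1u << 7);
        printf("token as the CPU sees it: 0x%x\n", le32_decode(td.token));
        return 0;
}

On a little-endian machine the encode/decode steps are no-ops, which is exactly why the missing conversions went unnoticed until the structures were annotated.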
@@ -291,13 +302,13 @@ static void acm_ctrl_irq(struct urb *urb) case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&acm->control->dev, - "%s - urb shutting down with status: %d\n", - __func__, status); + "%s - urb shutting down with status: %d\n", + __func__, status); return; default: dev_dbg(&acm->control->dev, - "%s - nonzero urb status received: %d\n", - __func__, status); + "%s - nonzero urb status received: %d\n", + __func__, status); goto exit; } @@ -306,16 +317,16 @@ static void acm_ctrl_irq(struct urb *urb) data = (unsigned char *)(dr + 1); switch (dr->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: - dev_dbg(&acm->control->dev, "%s - network connection: %d\n", - __func__, dr->wValue); + dev_dbg(&acm->control->dev, + "%s - network connection: %d\n", __func__, dr->wValue); break; case USB_CDC_NOTIFY_SERIAL_STATE: newctrl = get_unaligned_le16(data); if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { - dev_dbg(&acm->control->dev, "%s - calling hangup\n", - __func__); + dev_dbg(&acm->control->dev, + "%s - calling hangup\n", __func__); tty_port_tty_hangup(&acm->port, false); } @@ -357,8 +368,8 @@ static void acm_ctrl_irq(struct urb *urb) exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval && retval != -EPERM) - dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n", - __func__, retval); + dev_err(&acm->control->dev, + "%s - usb_submit_urb failed: %d\n", __func__, retval); } static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) @@ -372,8 +383,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) if (res) { if (res != -EPERM) { dev_err(&acm->data->dev, - "urb %d failed submission with %d\n", - index, res); + "urb %d failed submission with %d\n", + index, res); } set_bit(index, &acm->read_urbs_free); return res; @@ -416,30 +427,43 @@ static void acm_read_bulk_callback(struct urb *urb) int status = urb->status; dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", - rb->index, urb->actual_length, - status); + rb->index, urb->actual_length, status); + + set_bit(rb->index, &acm->read_urbs_free); if (!acm->dev) { - set_bit(rb->index, &acm->read_urbs_free); dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__); return; } - if (status) { - set_bit(rb->index, &acm->read_urbs_free); - if ((status != -ENOENT) || (urb->actual_length == 0)) - return; + switch (status) { + case 0: + usb_mark_last_busy(acm->dev); + acm_process_read_urb(acm, urb); + break; + case -EPIPE: + set_bit(EVENT_RX_STALL, &acm->flags); + schedule_work(&acm->work); + return; + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + dev_dbg(&acm->data->dev, + "%s - urb shutting down with status: %d\n", + __func__, status); + return; + default: + dev_dbg(&acm->data->dev, + "%s - nonzero urb status received: %d\n", + __func__, status); + break; } - usb_mark_last_busy(acm->dev); - - acm_process_read_urb(acm, urb); /* * Unthrottle may run on another CPU which needs to see events * in the same order. 
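The reworked read completion handler above cannot recover from a stalled endpoint in interrupt context, so it only records the condition: -EPIPE sets EVENT_RX_STALL in a flags word and schedules the work item, and acm_softint() later clears the halt and resubmits the URBs from process context. A stripped-down user-space sketch of that set-a-bit-and-defer pattern (a thread plays the role of the workqueue; the bit names follow the header change, everything else is illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define EVENT_TTY_WAKEUP 0
#define EVENT_RX_STALL   1

static atomic_ulong flags;

/* "work item": runs in a context that is allowed to sleep */
static void *softint(void *arg)
{
        (void)arg;
        if (atomic_fetch_and(&flags, ~(1UL << EVENT_RX_STALL)) & (1UL << EVENT_RX_STALL))
                printf("clearing the halt and resubmitting read URBs\n");
        if (atomic_fetch_and(&flags, ~(1UL << EVENT_TTY_WAKEUP)) & (1UL << EVENT_TTY_WAKEUP))
                printf("waking up the tty\n");
        return NULL;
}

/* "completion callback": may not sleep, so it only flags the event */
static void read_callback(pthread_t *worker, int status)
{
        if (status == -32 /* -EPIPE */) {
                atomic_fetch_or(&flags, 1UL << EVENT_RX_STALL);
                pthread_create(worker, NULL, softint, NULL);   /* schedule_work() */
        }
}

int main(void)
{
        pthread_t worker;

        read_callback(&worker, -32);
        pthread_join(worker, NULL);
        return 0;
}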
Submission has an implict barrier */ smp_mb__before_atomic(); - set_bit(rb->index, &acm->read_urbs_free); /* throttle device if requested by tty */ spin_lock_irqsave(&acm->read_lock, flags); @@ -469,14 +493,30 @@ static void acm_write_bulk(struct urb *urb) spin_lock_irqsave(&acm->write_lock, flags); acm_write_done(acm, wb); spin_unlock_irqrestore(&acm->write_lock, flags); + set_bit(EVENT_TTY_WAKEUP, &acm->flags); schedule_work(&acm->work); } static void acm_softint(struct work_struct *work) { + int i; struct acm *acm = container_of(work, struct acm, work); - tty_port_tty_wakeup(&acm->port); + if (test_bit(EVENT_RX_STALL, &acm->flags)) { + if (!(usb_autopm_get_interface(acm->data))) { + for (i = 0; i < acm->rx_buflimit; i++) + usb_kill_urb(acm->read_urbs[i]); + usb_clear_halt(acm->dev, acm->in); + acm_submit_read_urbs(acm, GFP_KERNEL); + usb_autopm_put_interface(acm->data); + } + clear_bit(EVENT_RX_STALL, &acm->flags); + } + + if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { + tty_port_tty_wakeup(&acm->port); + clear_bit(EVENT_TTY_WAKEUP, &acm->flags); + } } /* @@ -608,7 +648,6 @@ static void acm_port_shutdown(struct tty_port *port) struct acm *acm = container_of(port, struct acm, port); struct urb *urb; struct acm_wb *wb; - int i; /* * Need to grab write_lock to prevent race with resume, but no need to @@ -630,11 +669,7 @@ static void acm_port_shutdown(struct tty_port *port) usb_autopm_put_interface_async(acm->control); } - usb_kill_urb(acm->ctrlurb); - for (i = 0; i < ACM_NW; i++) - usb_kill_urb(acm->wb[i].urb); - for (i = 0; i < acm->rx_buflimit; i++) - usb_kill_urb(acm->read_urbs[i]); + acm_kill_urbs(acm); } static void acm_tty_cleanup(struct tty_struct *tty) @@ -837,8 +872,8 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state) retval = acm_send_break(acm, state ? 
0xffff : 0); if (retval < 0) - dev_dbg(&acm->control->dev, "%s - send break failed\n", - __func__); + dev_dbg(&acm->control->dev, + "%s - send break failed\n", __func__); return retval; } @@ -877,9 +912,6 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info) { struct serial_struct tmp; - if (!info) - return -EINVAL; - memset(&tmp, 0, sizeof(tmp)); tmp.flags = ASYNC_LOW_LATENCY; tmp.xmit_fifo_size = acm->writesize; @@ -932,8 +964,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg) DECLARE_WAITQUEUE(wait, current); struct async_icount old, new; - if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)) - return -EINVAL; do { spin_lock_irq(&acm->read_lock); old = acm->oldcount; @@ -971,25 +1001,20 @@ static int wait_serial_change(struct acm *acm, unsigned long arg) return rv; } -static int get_serial_usage(struct acm *acm, - struct serial_icounter_struct __user *count) +static int acm_tty_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) { - struct serial_icounter_struct icount; - int rv = 0; - - memset(&icount, 0, sizeof(icount)); - icount.dsr = acm->iocount.dsr; - icount.rng = acm->iocount.rng; - icount.dcd = acm->iocount.dcd; - icount.frame = acm->iocount.frame; - icount.overrun = acm->iocount.overrun; - icount.parity = acm->iocount.parity; - icount.brk = acm->iocount.brk; + struct acm *acm = tty->driver_data; - if (copy_to_user(count, &icount, sizeof(icount)) > 0) - rv = -EFAULT; + icount->dsr = acm->iocount.dsr; + icount->rng = acm->iocount.rng; + icount->dcd = acm->iocount.dcd; + icount->frame = acm->iocount.frame; + icount->overrun = acm->iocount.overrun; + icount->parity = acm->iocount.parity; + icount->brk = acm->iocount.brk; - return rv; + return 0; } static int acm_tty_ioctl(struct tty_struct *tty, @@ -1014,9 +1039,6 @@ static int acm_tty_ioctl(struct tty_struct *tty, rv = wait_serial_change(acm, arg); usb_autopm_put_interface(acm->control); break; - case TIOCGICOUNT: - rv = get_serial_usage(acm, (struct serial_icounter_struct __user *) arg); - break; } return rv; @@ -1090,19 +1112,17 @@ static void acm_write_buffers_free(struct acm *acm) { int i; struct acm_wb *wb; - struct usb_device *usb_dev = interface_to_usbdev(acm->control); for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) - usb_free_coherent(usb_dev, acm->writesize, wb->buf, wb->dmah); + usb_free_coherent(acm->dev, acm->writesize, wb->buf, wb->dmah); } static void acm_read_buffers_free(struct acm *acm) { - struct usb_device *usb_dev = interface_to_usbdev(acm->control); int i; for (i = 0; i < acm->rx_buflimit; i++) - usb_free_coherent(usb_dev, acm->readsize, + usb_free_coherent(acm->dev, acm->readsize, acm->read_buffers[i].base, acm->read_buffers[i].dma); } @@ -1161,6 +1181,8 @@ static int acm_probe(struct usb_interface *intf, if (quirks == IGNORE_DEVICE) return -ENODEV; + memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header)); + num_rx_buf = (quirks == SINGLE_RX_URB) ? 
1 : ACM_NR; /* handle quirks deadly to normal probing*/ @@ -1345,9 +1367,16 @@ static int acm_probe(struct usb_interface *intf, spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); mutex_init(&acm->mutex); - acm->is_int_ep = usb_endpoint_xfer_int(epread); - if (acm->is_int_ep) + if (usb_endpoint_xfer_int(epread)) { acm->bInterval = epread->bInterval; + acm->in = usb_rcvintpipe(usb_dev, epread->bEndpointAddress); + } else { + acm->in = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); + } + if (usb_endpoint_xfer_int(epwrite)) + acm->out = usb_sndintpipe(usb_dev, epwrite->bEndpointAddress); + else + acm->out = usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress); tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; init_usb_anchor(&acm->delayed); @@ -1382,20 +1411,15 @@ static int acm_probe(struct usb_interface *intf, urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; urb->transfer_dma = rb->dma; - if (acm->is_int_ep) { - usb_fill_int_urb(urb, acm->dev, - usb_rcvintpipe(usb_dev, epread->bEndpointAddress), - rb->base, + if (usb_endpoint_xfer_int(epread)) + usb_fill_int_urb(urb, acm->dev, acm->in, rb->base, acm->readsize, acm_read_bulk_callback, rb, acm->bInterval); - } else { - usb_fill_bulk_urb(urb, acm->dev, - usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), - rb->base, + else + usb_fill_bulk_urb(urb, acm->dev, acm->in, rb->base, acm->readsize, acm_read_bulk_callback, rb); - } acm->read_urbs[i] = urb; __set_bit(i, &acm->read_urbs_free); @@ -1408,12 +1432,10 @@ static int acm_probe(struct usb_interface *intf, goto alloc_fail7; if (usb_endpoint_xfer_int(epwrite)) - usb_fill_int_urb(snd->urb, usb_dev, - usb_sndintpipe(usb_dev, epwrite->bEndpointAddress), + usb_fill_int_urb(snd->urb, usb_dev, acm->out, NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval); else - usb_fill_bulk_urb(snd->urb, usb_dev, - usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), + usb_fill_bulk_urb(snd->urb, usb_dev, acm->out, NULL, acm->writesize, acm_write_bulk, snd); snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (quirks & SEND_ZERO_PACKET) @@ -1485,8 +1507,8 @@ static int acm_probe(struct usb_interface *intf, } if (quirks & CLEAR_HALT_CONDITIONS) { - usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress)); - usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress)); + usb_clear_halt(usb_dev, acm->in); + usb_clear_halt(usb_dev, acm->out); } return 0; @@ -1520,25 +1542,10 @@ static int acm_probe(struct usb_interface *intf, return rv; } -static void stop_data_traffic(struct acm *acm) -{ - int i; - - usb_kill_urb(acm->ctrlurb); - for (i = 0; i < ACM_NW; i++) - usb_kill_urb(acm->wb[i].urb); - for (i = 0; i < acm->rx_buflimit; i++) - usb_kill_urb(acm->read_urbs[i]); - - cancel_work_sync(&acm->work); -} - static void acm_disconnect(struct usb_interface *intf) { struct acm *acm = usb_get_intfdata(intf); - struct usb_device *usb_dev = interface_to_usbdev(intf); struct tty_struct *tty; - int i; /* sibling interface is already cleaning up */ if (!acm) @@ -1564,17 +1571,13 @@ static void acm_disconnect(struct usb_interface *intf) tty_kref_put(tty); } - stop_data_traffic(acm); + acm_kill_urbs(acm); + cancel_work_sync(&acm->work); tty_unregister_device(acm_tty_driver, acm->minor); - usb_free_urb(acm->ctrlurb); - for (i = 0; i < ACM_NW; i++) - usb_free_urb(acm->wb[i].urb); - for (i = 0; i < acm->rx_buflimit; i++) - usb_free_urb(acm->read_urbs[i]); acm_write_buffers_free(acm); - usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); + 
usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); acm_read_buffers_free(acm); if (!acm->combined_interfaces) @@ -1603,7 +1606,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) if (cnt) return 0; - stop_data_traffic(acm); + acm_kill_urbs(acm); + cancel_work_sync(&acm->work); return 0; } @@ -1657,6 +1661,15 @@ static int acm_reset_resume(struct usb_interface *intf) #endif /* CONFIG_PM */ +static int acm_pre_reset(struct usb_interface *intf) +{ + struct acm *acm = usb_get_intfdata(intf); + + clear_bit(EVENT_RX_STALL, &acm->flags); + + return 0; +} + #define NOKIA_PCSUITE_ACM_INFO(x) \ USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ @@ -1719,6 +1732,7 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */ .driver_info = QUIRK_CONTROL_LINE_STATE, }, { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ + { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */ { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ }, /* Motorola H24 HSPA module: */ @@ -1898,6 +1912,7 @@ static struct usb_driver acm_driver = { .resume = acm_resume, .reset_resume = acm_reset_resume, #endif + .pre_reset = acm_pre_reset, .id_table = acm_ids, #ifdef CONFIG_PM .supports_autosuspend = 1, @@ -1927,6 +1942,7 @@ static const struct tty_operations acm_ops = { .set_termios = acm_tty_set_termios, .tiocmget = acm_tty_tiocmget, .tiocmset = acm_tty_tiocmset, + .get_icount = acm_tty_get_icount, }; /* diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 1f1eabfd846280ada598b776504d7d01e5b69886..c980f11cdf560a773e6d6bc4b8fccd232c0df200 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -83,6 +83,7 @@ struct acm { struct usb_device *dev; /* the corresponding usb device */ struct usb_interface *control; /* control interface */ struct usb_interface *data; /* data interface */ + unsigned in, out; /* i/o pipes */ struct tty_port port; /* our tty port data */ struct urb *ctrlurb; /* urbs */ u8 *ctrl_buffer; /* buffers of urbs */ @@ -102,6 +103,9 @@ struct acm { spinlock_t write_lock; struct mutex mutex; bool disconnected; + unsigned long flags; +# define EVENT_TTY_WAKEUP 0 +# define EVENT_RX_STALL 1 struct usb_cdc_line_coding line; /* bits, stop, parity */ struct work_struct work; /* work queue entry for line discipline waking up */ unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ @@ -116,7 +120,6 @@ struct acm { unsigned int ctrl_caps; /* control capabilities from the class specific header */ unsigned int susp_count; /* number of suspended interfaces */ unsigned int combined_interfaces:1; /* control and data collapsed */ - unsigned int is_int_ep:1; /* interrupt endpoints contrary to spec used */ unsigned int throttled:1; /* actually throttled */ unsigned int throttle_req:1; /* throttle requested */ u8 bInterval; diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index a6c1fae7d52a4d824812b50553df682900f4523f..f03692ec552056845c6fa50947e38abca47ea66b 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -157,6 +157,7 @@ static int usbtmc_open(struct inode *inode, struct file *filp) } data = usb_get_intfdata(intf); + /* Protect reference to data from file structure until release */ kref_get(&data->kref); /* Store pointer in file structure's private data field */ @@ -531,7 +532,7 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data, } /* - * 
Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-IN endpoint. + * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint. * @transfer_size: number of bytes to request from the device. * * See the USBTMC specification, Table 4. @@ -1471,7 +1472,7 @@ static int usbtmc_probe(struct usb_interface *intf, if (!data->iin_urb) goto error_register; - /* will reference data in int urb */ + /* Protect interrupt in endpoint data until iin_urb is freed */ kref_get(&data->kref); /* allocate buffer for interrupt in */ diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c index 98e39f91723a52ec7cf607dc60f50035e66a569f..b9bf6e2eb6feabc49daebff549b361d06622d8e9 100644 --- a/drivers/usb/core/buffer.c +++ b/drivers/usb/core/buffer.c @@ -3,6 +3,9 @@ * * This implementation plugs in through generic "usb_bus" level methods, * and should work with all USB controllers, regardless of bus type. + * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 */ #include diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index a2d90aca779fa16fb07e1327f33a41129c719e63..0aa9e7d697a5df8a8d25194c377498ddce049d87 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -1,3 +1,8 @@ +/* + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 + */ + #include #include #include diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index ef04b50e6bbb9ff2d10f44b53b3ae91a1b3f7eca..f2987ddb1cde3492189f21caa6b23e807218abe2 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -182,14 +182,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, dir = usb_endpoint_dir_in(desc) ? 'I' : 'O'; - if (speed == USB_SPEED_HIGH) { - switch (usb_endpoint_maxp(desc) & (0x03 << 11)) { - case 1 << 11: - bandwidth = 2; break; - case 2 << 11: - bandwidth = 3; break; - } - } + if (speed == USB_SPEED_HIGH) + bandwidth = usb_endpoint_maxp_mult(desc); /* this isn't checking for illegal values */ switch (usb_endpoint_type(desc)) { @@ -233,7 +227,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, start += sprintf(start, format_endpt, desc->bEndpointAddress, dir, desc->bmAttributes, type, - (usb_endpoint_maxp(desc) & 0x07ff) * + usb_endpoint_maxp(desc) * bandwidth, interval, unit); return start; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index dadd1e8dfe09ded9d28e541099d43f582707596c..cdee5130638b109e5be899c41b8a5f432620c4e3 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -15,6 +15,9 @@ * (usb_device_id matching changes by Adam J. Richter) * (C) Copyright Greg Kroah-Hartman 2002-2003 * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 + * * NOTE! This is not actually a driver at all, rather this is * just a collection of helper routines that implement the * matching, probing, releasing, suspending and resuming for diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c index 101983b7e8d271f7aaedf1b594b4579dfefb489d..a60bc830a056dbed076f4682c6a427ddd3824b7f 100644 --- a/drivers/usb/core/endpoint.c +++ b/drivers/usb/core/endpoint.c @@ -5,8 +5,10 @@ * (C) Copyright 2002,2004 IBM Corp. * (C) Copyright 2006 Novell Inc. * - * Endpoint sysfs stuff + * Released under the GPLv2 only. 
+ * SPDX-License-Identifier: GPL-2.0 * + * Endpoint sysfs stuff */ #include @@ -50,8 +52,7 @@ static ssize_t wMaxPacketSize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ep_device *ep = to_ep_device(dev); - return sprintf(buf, "%04x\n", - usb_endpoint_maxp(ep->desc) & 0x07ff); + return sprintf(buf, "%04x\n", usb_endpoint_maxp(ep->desc)); } static DEVICE_ATTR_RO(wMaxPacketSize); diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 822ced9639aaf67463b359055ee825ff863761ea..e26bd5e773ada0685329ec8ed65084ff72d7242d 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -13,6 +13,8 @@ * (usb_device_id matching changes by Adam J. Richter) * (C) Copyright Greg Kroah-Hartman 2002-2003 * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 */ #include diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index 358ca8dd784fe43700ae070764fa783500a792fe..bd3e0c5a6db25e7a162d922c6508de1ad0b68025 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -15,6 +15,8 @@ * (usb_device_id matching changes by Adam J. Richter) * (C) Copyright Greg Kroah-Hartman 2002-2003 * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 */ #include diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index cbb146736f577da8a060d49b17052af78881adfa..1fa5c0f29c647ab38367c00817518e122cf3b65a 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -6,6 +6,8 @@ * (C) Copyright 1999 Gregory P. Smith * (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au) * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 */ #include @@ -27,7 +29,7 @@ #include #include -#include +#include #include #include "hub.h" @@ -101,6 +103,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); static void hub_release(struct kref *kref); static int usb_reset_and_verify_device(struct usb_device *udev); +static void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev); static inline char *portspeed(struct usb_hub *hub, int portstatus) { @@ -899,82 +903,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1, } /* - * If USB 3.0 ports are placed into the Disabled state, they will no longer - * detect any device connects or disconnects. This is generally not what the - * USB core wants, since it expects a disabled port to produce a port status - * change event when a new device connects. - * - * Instead, set the link state to Disabled, wait for the link to settle into - * that state, clear any change bits, and then put the port into the RxDetect - * state. + * USB-3 does not have a similar link state as USB-2 that will avoid negotiating + * a connection with a plugged-in cable but will signal the host when the cable + * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices */ -static int hub_usb3_port_disable(struct usb_hub *hub, int port1) -{ - int ret; - int total_time; - u16 portchange, portstatus; - - if (!hub_is_superspeed(hub->hdev)) - return -EINVAL; - - ret = hub_port_status(hub, port1, &portstatus, &portchange); - if (ret < 0) - return ret; - - /* - * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI - * Controller [1022:7814] will have spurious result making the following - * usb 3.0 device hotplugging route to the 2.0 root hub and recognized - * as high-speed device if we set the usb 3.0 port link state to - * Disabled. 
Since it's already in USB_SS_PORT_LS_RX_DETECT state, we - * check the state here to avoid the bug. - */ - if ((portstatus & USB_PORT_STAT_LINK_STATE) == - USB_SS_PORT_LS_RX_DETECT) { - dev_dbg(&hub->ports[port1 - 1]->dev, - "Not disabling port; link state is RxDetect\n"); - return ret; - } - - ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); - if (ret) - return ret; - - /* Wait for the link to enter the disabled state. */ - for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { - ret = hub_port_status(hub, port1, &portstatus, &portchange); - if (ret < 0) - return ret; - - if ((portstatus & USB_PORT_STAT_LINK_STATE) == - USB_SS_PORT_LS_SS_DISABLED) - break; - if (total_time >= HUB_DEBOUNCE_TIMEOUT) - break; - msleep(HUB_DEBOUNCE_STEP); - } - if (total_time >= HUB_DEBOUNCE_TIMEOUT) - dev_warn(&hub->ports[port1 - 1]->dev, - "Could not disable after %d ms\n", total_time); - - return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT); -} - static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *hdev = hub->hdev; int ret = 0; - if (port_dev->child && set_state) - usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); if (!hub->error) { - if (hub_is_superspeed(hub->hdev)) - ret = hub_usb3_port_disable(hub, port1); - else + if (hub_is_superspeed(hub->hdev)) { + hub_usb3_port_prepare_disable(hub, port_dev); + ret = hub_set_port_link_state(hub, port_dev->portnum, + USB_SS_PORT_LS_U3); + } else { ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE); + } } + if (port_dev->child && set_state) + usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); if (ret && ret != -ENODEV) dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret); return ret; @@ -2731,8 +2681,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, if (ret < 0) return ret; - /* The port state is unknown until the reset completes. */ - if (!(portstatus & USB_PORT_STAT_RESET)) + /* + * The port state is unknown until the reset completes. + * + * On top of that, some chips may require additional time + * to re-establish a connection after the reset is complete, + * so also wait for the connection to be re-established. 
+ */ + if (!(portstatus & USB_PORT_STAT_RESET) && + (portstatus & USB_PORT_STAT_CONNECTION)) break; /* switch to the long delay after two short delay failures */ @@ -4140,6 +4097,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev) } EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); +/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */ +static void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev) +{ + struct usb_device *udev = port_dev->child; + int ret; + + if (udev && udev->port_is_suspended && udev->do_remote_wakeup) { + ret = hub_set_port_link_state(hub, port_dev->portnum, + USB_SS_PORT_LS_U0); + if (!ret) { + msleep(USB_RESUME_TIMEOUT); + ret = usb_disable_remote_wakeup(udev); + } + if (ret) + dev_warn(&udev->dev, + "Port disable: can't disable remote wake\n"); + udev->do_remote_wakeup = 0; + } +} #else /* CONFIG_PM */ @@ -4147,6 +4124,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); #define hub_resume NULL #define hub_reset_resume NULL +static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev) { } + int usb_disable_lpm(struct usb_device *udev) { return 0; diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c index 3ed5162677ad562e1ffa19b1913cd7b332137fda..1713248ab15ad497a54f76f9fd6bb7150ccc023a 100644 --- a/drivers/usb/core/ledtrig-usbport.c +++ b/drivers/usb/core/ledtrig-usbport.c @@ -74,8 +74,7 @@ static void usbport_trig_update_count(struct usbport_trig_data *usbport_data) usbport_data->count = 0; usb_for_each_dev(usbport_data, usbport_trig_usb_dev_check); - led_cdev->brightness_set(led_cdev, - usbport_data->count ? LED_FULL : LED_OFF); + led_set_brightness(led_cdev, usbport_data->count ? LED_FULL : LED_OFF); } /*************************************** @@ -228,12 +227,12 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action, case USB_DEVICE_ADD: usbport_trig_add_usb_dev_ports(usb_dev, usbport_data); if (observed && usbport_data->count++ == 0) - led_cdev->brightness_set(led_cdev, LED_FULL); + led_set_brightness(led_cdev, LED_FULL); return NOTIFY_OK; case USB_DEVICE_REMOVE: usbport_trig_remove_usb_dev_ports(usbport_data, usb_dev); if (observed && --usbport_data->count == 0) - led_cdev->brightness_set(led_cdev, LED_OFF); + led_set_brightness(led_cdev, LED_OFF); return NOTIFY_OK; } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 3a47077461578f2af47af8d551353c2c7cb12f0e..dea55914d6410c5b472c36562864baee3143bf86 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1,5 +1,8 @@ /* * message.c - synchronous message handling + * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 */ #include /* for scatterlist macros */ diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c index 7728c91dfa2e61eb50b21d75f558a96342db60a2..b12a463a3e22176caf4ecc8e84d4da231edbb96b 100644 --- a/drivers/usb/core/notify.c +++ b/drivers/usb/core/notify.c @@ -6,6 +6,8 @@ * notifier functions originally based on those in kernel/sys.c * but fixed up to not be so broken. * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 */ diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index c953a0f1c69513a066ceab7bb7372f946b19e8ed..dfc68ed24db19bbba53f825bbe650894748d5551 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -7,6 +7,8 @@ * * All of the sysfs file attributes for usb devices and interfaces. * + * Released under the GPLv2 only. 
+ * SPDX-License-Identifier: GPL-2.0 */ @@ -14,6 +16,7 @@ #include #include #include +#include #include "usb.h" /* Active configuration fields */ @@ -104,6 +107,17 @@ static ssize_t bConfigurationValue_store(struct device *dev, static DEVICE_ATTR_IGNORE_LOCKDEP(bConfigurationValue, S_IRUGO | S_IWUSR, bConfigurationValue_show, bConfigurationValue_store); +#ifdef CONFIG_OF +static ssize_t devspec_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct device_node *of_node = dev->of_node; + + return sprintf(buf, "%s\n", of_node_full_name(of_node)); +} +static DEVICE_ATTR_RO(devspec); +#endif + /* String fields */ #define usb_string_attr(name) \ static ssize_t name##_show(struct device *dev, \ @@ -786,6 +800,9 @@ static struct attribute *dev_attrs[] = { &dev_attr_remove.attr, &dev_attr_removable.attr, &dev_attr_ltm_capable.attr, +#ifdef CONFIG_OF + &dev_attr_devspec.attr, +#endif NULL, }; static struct attribute_group dev_attr_grp = { diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index a9039696476e68c8da475bbe40e1b841e64d903b..d75cb8c0f7df76ca1c78d1c39de9db17f9039af0 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -1,3 +1,8 @@ +/* + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 + */ + #include #include #include @@ -407,11 +412,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) } /* "high bandwidth" mode, 1-3 packets/uframe? */ - if (dev->speed == USB_SPEED_HIGH) { - int mult = 1 + ((max >> 11) & 0x03); - max &= 0x07ff; - max *= mult; - } + if (dev->speed == USB_SPEED_HIGH) + max *= usb_endpoint_maxp_mult(&ep->desc); if (urb->number_of_packets <= 0) return -EINVAL; diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 5921514610179f04232e11da85746344ff3ee3e0..a2ccc69fb45c092a5220474c64b03418133f2178 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -12,6 +12,9 @@ * (usb_device_id matching changes by Adam J. Richter) * (C) Copyright Greg Kroah-Hartman 2002-2003 * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 + * * NOTE! This is not actually a driver at all, rather this is * just a collection of helper routines that implement the * generic USB things that the real drivers can use.. diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 53318126ed91b24603934030702a0dae178cc5d1..dc69492488234141b9980fee41045b5461da206d 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -1,3 +1,8 @@ +/* + * Released under the GPLv2 only. 
+ * SPDX-License-Identifier: GPL-2.0 + */ + #include #include diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile index 50fdaace1e735f1f9a989bdebb0977a4224d8a44..b9237e1e45d098d43de9dea17bc535caa13c52ce 100644 --- a/drivers/usb/dwc2/Makefile +++ b/drivers/usb/dwc2/Makefile @@ -3,6 +3,7 @@ ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG obj-$(CONFIG_USB_DWC2) += dwc2.o dwc2-y := core.o core_intr.o platform.o +dwc2-y += params.o ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),) dwc2-y += hcd.o hcd_intr.o diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index 4c0fa0b173538847e680ae13e983d6a3534311d6..11d8ae9aead1a2fe54b54695034e196968579c3b 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -135,7 +135,7 @@ int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore) u32 pcgcctl; int ret = 0; - if (!hsotg->core_params->hibernation) + if (!hsotg->params.hibernation) return -ENOTSUPP; pcgcctl = dwc2_readl(hsotg->regs + PCGCTL); @@ -188,7 +188,7 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg) u32 pcgcctl; int ret = 0; - if (!hsotg->core_params->hibernation) + if (!hsotg->params.hibernation) return -ENOTSUPP; /* Backup all registers */ @@ -445,7 +445,7 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host) * the force mode. We only need to call this once during probe if * dr_mode == OTG. */ -static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) +void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) { u32 gusbcfg; @@ -541,7 +541,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) addr = hsotg->regs + HAINTMSK; dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { addr = hsotg->regs + HFLBADDR; dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); @@ -551,7 +551,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); - for (i = 0; i < hsotg->core_params->host_channels; i++) { + for (i = 0; i < hsotg->params.host_channels; i++) { dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i); addr = hsotg->regs + HCCHAR(i); dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n", @@ -571,7 +571,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) addr = hsotg->regs + HCDMA(i); dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { addr = hsotg->regs + HCDMAB(i); dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); @@ -735,704 +735,13 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg) udelay(1); } -#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) - -/* Parameter access functions */ -void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - switch (val) { - case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: - if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) - valid = 0; - break; - case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: - switch (hsotg->hw_params.op_mode) { - case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: - case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: - break; - default: - valid = 0; - break; - } - break; - case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: - /* always 
valid */ - break; - default: - valid = 0; - break; - } - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for otg_cap parameter. Check HW configuration.\n", - val); - switch (hsotg->hw_params.op_mode) { - case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: - val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; - break; - case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: - val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; - break; - default: - val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; - break; - } - dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); - } - - hsotg->core_params->otg_cap = val; -} - -void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH) - valid = 0; - if (val < 0) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for dma_enable parameter. Check HW configuration.\n", - val); - val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH; - dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val); - } - - hsotg->core_params->dma_enable = val; -} - -void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val > 0 && (hsotg->core_params->dma_enable <= 0 || - !hsotg->hw_params.dma_desc_enable)) - valid = 0; - if (val < 0) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for dma_desc_enable parameter. Check HW configuration.\n", - val); - val = (hsotg->core_params->dma_enable > 0 && - hsotg->hw_params.dma_desc_enable); - dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); - } - - hsotg->core_params->dma_desc_enable = val; -} - -void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val > 0 && (hsotg->core_params->dma_enable <= 0 || - !hsotg->hw_params.dma_desc_enable)) - valid = 0; - if (val < 0) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n", - val); - val = (hsotg->core_params->dma_enable > 0 && - hsotg->hw_params.dma_desc_enable); - } - - hsotg->core_params->dma_desc_fs_enable = val; - dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); -} - -void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, - int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "Wrong value for host_support_fs_low_power\n"); - dev_err(hsotg->dev, - "host_support_fs_low_power must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, - "Setting host_support_fs_low_power to %d\n", val); - } - - hsotg->core_params->host_support_fs_ls_low_power = val; -} - -void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) - valid = 0; - if (val < 0) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n", - val); - val = hsotg->hw_params.enable_dynamic_fifo; - dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); - } - - hsotg->core_params->enable_dynamic_fifo = val; -} - -void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_rx_fifo_size. 
Check HW configuration.\n", - val); - val = hsotg->hw_params.host_rx_fifo_size; - dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); - } - - hsotg->core_params->host_rx_fifo_size = val; -} - -void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", - val); - val = hsotg->hw_params.host_nperio_tx_fifo_size; - dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", - val); - } - - hsotg->core_params->host_nperio_tx_fifo_size = val; -} - -void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n", - val); - val = hsotg->hw_params.host_perio_tx_fifo_size; - dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", - val); - } - - hsotg->core_params->host_perio_tx_fifo_size = val; -} - -void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 2047 || val > hsotg->hw_params.max_transfer_size) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for max_transfer_size. Check HW configuration.\n", - val); - val = hsotg->hw_params.max_transfer_size; - dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); - } - - hsotg->core_params->max_transfer_size = val; -} - -void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 15 || val > hsotg->hw_params.max_packet_count) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for max_packet_count. Check HW configuration.\n", - val); - val = hsotg->hw_params.max_packet_count; - dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); - } - - hsotg->core_params->max_packet_count = val; -} - -void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 1 || val > hsotg->hw_params.host_channels) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_channels. Check HW configuration.\n", - val); - val = hsotg->hw_params.host_channels; - dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); - } - - hsotg->core_params->host_channels = val; -} - -void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 0; - u32 hs_phy_type, fs_phy_type; - - if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, - DWC2_PHY_TYPE_PARAM_ULPI)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for phy_type\n"); - dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); - } - - valid = 0; - } - - hs_phy_type = hsotg->hw_params.hs_phy_type; - fs_phy_type = hsotg->hw_params.fs_phy_type; - if (val == DWC2_PHY_TYPE_PARAM_UTMI && - (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) - valid = 1; - else if (val == DWC2_PHY_TYPE_PARAM_ULPI && - (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) - valid = 1; - else if (val == DWC2_PHY_TYPE_PARAM_FS && - fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) - valid = 1; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for phy_type. 
Check HW configuration.\n", - val); - val = DWC2_PHY_TYPE_PARAM_FS; - if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { - if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) - val = DWC2_PHY_TYPE_PARAM_UTMI; - else - val = DWC2_PHY_TYPE_PARAM_ULPI; - } - dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); - } - - hsotg->core_params->phy_type = val; -} - -static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) -{ - return hsotg->core_params->phy_type; -} - -void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for speed parameter\n"); - dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n"); - } - valid = 0; - } - - if (val == DWC2_SPEED_PARAM_HIGH && - dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for speed parameter. Check HW configuration.\n", - val); - val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? - DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; - dev_dbg(hsotg->dev, "Setting speed to %d\n", val); - } - - hsotg->core_params->speed = val; -} - -void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, - DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { - if (val >= 0) { - dev_err(hsotg->dev, - "Wrong value for host_ls_low_power_phy_clk parameter\n"); - dev_err(hsotg->dev, - "host_ls_low_power_phy_clk must be 0 or 1\n"); - } - valid = 0; - } - - if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && - dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", - val); - val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS - ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ - : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; - dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", - val); - } - - hsotg->core_params->host_ls_low_power_phy_clk = val; -} - -void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); - dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val); - } - - hsotg->core_params->phy_ulpi_ddr = val; -} - -void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "Wrong value for phy_ulpi_ext_vbus\n"); - dev_err(hsotg->dev, - "phy_ulpi_ext_vbus must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); - } - - hsotg->core_params->phy_ulpi_ext_vbus = val; -} - -void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 0; - - switch (hsotg->hw_params.utmi_phy_data_width) { - case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: - valid = (val == 8); - break; - case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: - valid = (val == 16); - break; - case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: - valid = (val == 8 || val == 16); - break; - } - - if (!valid) { - if (val >= 0) { - dev_err(hsotg->dev, - "%d invalid for phy_utmi_width. 
Check HW configuration.\n", - val); - } - val = (hsotg->hw_params.utmi_phy_data_width == - GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16; - dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); - } - - hsotg->core_params->phy_utmi_width = val; -} - -void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); - dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); - } - - hsotg->core_params->ulpi_fs_ls = val; -} - -void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for ts_dline\n"); - dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); - } - - hsotg->core_params->ts_dline = val; -} - -void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); - dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); - } - - valid = 0; - } - - if (val == 1 && !(hsotg->hw_params.i2c_enable)) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for i2c_enable. Check HW configuration.\n", - val); - val = hsotg->hw_params.i2c_enable; - dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); - } - - hsotg->core_params->i2c_enable = val; -} - -void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "Wrong value for en_multiple_tx_fifo,\n"); - dev_err(hsotg->dev, - "en_multiple_tx_fifo must be 0 or 1\n"); - } - valid = 0; - } - - if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n", - val); - val = hsotg->hw_params.en_multiple_tx_fifo; - dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); - } - - hsotg->core_params->en_multiple_tx_fifo = val; -} - -void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter reload_ctl\n", val); - dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); - } - valid = 0; - } - - if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for parameter reload_ctl. 
Check HW configuration.\n", - val); - val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; - dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); - } - - hsotg->core_params->reload_ctl = val; -} - -void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) -{ - if (val != -1) - hsotg->core_params->ahbcfg = val; - else - hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << - GAHBCFG_HBSTLEN_SHIFT; -} - -void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter otg_ver\n", val); - dev_err(hsotg->dev, - "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); - } - - hsotg->core_params->otg_ver = val; -} - -static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter uframe_sched\n", - val); - dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); - } - val = 1; - dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); - } - - hsotg->core_params->uframe_sched = val; -} - -static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, - int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter external_id_pin_ctl\n", - val); - dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); - } - - hsotg->core_params->external_id_pin_ctl = val; -} - -static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, - int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter hibernation\n", - val); - dev_err(hsotg->dev, "hibernation must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val); - } - - hsotg->core_params->hibernation = val; -} - -/* - * This function is called during module intialization to pass module parameters - * for the DWC_otg core. 
- */ -void dwc2_set_parameters(struct dwc2_hsotg *hsotg, - const struct dwc2_core_params *params) -{ - dev_dbg(hsotg->dev, "%s()\n", __func__); - - dwc2_set_param_otg_cap(hsotg, params->otg_cap); - dwc2_set_param_dma_enable(hsotg, params->dma_enable); - dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); - dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); - dwc2_set_param_host_support_fs_ls_low_power(hsotg, - params->host_support_fs_ls_low_power); - dwc2_set_param_enable_dynamic_fifo(hsotg, - params->enable_dynamic_fifo); - dwc2_set_param_host_rx_fifo_size(hsotg, - params->host_rx_fifo_size); - dwc2_set_param_host_nperio_tx_fifo_size(hsotg, - params->host_nperio_tx_fifo_size); - dwc2_set_param_host_perio_tx_fifo_size(hsotg, - params->host_perio_tx_fifo_size); - dwc2_set_param_max_transfer_size(hsotg, - params->max_transfer_size); - dwc2_set_param_max_packet_count(hsotg, - params->max_packet_count); - dwc2_set_param_host_channels(hsotg, params->host_channels); - dwc2_set_param_phy_type(hsotg, params->phy_type); - dwc2_set_param_speed(hsotg, params->speed); - dwc2_set_param_host_ls_low_power_phy_clk(hsotg, - params->host_ls_low_power_phy_clk); - dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr); - dwc2_set_param_phy_ulpi_ext_vbus(hsotg, - params->phy_ulpi_ext_vbus); - dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width); - dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls); - dwc2_set_param_ts_dline(hsotg, params->ts_dline); - dwc2_set_param_i2c_enable(hsotg, params->i2c_enable); - dwc2_set_param_en_multiple_tx_fifo(hsotg, - params->en_multiple_tx_fifo); - dwc2_set_param_reload_ctl(hsotg, params->reload_ctl); - dwc2_set_param_ahbcfg(hsotg, params->ahbcfg); - dwc2_set_param_otg_ver(hsotg, params->otg_ver); - dwc2_set_param_uframe_sched(hsotg, params->uframe_sched); - dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl); - dwc2_set_param_hibernation(hsotg, params->hibernation); -} - /* * Forces either host or device mode if the controller is not * currently in that mode. * * Returns true if the mode was forced. */ -static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) +bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) { if (host && dwc2_is_host_mode(hsotg)) return false; @@ -1442,232 +751,9 @@ static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) return dwc2_force_mode(hsotg, host); } -/* - * Gets host hardware parameters. Forces host mode if not currently in - * host mode. Should be called immediately after a core soft reset in - * order to get the reset values. - */ -static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hw_params *hw = &hsotg->hw_params; - u32 gnptxfsiz; - u32 hptxfsiz; - bool forced; - - if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) - return; - - forced = dwc2_force_mode_if_needed(hsotg, true); - - gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); - hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); - dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); - dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); - - if (forced) - dwc2_clear_force_mode(hsotg); - - hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; - hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; -} - -/* - * Gets device hardware parameters. Forces device mode if not - * currently in device mode. Should be called immediately after a core - * soft reset in order to get the reset values. 
- */ -static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hw_params *hw = &hsotg->hw_params; - bool forced; - u32 gnptxfsiz; - - if (hsotg->dr_mode == USB_DR_MODE_HOST) - return; - - forced = dwc2_force_mode_if_needed(hsotg, false); - - gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); - dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); - - if (forced) - dwc2_clear_force_mode(hsotg); - - hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; -} - -/** - * During device initialization, read various hardware configuration - * registers and interpret the contents. - */ -int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hw_params *hw = &hsotg->hw_params; - unsigned width; - u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; - u32 grxfsiz; - - /* - * Attempt to ensure this device is really a DWC_otg Controller. - * Read and verify the GSNPSID register contents. The value should be - * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3", - * as in "OTG version 2.xx" or "OTG version 3.xx". - */ - hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID); - if ((hw->snpsid & 0xfffff000) != 0x4f542000 && - (hw->snpsid & 0xfffff000) != 0x4f543000) { - dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", - hw->snpsid); - return -ENODEV; - } - - dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n", - hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, - hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); - - hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1); - hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); - hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); - hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); - grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); - - dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1); - dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2); - dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3); - dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); - dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); - - /* - * Host specific hardware parameters. Reading these parameters - * requires the controller to be in host mode. The mode will - * be forced, if necessary, to read these values. 
- */ - dwc2_get_host_hwparams(hsotg); - dwc2_get_dev_hwparams(hsotg); - - /* hwcfg1 */ - hw->dev_ep_dirs = hwcfg1; - - /* hwcfg2 */ - hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> - GHWCFG2_OP_MODE_SHIFT; - hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >> - GHWCFG2_ARCHITECTURE_SHIFT; - hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO); - hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >> - GHWCFG2_NUM_HOST_CHAN_SHIFT); - hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >> - GHWCFG2_HS_PHY_TYPE_SHIFT; - hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> - GHWCFG2_FS_PHY_TYPE_SHIFT; - hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >> - GHWCFG2_NUM_DEV_EP_SHIFT; - hw->nperio_tx_q_depth = - (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >> - GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1; - hw->host_perio_tx_q_depth = - (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >> - GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1; - hw->dev_token_q_depth = - (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >> - GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT; - - /* hwcfg3 */ - width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> - GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; - hw->max_transfer_size = (1 << (width + 11)) - 1; - width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> - GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; - hw->max_packet_count = (1 << (width + 4)) - 1; - hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C); - hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >> - GHWCFG3_DFIFO_DEPTH_SHIFT; - - /* hwcfg4 */ - hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); - hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> - GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; - hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); - hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); - hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> - GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT; - - /* fifo sizes */ - hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> - GRXFSIZ_DEPTH_SHIFT; - - dev_dbg(hsotg->dev, "Detected values from hardware:\n"); - dev_dbg(hsotg->dev, " op_mode=%d\n", - hw->op_mode); - dev_dbg(hsotg->dev, " arch=%d\n", - hw->arch); - dev_dbg(hsotg->dev, " dma_desc_enable=%d\n", - hw->dma_desc_enable); - dev_dbg(hsotg->dev, " power_optimized=%d\n", - hw->power_optimized); - dev_dbg(hsotg->dev, " i2c_enable=%d\n", - hw->i2c_enable); - dev_dbg(hsotg->dev, " hs_phy_type=%d\n", - hw->hs_phy_type); - dev_dbg(hsotg->dev, " fs_phy_type=%d\n", - hw->fs_phy_type); - dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n", - hw->utmi_phy_data_width); - dev_dbg(hsotg->dev, " num_dev_ep=%d\n", - hw->num_dev_ep); - dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n", - hw->num_dev_perio_in_ep); - dev_dbg(hsotg->dev, " host_channels=%d\n", - hw->host_channels); - dev_dbg(hsotg->dev, " max_transfer_size=%d\n", - hw->max_transfer_size); - dev_dbg(hsotg->dev, " max_packet_count=%d\n", - hw->max_packet_count); - dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n", - hw->nperio_tx_q_depth); - dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n", - hw->host_perio_tx_q_depth); - dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n", - hw->dev_token_q_depth); - dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n", - hw->enable_dynamic_fifo); - dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n", - hw->en_multiple_tx_fifo); - dev_dbg(hsotg->dev, " total_fifo_size=%d\n", - hw->total_fifo_size); - dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n", - hw->host_rx_fifo_size); - dev_dbg(hsotg->dev, " 
host_nperio_tx_fifo_size=%d\n", - hw->host_nperio_tx_fifo_size); - dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n", - hw->host_perio_tx_fifo_size); - dev_dbg(hsotg->dev, "\n"); - - return 0; -} - -/* - * Sets all parameters to the given value. - * - * Assumes that the dwc2_core_params struct contains only integers. - */ -void dwc2_set_all_params(struct dwc2_core_params *params, int value) -{ - int *p = (int *)params; - size_t size = sizeof(*params) / sizeof(*p); - int i; - - for (i = 0; i < size; i++) - p[i] = value; -} - - u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg) { - return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103; + return hsotg->params.otg_ver == 1 ? 0x0200 : 0x0103; } bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 2a21a0414b1d385347ac700b4db5fda2f502c523..9548d3e03453db062042600668840d5b4676e649 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -172,6 +172,11 @@ struct dwc2_hsotg_req; * @periodic: Set if this is a periodic ep, such as Interrupt * @isochronous: Set if this is a isochronous ep * @send_zlp: Set if we need to send a zero-length packet. + * @desc_list_dma: The DMA address of descriptor chain currently in use. + * @desc_list: Pointer to descriptor DMA chain head currently in use. + * @desc_count: Count of entries within the DMA descriptor chain of EP. + * @isoc_chain_num: Number of ISOC chain currently in use - either 0 or 1. + * @next_desc: index of next free descriptor in the ISOC chain under SW control. * @total_data: The total number of data bytes done. * @fifo_size: The size of the FIFO (for periodic IN endpoints) * @fifo_load: The amount of data loaded into the FIFO (periodic IN) @@ -219,6 +224,13 @@ struct dwc2_hsotg_ep { #define TARGET_FRAME_INITIAL 0xFFFFFFFF bool frame_overrun; + dma_addr_t desc_list_dma; + struct dwc2_dma_desc *desc_list; + u8 desc_count; + + unsigned char isoc_chain_num; + unsigned int next_desc; + char name[10]; }; @@ -286,7 +298,7 @@ enum dwc2_ep0_state { * @otg_ver: OTG version supported * 0 - 1.3 (default) * 1 - 2.0 - * @dma_enable: Specifies whether to use slave or DMA mode for accessing + * @host_dma: Specifies whether to use slave or DMA mode for accessing * the data FIFOs. The driver will automatically detect the * value for this parameter if none is specified. * 0 - Slave (always available) @@ -314,7 +326,8 @@ enum dwc2_ep0_state { * @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters * 1 - Allow dynamic FIFO sizing (default, if available) * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs - * are enabled + * are enabled for non-periodic IN endpoints in device + * mode. * @host_rx_fifo_size: Number of 4-byte words in the Rx FIFO in host mode when * dynamic FIFO sizing is enabled * 16 to 32768 @@ -417,6 +430,20 @@ enum dwc2_ep0_state { * needed. * 0 - No (default) * 1 - Yes + * @g_dma: Enables gadget dma usage (default: autodetect). + * @g_dma_desc: Enables gadget descriptor DMA (default: autodetect). + * @g_rx_fifo_size: The periodic rx fifo size for the device, in + * DWORDS from 16-32768 (default: 2048 if + * possible, otherwise autodetect). + * @g_np_tx_fifo_size: The non-periodic tx fifo size for the device in + * DWORDS from 16-32768 (default: 1024 if + * possible, otherwise autodetect). + * @g_tx_fifo_size: An array of TX fifo sizes in dedicated fifo + * mode. Each value corresponds to one EP + * starting from EP1 (max 15 values). 
Sizes are + * in DWORDS with possible values from from + * 16-32768 (default: 256, 256, 256, 256, 768, + * 768, 768, 768, 0, 0, 0, 0, 0, 0, 0). * * The following parameters may be specified when starting the module. These * parameters define how the DWC_otg controller should be configured. A @@ -430,11 +457,18 @@ struct dwc2_core_params { * dwc2_set_all_params! */ int otg_cap; +#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0 +#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1 +#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 + int otg_ver; - int dma_enable; int dma_desc_enable; int dma_desc_fs_enable; int speed; +#define DWC2_SPEED_PARAM_HIGH 0 +#define DWC2_SPEED_PARAM_FULL 1 +#define DWC2_SPEED_PARAM_LOW 2 + int enable_dynamic_fifo; int en_multiple_tx_fifo; int host_rx_fifo_size; @@ -444,19 +478,44 @@ struct dwc2_core_params { int max_packet_count; int host_channels; int phy_type; +#define DWC2_PHY_TYPE_PARAM_FS 0 +#define DWC2_PHY_TYPE_PARAM_UTMI 1 +#define DWC2_PHY_TYPE_PARAM_ULPI 2 + int phy_utmi_width; int phy_ulpi_ddr; int phy_ulpi_ext_vbus; +#define DWC2_PHY_ULPI_INTERNAL_VBUS 0 +#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1 + int i2c_enable; int ulpi_fs_ls; int host_support_fs_ls_low_power; int host_ls_low_power_phy_clk; +#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 +#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 + int ts_dline; int reload_ctl; int ahbcfg; int uframe_sched; int external_id_pin_ctl; int hibernation; + + /* + * The following parameters are *only* set via device + * properties and cannot be set directly in this structure. + */ + + /* Host parameters */ + bool host_dma; + + /* Gadget parameters */ + bool g_dma; + bool g_dma_desc; + u16 g_rx_fifo_size; + u16 g_np_tx_fifo_size; + u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; }; /** @@ -516,10 +575,9 @@ struct dwc2_hw_params { unsigned op_mode:3; unsigned arch:2; unsigned dma_desc_enable:1; - unsigned dma_desc_fs_enable:1; unsigned enable_dynamic_fifo:1; unsigned en_multiple_tx_fifo:1; - unsigned host_rx_fifo_size:16; + unsigned rx_fifo_size:16; unsigned host_nperio_tx_fifo_size:16; unsigned dev_nperio_tx_fifo_size:16; unsigned host_perio_tx_fifo_size:16; @@ -839,11 +897,13 @@ struct dwc2_hregs_backup { * @ctrl_req: Request for EP0 control packets. 
* @ep0_state: EP0 control transfers state * @test_mode: USB test mode requested by the host + * @setup_desc_dma: EP0 setup stage desc chain DMA address + * @setup_desc: EP0 setup stage desc chain pointer + * @ctrl_in_desc_dma: EP0 IN data phase desc chain DMA address + * @ctrl_in_desc: EP0 IN data phase desc chain pointer + * @ctrl_out_desc_dma: EP0 OUT data phase desc chain DMA address + * @ctrl_out_desc: EP0 OUT data phase desc chain pointer * @eps: The endpoints being supplied to the gadget framework - * @g_using_dma: Indicate if dma usage is enabled - * @g_rx_fifo_sz: Contains rx fifo size value - * @g_np_g_tx_fifo_sz: Contains Non-Periodic tx fifo size value - * @g_tx_fifo_sz: Contains tx fifo size value per endpoints */ struct dwc2_hsotg { struct device *dev; @@ -851,7 +911,7 @@ struct dwc2_hsotg { /** Params detected from hardware */ struct dwc2_hw_params hw_params; /** Params to actually use */ - struct dwc2_core_params *core_params; + struct dwc2_core_params params; enum usb_otg_state op_state; enum usb_dr_mode dr_mode; unsigned int hcd_enabled:1; @@ -891,6 +951,8 @@ struct dwc2_hsotg { #define DWC2_CORE_REV_2_94a 0x4f54294a #define DWC2_CORE_REV_3_00a 0x4f54300a #define DWC2_CORE_REV_3_10a 0x4f54310a +#define DWC2_FS_IOT_REV_1_00a 0x5531100a +#define DWC2_HS_IOT_REV_1_00a 0x5532100a #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) union dwc2_hcd_internal_flags { @@ -986,15 +1048,18 @@ struct dwc2_hsotg { enum dwc2_ep0_state ep0_state; u8 test_mode; + dma_addr_t setup_desc_dma[2]; + struct dwc2_dma_desc *setup_desc[2]; + dma_addr_t ctrl_in_desc_dma; + struct dwc2_dma_desc *ctrl_in_desc; + dma_addr_t ctrl_out_desc_dma; + struct dwc2_dma_desc *ctrl_out_desc; + struct usb_gadget gadget; unsigned int enabled:1; unsigned int connected:1; struct dwc2_hsotg_ep *eps_in[MAX_EPS_CHANNELS]; struct dwc2_hsotg_ep *eps_out[MAX_EPS_CHANNELS]; - u32 g_using_dma; - u32 g_rx_fifo_sz; - u32 g_np_g_tx_fifo_sz; - u32 g_tx_fifo_sz[MAX_EPS_CHANNELS]; #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ }; @@ -1016,6 +1081,22 @@ enum dwc2_halt_status { DWC2_HC_XFER_URB_DEQUEUE, }; +/* Core version information */ +static inline bool dwc2_is_iot(struct dwc2_hsotg *hsotg) +{ + return (hsotg->hw_params.snpsid & 0xfff00000) == 0x55300000; +} + +static inline bool dwc2_is_fs_iot(struct dwc2_hsotg *hsotg) +{ + return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55310000; +} + +static inline bool dwc2_is_hs_iot(struct dwc2_hsotg *hsotg) +{ + return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55320000; +} + /* * The following functions support initialization of the core driver component * and the DWC_otg controller @@ -1025,6 +1106,8 @@ extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg); extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg); extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore); +bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host); +void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg); void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg); extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg); @@ -1044,217 +1127,16 @@ extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd); /* This function should be called on every hardware interrupt. */ extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev); -/* OTG Core Parameters */ - -/* - * Specifies the OTG capabilities. The driver will automatically - * detect the value for this parameter if none is specified. 
- * 0 - HNP and SRP capable (default) - * 1 - SRP Only capable - * 2 - No HNP/SRP capable - */ -extern void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val); -#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0 -#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1 -#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 - -/* - * Specifies whether to use slave or DMA mode for accessing the data - * FIFOs. The driver will automatically detect the value for this - * parameter if none is specified. - * 0 - Slave - * 1 - DMA (default, if available) - */ -extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val); - -/* - * When DMA mode is enabled specifies whether to use - * address DMA or DMA Descritor mode for accessing the data - * FIFOs in device mode. The driver will automatically detect - * the value for this parameter if none is specified. - * 0 - address DMA - * 1 - DMA Descriptor(default, if available) - */ -extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val); - -/* - * When DMA mode is enabled specifies whether to use - * address DMA or DMA Descritor mode with full speed devices - * for accessing the data FIFOs in host mode. - * 0 - address DMA - * 1 - FS DMA Descriptor(default, if available) - */ -extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, - int val); - -/* - * Specifies the maximum speed of operation in host and device mode. - * The actual speed depends on the speed of the attached device and - * the value of phy_type. The actual speed depends on the speed of the - * attached device. - * 0 - High Speed (default) - * 1 - Full Speed - */ -extern void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val); -#define DWC2_SPEED_PARAM_HIGH 0 -#define DWC2_SPEED_PARAM_FULL 1 - -/* - * Specifies whether low power mode is supported when attached - * to a Full Speed or Low Speed device in host mode. - * - * 0 - Don't support low power mode (default) - * 1 - Support low power mode - */ -extern void dwc2_set_param_host_support_fs_ls_low_power( - struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies the PHY clock rate in low power mode when connected to a - * Low Speed device in host mode. This parameter is applicable only if - * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS - * then defaults to 6 MHZ otherwise 48 MHZ. - * - * 0 - 48 MHz - * 1 - 6 MHz - */ -extern void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, - int val); -#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 -#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 - -/* - * 0 - Use cC FIFO size parameters - * 1 - Allow dynamic FIFO sizing (default) - */ -extern void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, - int val); - -/* - * Number of 4-byte words in the Rx FIFO in host mode when dynamic - * FIFO sizing is enabled. - * 16 to 32768 (default 1024) - */ -extern void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val); - -/* - * Number of 4-byte words in the non-periodic Tx FIFO in host mode - * when Dynamic FIFO sizing is enabled in the core. - * 16 to 32768 (default 256) - */ -extern void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, - int val); - -/* - * Number of 4-byte words in the host periodic Tx FIFO when dynamic - * FIFO sizing is enabled. - * 16 to 32768 (default 256) - */ -extern void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, - int val); - -/* - * The maximum transfer size supported in bytes. 
- * 2047 to 65,535 (default 65,535) - */ -extern void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val); - -/* - * The maximum number of packets in a transfer. - * 15 to 511 (default 511) - */ -extern void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val); - -/* - * The number of host channel registers to use. - * 1 to 16 (default 11) - * Note: The FPGA configuration supports a maximum of 11 host channels. - */ -extern void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies the type of PHY interface to use. By default, the driver - * will automatically detect the phy_type. - * - * 0 - Full Speed PHY - * 1 - UTMI+ (default) - * 2 - ULPI - */ -extern void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val); -#define DWC2_PHY_TYPE_PARAM_FS 0 -#define DWC2_PHY_TYPE_PARAM_UTMI 1 -#define DWC2_PHY_TYPE_PARAM_ULPI 2 - -/* - * Specifies the UTMI+ Data Width. This parameter is - * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI - * PHY_TYPE, this parameter indicates the data width between - * the MAC and the ULPI Wrapper.) Also, this parameter is - * applicable only if the OTG_HSPHY_WIDTH cC parameter was set - * to "8 and 16 bits", meaning that the core has been - * configured to work at either data path width. - * - * 8 or 16 bits (default 16) - */ -extern void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies whether the ULPI operates at double or single - * data rate. This parameter is only applicable if PHY_TYPE is - * ULPI. - * - * 0 - single data rate ULPI interface with 8 bit wide data - * bus (default) - * 1 - double data rate ULPI interface with 4 bit wide data - * bus - */ -extern void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies whether to use the internal or external supply to - * drive the vbus with a ULPI phy. - */ -extern void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val); -#define DWC2_PHY_ULPI_INTERNAL_VBUS 0 -#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1 - -/* - * Specifies whether to use the I2Cinterface for full speed PHY. This - * parameter is only applicable if PHY_TYPE is FS. 
- * 0 - No (default) - * 1 - Yes - */ -extern void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies whether dedicated transmit FIFOs are - * enabled for non periodic IN endpoints in device mode - * 0 - No - * 1 - Yes - */ -extern void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, - int val); - -extern void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg, - const struct dwc2_core_params *params); - -extern void dwc2_set_all_params(struct dwc2_core_params *params, int value); - -extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg); +/* The device ID match table */ +extern const struct of_device_id dwc2_of_match_table[]; extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg); extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg); +/* Parameters */ +int dwc2_get_hwparams(struct dwc2_hsotg *hsotg); +int dwc2_init_params(struct dwc2_hsotg *hsotg); + /* * The following functions check the controller's OTG operation mode * capability (GHWCFG2.OTG_MODE). diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index d85c5c9f96c1dcbfc1d2706c4e9c20fe00c2ac74..5b228ba6045f15ae38a0d1a39e3e73adababa9ad 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -159,9 +159,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg) " ++OTG Interrupt: Session Request Success Status Change++\n"); gotgctl = dwc2_readl(hsotg->regs + GOTGCTL); if (gotgctl & GOTGCTL_SESREQSCS) { - if (hsotg->core_params->phy_type == + if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS - && hsotg->core_params->i2c_enable > 0) { + && hsotg->params.i2c_enable > 0) { hsotg->srp_success = 1; } else { /* Clear Session Request */ @@ -370,7 +370,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) /* Change to L0 state */ hsotg->lx_state = DWC2_L0; } else { - if (hsotg->core_params->hibernation) + if (hsotg->params.hibernation) return; if (hsotg->lx_state != DWC2_L1) { diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c index 55d91f24f94ab3aefd21c74626d7244fdccd7947..0a130916a91c68a85b1f3ad0ae89a393e0c6cc25 100644 --- a/drivers/usb/dwc2/debugfs.c +++ b/drivers/usb/dwc2/debugfs.c @@ -213,7 +213,7 @@ static int fifo_show(struct seq_file *seq, void *v) val = dwc2_readl(regs + GNPTXFSIZ); seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n", val >> FIFOSIZE_DEPTH_SHIFT, - val & FIFOSIZE_DEPTH_MASK); + val & FIFOSIZE_STARTADDR_MASK); seq_puts(seq, "\nPeriodic TXFIFOs:\n"); diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 24fbebc9b409050092c8a54296c445f129422e83..b95930f20d906dfc02d4d6db7dbe954e475d11cc 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -93,7 +93,18 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg); */ static inline bool using_dma(struct dwc2_hsotg *hsotg) { - return hsotg->g_using_dma; + return hsotg->params.g_dma; +} + +/* + * using_desc_dma - return the descriptor DMA status of the driver. + * @hsotg: The driver state. + * + * Return true if we're using descriptor DMA. 
+ */ +static inline bool using_desc_dma(struct dwc2_hsotg *hsotg) +{ + return hsotg->params.g_dma_desc; } /** @@ -190,16 +201,17 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) unsigned int addr; int timeout; u32 val; + u32 *txfsz = hsotg->params.g_tx_fifo_size; /* Reset fifo map if not correctly cleared during previous session */ WARN_ON(hsotg->fifo_map); hsotg->fifo_map = 0; /* set RX/NPTX FIFO sizes */ - dwc2_writel(hsotg->g_rx_fifo_sz, hsotg->regs + GRXFSIZ); - dwc2_writel((hsotg->g_rx_fifo_sz << FIFOSIZE_STARTADDR_SHIFT) | - (hsotg->g_np_g_tx_fifo_sz << FIFOSIZE_DEPTH_SHIFT), - hsotg->regs + GNPTXFSIZ); + dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ); + dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) | + (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT), + hsotg->regs + GNPTXFSIZ); /* * arange all the rest of the TX FIFOs, as some versions of this @@ -209,7 +221,7 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) */ /* start at the end of the GNPTXFSIZ, rounded up */ - addr = hsotg->g_rx_fifo_sz + hsotg->g_np_g_tx_fifo_sz; + addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size; /* * Configure fifos sizes from provided configuration and assign @@ -217,15 +229,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) * given endpoint. */ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) { - if (!hsotg->g_tx_fifo_sz[ep]) + if (!txfsz[ep]) continue; val = addr; - val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT; - WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem, + val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT; + WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem, "insufficient fifo memory"); - addr += hsotg->g_tx_fifo_sz[ep]; + addr += txfsz[ep]; dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep)); + val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep)); } /* @@ -303,12 +316,55 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_req *hs_req) { struct usb_request *req = &hs_req->req; + usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); +} - /* ignore this if we're not moving any data */ - if (hs_req->req.length == 0) - return; +/* + * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains + * for Control endpoint + * @hsotg: The device state. + * + * This function will allocate 4 descriptor chains for EP 0: 2 for + * Setup stage, per one for IN and OUT data/status transactions. 
+ */ +static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg) +{ + hsotg->setup_desc[0] = + dmam_alloc_coherent(hsotg->dev, + sizeof(struct dwc2_dma_desc), + &hsotg->setup_desc_dma[0], + GFP_KERNEL); + if (!hsotg->setup_desc[0]) + goto fail; + + hsotg->setup_desc[1] = + dmam_alloc_coherent(hsotg->dev, + sizeof(struct dwc2_dma_desc), + &hsotg->setup_desc_dma[1], + GFP_KERNEL); + if (!hsotg->setup_desc[1]) + goto fail; + + hsotg->ctrl_in_desc = + dmam_alloc_coherent(hsotg->dev, + sizeof(struct dwc2_dma_desc), + &hsotg->ctrl_in_desc_dma, + GFP_KERNEL); + if (!hsotg->ctrl_in_desc) + goto fail; + + hsotg->ctrl_out_desc = + dmam_alloc_coherent(hsotg->dev, + sizeof(struct dwc2_dma_desc), + &hsotg->ctrl_out_desc_dma, + GFP_KERNEL); + if (!hsotg->ctrl_out_desc) + goto fail; - usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); + return 0; + +fail: + return -ENOMEM; } /** @@ -540,6 +596,273 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) return dsts; } +/** + * dwc2_gadget_get_chain_limit - get the maximum data payload value of the + * DMA descriptor chain prepared for specific endpoint + * @hs_ep: The endpoint + * + * Return the maximum data that can be queued in one go on a given endpoint + * depending on its descriptor chain capacity so that transfers that + * are too long can be split. + */ +static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) +{ + int is_isoc = hs_ep->isochronous; + unsigned int maxsize; + + if (is_isoc) + maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : + DEV_DMA_ISOC_RX_NBYTES_LIMIT; + else + maxsize = DEV_DMA_NBYTES_LIMIT; + + /* Above size of one descriptor was chosen, multiple it */ + maxsize *= MAX_DMA_DESC_NUM_GENERIC; + + return maxsize; +} + +/* + * dwc2_gadget_get_desc_params - get DMA descriptor parameters. + * @hs_ep: The endpoint + * @mask: RX/TX bytes mask to be defined + * + * Returns maximum data payload for one descriptor after analyzing endpoint + * characteristics. + * DMA descriptor transfer bytes limit depends on EP type: + * Control out - MPS, + * Isochronous - descriptor rx/tx bytes bitfield limit, + * Control In/Bulk/Interrupt - multiple of mps. This will allow to not + * have concatenations from various descriptors within one packet. + * + * Selects corresponding mask for RX/TX bytes as well. + */ +static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) +{ + u32 mps = hs_ep->ep.maxpacket; + int dir_in = hs_ep->dir_in; + u32 desc_size = 0; + + if (!hs_ep->index && !dir_in) { + desc_size = mps; + *mask = DEV_DMA_NBYTES_MASK; + } else if (hs_ep->isochronous) { + if (dir_in) { + desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT; + *mask = DEV_DMA_ISOC_TX_NBYTES_MASK; + } else { + desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT; + *mask = DEV_DMA_ISOC_RX_NBYTES_MASK; + } + } else { + desc_size = DEV_DMA_NBYTES_LIMIT; + *mask = DEV_DMA_NBYTES_MASK; + + /* Round down desc_size to be mps multiple */ + desc_size -= desc_size % mps; + } + + return desc_size; +} + +/* + * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain. + * @hs_ep: The endpoint + * @dma_buff: DMA address to use + * @len: Length of the transfer + * + * This function will iterate over descriptor chain and fill its entries + * with corresponding information based on transfer data. 
+ */ +static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep, + dma_addr_t dma_buff, + unsigned int len) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + int dir_in = hs_ep->dir_in; + struct dwc2_dma_desc *desc = hs_ep->desc_list; + u32 mps = hs_ep->ep.maxpacket; + u32 maxsize = 0; + u32 offset = 0; + u32 mask = 0; + int i; + + maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); + + hs_ep->desc_count = (len / maxsize) + + ((len % maxsize) ? 1 : 0); + if (len == 0) + hs_ep->desc_count = 1; + + for (i = 0; i < hs_ep->desc_count; ++i) { + desc->status = 0; + desc->status |= (DEV_DMA_BUFF_STS_HBUSY + << DEV_DMA_BUFF_STS_SHIFT); + + if (len > maxsize) { + if (!hs_ep->index && !dir_in) + desc->status |= (DEV_DMA_L | DEV_DMA_IOC); + + desc->status |= (maxsize << + DEV_DMA_NBYTES_SHIFT & mask); + desc->buf = dma_buff + offset; + + len -= maxsize; + offset += maxsize; + } else { + desc->status |= (DEV_DMA_L | DEV_DMA_IOC); + + if (dir_in) + desc->status |= (len % mps) ? DEV_DMA_SHORT : + ((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0); + if (len > maxsize) + dev_err(hsotg->dev, "wrong len %d\n", len); + + desc->status |= + len << DEV_DMA_NBYTES_SHIFT & mask; + desc->buf = dma_buff + offset; + } + + desc->status &= ~DEV_DMA_BUFF_STS_MASK; + desc->status |= (DEV_DMA_BUFF_STS_HREADY + << DEV_DMA_BUFF_STS_SHIFT); + desc++; + } +} + +/* + * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain. + * @hs_ep: The isochronous endpoint. + * @dma_buff: usb request's dma buffer. + * @len: usb request transfer length. + * + * Finds the index of the first free entry in either the bottom or the upper + * half of the descriptor chain, depending on which one is under SW control + * and not processed by HW. Then fills that descriptor with the data of the + * arrived usb request, frame info, sets the Last and IOC bits, and increments + * next_desc. If the filled descriptor is not the first one, removes the L bit + * from the previous descriptor status. + */ +static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, + dma_addr_t dma_buff, unsigned int len) +{ + struct dwc2_dma_desc *desc; + struct dwc2_hsotg *hsotg = hs_ep->parent; + u32 index; + u32 maxsize = 0; + u32 mask = 0; + + maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); + if (len > maxsize) { + dev_err(hsotg->dev, "wrong len %d\n", len); + return -EINVAL; + } + + /* + * If SW has already filled half of the chain, then return and wait for + * the other chain to be processed by HW. + */ + if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2) + return -EBUSY; + + /* Increment frame number by interval for IN */ + if (hs_ep->dir_in) + dwc2_gadget_incr_frame_num(hs_ep); + + index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num + + hs_ep->next_desc; + + /* Sanity check of calculated index */ + if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) || + (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) { + dev_err(hsotg->dev, "wrong index %d for iso chain\n", index); + return -EINVAL; + } + + desc = &hs_ep->desc_list[index]; + + /* Clear L bit of previous desc if there is more than one entry in the chain */ + if (hs_ep->next_desc) + hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L; + + dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n", + __func__, hs_ep->index, hs_ep->dir_in ? 
"in" : "out", index); + + desc->status = 0; + desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT); + + desc->buf = dma_buff; + desc->status |= (DEV_DMA_L | DEV_DMA_IOC | + ((len << DEV_DMA_NBYTES_SHIFT) & mask)); + + if (hs_ep->dir_in) { + desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) & + DEV_DMA_ISOC_PID_MASK) | + ((len % hs_ep->ep.maxpacket) ? + DEV_DMA_SHORT : 0) | + ((hs_ep->target_frame << + DEV_DMA_ISOC_FRNUM_SHIFT) & + DEV_DMA_ISOC_FRNUM_MASK); + } + + desc->status &= ~DEV_DMA_BUFF_STS_MASK; + desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT); + + /* Update index of last configured entry in the chain */ + hs_ep->next_desc++; + + return 0; +} + +/* + * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA + * @hs_ep: The isochronous endpoint. + * + * Prepare first descriptor chain for isochronous endpoints. Afterwards + * write DMA address to HW and enable the endpoint. + * + * Switch between descriptor chains via isoc_chain_num to give SW opportunity + * to prepare second descriptor chain while first one is being processed by HW. + */ +static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + struct dwc2_hsotg_req *hs_req, *treq; + int index = hs_ep->index; + int ret; + u32 dma_reg; + u32 depctl; + u32 ctrl; + + if (list_empty(&hs_ep->queue)) { + dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__); + return; + } + + list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) { + ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma, + hs_req->req.length); + if (ret) { + dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__); + break; + } + } + + depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); + dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index); + + /* write descriptor chain address to control register */ + dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg); + + ctrl = dwc2_readl(hsotg->regs + depctl); + ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK; + dwc2_writel(ctrl, hsotg->regs + depctl); + + /* Switch ISOC descriptor chain number being processed by SW*/ + hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1; + hs_ep->next_desc = 0; +} + /** * dwc2_hsotg_start_req - start a USB request from an endpoint's queue * @hsotg: The controller state. @@ -565,6 +888,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, unsigned length; unsigned packets; unsigned maxreq; + unsigned int dma_reg; if (index != 0) { if (hs_ep->req && !continuing) { @@ -579,6 +903,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, } } + dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index); epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); epsize_reg = dir_in ? 
DIEPTSIZ(index) : DOEPTSIZ(index); @@ -598,7 +923,11 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n", ureq->length, ureq->actual); - maxreq = get_ep_limit(hs_ep); + if (!using_desc_dma(hsotg)) + maxreq = get_ep_limit(hs_ep); + else + maxreq = dwc2_gadget_get_chain_limit(hs_ep); + if (length > maxreq) { int round = maxreq % hs_ep->ep.maxpacket; @@ -650,22 +979,51 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, /* store the request as the current one we're doing */ hs_ep->req = hs_req; - /* write size / packets */ - dwc2_writel(epsize, hsotg->regs + epsize_reg); + if (using_desc_dma(hsotg)) { + u32 offset = 0; + u32 mps = hs_ep->ep.maxpacket; - if (using_dma(hsotg) && !continuing) { - unsigned int dma_reg; + /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */ + if (!dir_in) { + if (!index) + length = mps; + else if (length % mps) + length += (mps - (length % mps)); + } /* - * write DMA address to control register, buffer already - * synced by dwc2_hsotg_ep_queue(). + * If more data to send, adjust DMA for EP0 out data stage. + * ureq->dma stays unchanged, hence increment it by already + * passed passed data count before starting new transaction. */ + if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT && + continuing) + offset = ureq->actual; - dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index); - dwc2_writel(ureq->dma, hsotg->regs + dma_reg); + /* Fill DDMA chain entries */ + dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset, + length); - dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n", - __func__, &ureq->dma, dma_reg); + /* write descriptor chain address to control register */ + dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg); + + dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n", + __func__, (u32)hs_ep->desc_list_dma, dma_reg); + } else { + /* write size / packets */ + dwc2_writel(epsize, hsotg->regs + epsize_reg); + + if (using_dma(hsotg) && !continuing && (length != 0)) { + /* + * write DMA address to control register, buffer + * already synced by dwc2_hsotg_ep_queue(). + */ + + dwc2_writel(ureq->dma, hsotg->regs + dma_reg); + + dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n", + __func__, &ureq->dma, dma_reg); + } } if (hs_ep->isochronous && hs_ep->interval == 1) { @@ -738,13 +1096,8 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *hs_ep, struct usb_request *req) { - struct dwc2_hsotg_req *hs_req = our_req(req); int ret; - /* if the length is zero, ignore the DMA data */ - if (hs_req->req.length == 0) - return 0; - ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in); if (ret) goto dma_error; @@ -835,6 +1188,41 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep) return false; } +/* + * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers + * @hsotg: The driver state + * @hs_ep: the ep descriptor chain is for + * + * Called to update EP0 structure's pointers depend on stage of + * control transfer. 
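+ * In the switch below, the SETUP and STATUS_OUT stages use setup_desc[0], + * DATA_IN and STATUS_IN use ctrl_in_desc, DATA_OUT uses ctrl_out_desc, and + * any other ep0_state value is rejected with -EINVAL.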
+ */ +static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg, + struct dwc2_hsotg_ep *hs_ep) +{ + switch (hsotg->ep0_state) { + case DWC2_EP0_SETUP: + case DWC2_EP0_STATUS_OUT: + hs_ep->desc_list = hsotg->setup_desc[0]; + hs_ep->desc_list_dma = hsotg->setup_desc_dma[0]; + break; + case DWC2_EP0_DATA_IN: + case DWC2_EP0_STATUS_IN: + hs_ep->desc_list = hsotg->ctrl_in_desc; + hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma; + break; + case DWC2_EP0_DATA_OUT: + hs_ep->desc_list = hsotg->ctrl_out_desc; + hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma; + break; + default: + dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n", + hsotg->ep0_state); + return -EINVAL; + } + + return 0; +} + static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { @@ -870,10 +1258,32 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, if (ret) return ret; } + /* If using descriptor DMA configure EP0 descriptor chain pointers */ + if (using_desc_dma(hs) && !hs_ep->index) { + ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep); + if (ret) + return ret; + } first = list_empty(&hs_ep->queue); list_add_tail(&hs_req->queue, &hs_ep->queue); + /* + * Handle DDMA isochronous transfers separately - just add new entry + * to the half of descriptor chain that is not processed by HW. + * Transfer will be started once SW gets either one of NAK or + * OutTknEpDis interrupts. + */ + if (using_desc_dma(hs) && hs_ep->isochronous && + hs_ep->target_frame != TARGET_FRAME_INITIAL) { + ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma, + hs_req->req.length); + if (ret) + dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__); + + return 0; + } + if (first) { if (!hs_ep->isochronous) { dwc2_hsotg_start_req(hs, hs_ep, hs_req, false); @@ -1099,10 +1509,8 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now); */ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep) { - if (list_empty(&hs_ep->queue)) - return NULL; - - return list_first_entry(&hs_ep->queue, struct dwc2_hsotg_req, queue); + return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req, + queue); } /** @@ -1440,14 +1848,21 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg, if (hs_ep->dir_in) dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n", - index); + index); else dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n", - index); + index); + if (using_desc_dma(hsotg)) { + /* Not specific buffer needed for ep0 ZLP */ + dma_addr_t dma = hs_ep->desc_list_dma; - dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | - DXEPTSIZ_XFERSIZE(0), hsotg->regs + - epsiz_reg); + dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); + dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0); + } else { + dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | + DXEPTSIZ_XFERSIZE(0), hsotg->regs + + epsiz_reg); + } ctrl = dwc2_readl(hsotg->regs + epctl_reg); ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */ @@ -1510,6 +1925,10 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, spin_lock(&hsotg->lock); } + /* In DDMA don't need to proceed to starting of next ISOC request */ + if (using_desc_dma(hsotg) && hs_ep->isochronous) + return; + /* * Look to see if there is anything else to do. 
Note, the completion * of the previous request may have caused a new request to be started @@ -1521,6 +1940,115 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, } } +/* + * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA + * @hs_ep: The endpoint the request was on. + * + * Get the first request from the ep queue and determine the descriptor on + * which the completion happened. Based on isoc_chain_num, SW discovers which + * half of the descriptor chain is currently in use by HW, adjusts dma_address + * and calculates the index of the completed descriptor from the value of the + * DEPDMA register. Update the actual length of the request and give it back + * to the gadget. + */ +static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + struct dwc2_hsotg_req *hs_req; + struct usb_request *ureq; + int index; + dma_addr_t dma_addr; + u32 dma_reg; + u32 depdma; + u32 desc_sts; + u32 mask; + + hs_req = get_ep_head(hs_ep); + if (!hs_req) { + dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__); + return; + } + ureq = &hs_req->req; + + dma_addr = hs_ep->desc_list_dma; + + /* + * If the lower half of the descriptor chain is currently in use by SW, + * that means the higher half is being processed by HW, so shift + * the DMA address to the higher half of the descriptor chain. + */ + if (!hs_ep->isoc_chain_num) + dma_addr += sizeof(struct dwc2_dma_desc) * + (MAX_DMA_DESC_NUM_GENERIC / 2); + + dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index); + depdma = dwc2_readl(hsotg->regs + dma_reg); + + index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1; + desc_sts = hs_ep->desc_list[index].status; + + mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK : + DEV_DMA_ISOC_RX_NBYTES_MASK; + ureq->actual = ureq->length - + ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT); + + /* Adjust actual length for ISOC Out if length is not aligned to 4 */ + if (!hs_ep->dir_in && ureq->length & 0x3) + ureq->actual += 4 - (ureq->length & 0x3); + + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); +} + +/* + * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any. + * @hs_ep: The isochronous endpoint to be re-enabled. + * + * If the ep has been disabled due to last descriptor servicing (IN endpoint) + * or BNA (OUT endpoint), check the status of the other half of the descriptor + * chain that was under SW control while HW was busy, and restart the endpoint + * if needed. + */ +static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + u32 depctl; + u32 dma_reg; + u32 ctrl; + u32 dma_addr = hs_ep->desc_list_dma; + unsigned char index = hs_ep->index; + + dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index); + depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); + + ctrl = dwc2_readl(hsotg->regs + depctl); + + /* + * EP was disabled if HW has processed the last descriptor or BNA was + * set. So restart the ep if SW has prepared a new descriptor chain in + * the ep_queue routine while HW was busy. 
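+ * The two halves of desc_list act as a ping-pong pair: isoc_chain_num + * selects the half that SW has just filled, the DMA address written below + * points at that half, and then ownership is flipped and next_desc is reset + * so that later ep_queue calls fill the other half.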
+ */ + if (!(ctrl & DXEPCTL_EPENA)) { + if (!hs_ep->next_desc) { + dev_dbg(hsotg->dev, "%s: No more ISOC requests\n", + __func__); + return; + } + + dma_addr += sizeof(struct dwc2_dma_desc) * + (MAX_DMA_DESC_NUM_GENERIC / 2) * + hs_ep->isoc_chain_num; + dwc2_writel(dma_addr, hsotg->regs + dma_reg); + + ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK; + dwc2_writel(ctrl, hsotg->regs + depctl); + + /* Switch ISOC descriptor chain number being processed by SW*/ + hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1; + hs_ep->next_desc = 0; + + dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n", + __func__); + } +} + /** * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint * @hsotg: The device state. @@ -1618,6 +2146,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, dwc2_writel(ctrl, hsotg->regs + epctl_reg); } +/* + * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc + * @hs_ep - The endpoint on which transfer went + * + * Iterate over endpoints descriptor chain and get info on bytes remained + * in DMA descriptors after transfer has completed. Used for non isoc EPs. + */ +static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + unsigned int bytes_rem = 0; + struct dwc2_dma_desc *desc = hs_ep->desc_list; + int i; + u32 status; + + if (!desc) + return -EINVAL; + + for (i = 0; i < hs_ep->desc_count; ++i) { + status = desc->status; + bytes_rem += status & DEV_DMA_NBYTES_MASK; + + if (status & DEV_DMA_STS_MASK) + dev_err(hsotg->dev, "descriptor %d closed with %x\n", + i, status & DEV_DMA_STS_MASK); + } + + return bytes_rem; +} + /** * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO * @hsotg: The device instance @@ -1648,6 +2206,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum) return; } + if (using_desc_dma(hsotg)) + size_left = dwc2_gadget_get_xfersize_ddma(hs_ep); + if (using_dma(hsotg)) { unsigned size_done; @@ -1682,7 +2243,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum) */ } - if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_DATA_OUT) { + /* DDMA IN status phase will start from StsPhseRcvd interrupt */ + if (!using_desc_dma(hsotg) && epnum == 0 && + hsotg->ep0_state == DWC2_EP0_DATA_OUT) { /* Move to STATUS IN */ dwc2_hsotg_ep0_zlp(hsotg, true); return; @@ -1812,17 +2375,17 @@ static u32 dwc2_hsotg_ep0_mps(unsigned int mps) * @hsotg: The driver state. * @ep: The index number of the endpoint * @mps: The maximum packet size in bytes + * @mc: The multicount value * * Configure the maximum packet size for the given endpoint, updating * the hardware control registers to reflect this. 
*/ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg, - unsigned int ep, unsigned int mps, unsigned int dir_in) + unsigned int ep, unsigned int mps, + unsigned int mc, unsigned int dir_in) { struct dwc2_hsotg_ep *hs_ep; void __iomem *regs = hsotg->regs; - u32 mpsval; - u32 mcval; u32 reg; hs_ep = index_to_ep(hsotg, ep, dir_in); @@ -1830,32 +2393,32 @@ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg, return; if (ep == 0) { + u32 mps_bytes = mps; + /* EP0 is a special case */ - mpsval = dwc2_hsotg_ep0_mps(mps); - if (mpsval > 3) + mps = dwc2_hsotg_ep0_mps(mps_bytes); + if (mps > 3) goto bad_mps; - hs_ep->ep.maxpacket = mps; + hs_ep->ep.maxpacket = mps_bytes; hs_ep->mc = 1; } else { - mpsval = mps & DXEPCTL_MPS_MASK; - if (mpsval > 1024) + if (mps > 1024) goto bad_mps; - mcval = ((mps >> 11) & 0x3) + 1; - hs_ep->mc = mcval; - if (mcval > 3) + hs_ep->mc = mc; + if (mc > 3) goto bad_mps; - hs_ep->ep.maxpacket = mpsval; + hs_ep->ep.maxpacket = mps; } if (dir_in) { reg = dwc2_readl(regs + DIEPCTL(ep)); reg &= ~DXEPCTL_MPS_MASK; - reg |= mpsval; + reg |= mps; dwc2_writel(reg, regs + DIEPCTL(ep)); } else { reg = dwc2_readl(regs + DOEPCTL(ep)); reg &= ~DXEPCTL_MPS_MASK; - reg |= mpsval; + reg |= mps; dwc2_writel(reg, regs + DOEPCTL(ep)); } @@ -1954,6 +2517,13 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, /* Finish ZLP handling for IN EP0 transactions */ if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) { dev_dbg(hsotg->dev, "zlp packet sent\n"); + + /* + * While send zlp for DWC2_EP0_STATUS_IN EP direction was + * changed to IN. Change back to complete OUT transfer request + */ + hs_ep->dir_in = 0; + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); if (hsotg->test_mode) { int ret; @@ -1979,8 +2549,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, * past the end of the buffer (DMA transfers are always 32bit * aligned). */ - - size_left = DXEPTSIZ_XFERSIZE_GET(epsize); + if (using_desc_dma(hsotg)) { + size_left = dwc2_gadget_get_xfersize_ddma(hs_ep); + if (size_left < 0) + dev_err(hsotg->dev, "error parsing DDMA results %d\n", + size_left); + } else { + size_left = DXEPTSIZ_XFERSIZE_GET(epsize); + } size_done = hs_ep->size_loaded - size_left; size_done += hs_ep->last_load; @@ -2128,12 +2704,28 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) struct dwc2_hsotg *hsotg = ep->parent; int dir_in = ep->dir_in; u32 doepmsk; + u32 tmp; if (dir_in || !ep->isochronous) return; + /* + * Store frame in which irq was asserted here, as + * it can change while completing request below. 
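+ * The hardware frame counter keeps advancing while + * dwc2_hsotg_complete_request() runs the completion callback, so the value + * is latched into tmp first and only written to target_frame once the + * request has been given back.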
+ */ + tmp = dwc2_hsotg_read_frameno(hsotg); + dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA); + if (using_desc_dma(hsotg)) { + if (ep->target_frame == TARGET_FRAME_INITIAL) { + /* Start first ISO Out */ + ep->target_frame = tmp; + dwc2_gadget_start_isoc_ddma(ep); + } + return; + } + if (ep->interval > 1 && ep->target_frame == TARGET_FRAME_INITIAL) { u32 dsts; @@ -2182,6 +2774,12 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) if (hs_ep->target_frame == TARGET_FRAME_INITIAL) { hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg); + + if (using_desc_dma(hsotg)) { + dwc2_gadget_start_isoc_ddma(hs_ep); + return; + } + if (hs_ep->interval > 1) { u32 ctrl = dwc2_readl(hsotg->regs + DIEPCTL(hs_ep->index)); @@ -2237,8 +2835,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD))) ints &= ~DXEPINT_XFERCOMPL; - if (ints & DXEPINT_STSPHSERCVD) - dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__); + /* + * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP + * stage and xfercomplete was generated without SETUP phase done + * interrupt. SW should parse received setup packet only after host's + * exit from setup phase of control transfer. + */ + if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in && + hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP)) + ints &= ~DXEPINT_XFERCOMPL; if (ints & DXEPINT_XFERCOMPL) { dev_dbg(hsotg->dev, @@ -2246,11 +2851,17 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, __func__, dwc2_readl(hsotg->regs + epctl_reg), dwc2_readl(hsotg->regs + epsiz_reg)); - /* - * we get OutDone from the FIFO, so we only need to look - * at completing IN requests here - */ - if (dir_in) { + /* In DDMA handle isochronous requests separately */ + if (using_desc_dma(hsotg) && hs_ep->isochronous) { + dwc2_gadget_complete_isoc_request_ddma(hs_ep); + /* Try to start next isoc request */ + dwc2_gadget_start_next_isoc_ddma(hs_ep); + } else if (dir_in) { + /* + * We get OutDone from the FIFO, so we only + * need to look at completing IN requests here + * if operating slave mode + */ if (hs_ep->isochronous && hs_ep->interval > 1) dwc2_gadget_incr_frame_num(hs_ep); @@ -2302,9 +2913,30 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, } } + if (ints & DXEPINT_STSPHSERCVD) { + dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); + + /* Move to STATUS IN for DDMA */ + if (using_desc_dma(hsotg)) + dwc2_hsotg_ep0_zlp(hsotg, true); + } + if (ints & DXEPINT_BACK2BACKSETUP) dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__); + if (ints & DXEPINT_BNAINTR) { + dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__); + + /* + * Try to start next isoc request, if any. + * Sometimes the endpoint remains enabled after BNA interrupt + * assertion, which is not expected, hence we can enter here + * couple of times. 
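+ * This is harmless: dwc2_gadget_start_next_isoc_ddma() only re-enables the + * endpoint when EPENA is already clear and next_desc shows that a fresh + * half of the descriptor chain has been prepared.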
+ */ + if (hs_ep->isochronous) + dwc2_gadget_start_next_isoc_ddma(hs_ep); + } + if (dir_in && !hs_ep->isochronous) { /* not sure if this is important, but we'll clear it anyway */ if (ints & DXEPINT_INTKNTXFEMP) { @@ -2372,6 +3004,8 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg) case DSTS_ENUMSPD_LS: hsotg->gadget.speed = USB_SPEED_LOW; + ep0_mps = 8; + ep_mps = 8; /* * note, we don't actually support LS in this driver at the * moment, and the documentation seems to imply that it isn't @@ -2390,13 +3024,15 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg) if (ep0_mps) { int i; /* Initialize ep0 for both in and out directions */ - dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 1); - dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0); + dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1); + dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0); for (i = 1; i < hsotg->num_of_eps; i++) { if (hsotg->eps_in[i]) - dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 1); + dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, + 0, 1); if (hsotg->eps_out[i]) - dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 0); + dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, + 0, 0); } } @@ -2516,6 +3152,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, u32 intmsk; u32 val; u32 usbcfg; + u32 dcfg = 0; /* Kill any ep0 requests as controller will be reinitialized */ kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); @@ -2534,10 +3171,17 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | GUSBCFG_HNPCAP); - /* set the PLL on, remove the HNP/SRP and set the PHY */ - val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; - usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | - (val << GUSBCFG_USBTRDTIM_SHIFT); + if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && + (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || + hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) { + /* FS/LS Dedicated Transceiver Interface */ + usbcfg |= GUSBCFG_PHYSEL; + } else { + /* set the PLL on, remove the HNP/SRP and set the PHY */ + val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 
9 : 5; + usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | + (val << GUSBCFG_USBTRDTIM_SHIFT); + } dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); dwc2_hsotg_init_fifo(hsotg); @@ -2545,7 +3189,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, if (!is_usb_reset) __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON); - dwc2_writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS, hsotg->regs + DCFG); + dcfg |= DCFG_EPMISCNT(1); + + switch (hsotg->params.speed) { + case DWC2_SPEED_PARAM_LOW: + dcfg |= DCFG_DEVSPD_LS; + break; + case DWC2_SPEED_PARAM_FULL: + if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) + dcfg |= DCFG_DEVSPD_FS48; + else + dcfg |= DCFG_DEVSPD_FS; + break; + default: + dcfg |= DCFG_DEVSPD_HS; + } + + dwc2_writel(dcfg, hsotg->regs + DCFG); /* Clear any pending OTG interrupts */ dwc2_writel(0xffffffff, hsotg->regs + GOTGINT); @@ -2556,23 +3216,31 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF | GINTSTS_USBRST | GINTSTS_RESETDET | GINTSTS_ENUMDONE | GINTSTS_OTGINT | - GINTSTS_USBSUSP | GINTSTS_WKUPINT | - GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT; + GINTSTS_USBSUSP | GINTSTS_WKUPINT; - if (hsotg->core_params->external_id_pin_ctl <= 0) + if (!using_desc_dma(hsotg)) + intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT; + + if (hsotg->params.external_id_pin_ctl <= 0) intmsk |= GINTSTS_CONIDSTSCHNG; dwc2_writel(intmsk, hsotg->regs + GINTMSK); - if (using_dma(hsotg)) + if (using_dma(hsotg)) { dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN | (GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT), hsotg->regs + GAHBCFG); - else + + /* Set DDMA mode support in the core if needed */ + if (using_desc_dma(hsotg)) + __orr32(hsotg->regs + DCFG, DCFG_DESCDMA_EN); + + } else { dwc2_writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NP_TXF_EMP_LVL | GAHBCFG_P_TXF_EMP_LVL) : 0) | GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG); + } /* * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts @@ -2588,13 +3256,18 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, /* * don't need XferCompl, we get that from RXFIFO in slave mode. In - * DMA mode we may need this. + * DMA mode we may need this and StsPhseRcvd. */ - dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) | + dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK | + DOEPMSK_STSPHSERCVDMSK) : 0) | DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK | - DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK, + DOEPMSK_SETUPMSK, hsotg->regs + DOEPMSK); + /* Enable BNA interrupt for DDMA */ + if (using_desc_dma(hsotg)) + __orr32(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK); + dwc2_writel(0, hsotg->regs + DAINTMSK); dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", @@ -2935,6 +3608,95 @@ static irqreturn_t dwc2_hsotg_irq(int irq, void *pw) return IRQ_HANDLED; } +static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg, + u32 bit, u32 timeout) +{ + u32 i; + + for (i = 0; i < timeout; i++) { + if (dwc2_readl(hs_otg->regs + reg) & bit) + return 0; + udelay(1); + } + + return -ETIMEDOUT; +} + +static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, + struct dwc2_hsotg_ep *hs_ep) +{ + u32 epctrl_reg; + u32 epint_reg; + + epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) : + DOEPCTL(hs_ep->index); + epint_reg = hs_ep->dir_in ? 
DIEPINT(hs_ep->index) : + DOEPINT(hs_ep->index); + + dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__, + hs_ep->name); + + if (hs_ep->dir_in) { + if (hsotg->dedicated_fifos || hs_ep->periodic) { + __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK); + /* Wait for Nak effect */ + if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, + DXEPINT_INEPNAKEFF, 100)) + dev_warn(hsotg->dev, + "%s: timeout DIEPINT.NAKEFF\n", + __func__); + } else { + __orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK); + /* Wait for Nak effect */ + if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, + GINTSTS_GINNAKEFF, 100)) + dev_warn(hsotg->dev, + "%s: timeout GINTSTS.GINNAKEFF\n", + __func__); + } + } else { + if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF)) + __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK); + + /* Wait for global nak to take effect */ + if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, + GINTSTS_GOUTNAKEFF, 100)) + dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n", + __func__); + } + + /* Disable ep */ + __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK); + + /* Wait for ep to be disabled */ + if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100)) + dev_warn(hsotg->dev, + "%s: timeout DOEPCTL.EPDisable\n", __func__); + + /* Clear EPDISBLD interrupt */ + __orr32(hsotg->regs + epint_reg, DXEPINT_EPDISBLD); + + if (hs_ep->dir_in) { + unsigned short fifo_index; + + if (hsotg->dedicated_fifos || hs_ep->periodic) + fifo_index = hs_ep->fifo_index; + else + fifo_index = 0; + + /* Flush TX FIFO */ + dwc2_flush_tx_fifo(hsotg, fifo_index); + + /* Clear Global In NP NAK in Shared FIFO for non periodic ep */ + if (!hsotg->dedicated_fifos && !hs_ep->periodic) + __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK); + + } else { + /* Remove global NAKs */ + __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK); + } +} + /** * dwc2_hsotg_ep_enable - enable the given endpoint * @ep: The USB endpint to configure @@ -2952,6 +3714,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, u32 epctrl_reg; u32 epctrl; u32 mps; + u32 mc; u32 mask; unsigned int dir_in; unsigned int i, val, size; @@ -2975,6 +3738,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, } mps = usb_endpoint_maxp(desc); + mc = usb_endpoint_maxp_mult(desc); /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */ @@ -2984,6 +3748,18 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n", __func__, epctrl, epctrl_reg); + /* Allocate DMA descriptor chain for non-ctrl endpoints */ + if (using_desc_dma(hsotg)) { + hs_ep->desc_list = dma_alloc_coherent(hsotg->dev, + MAX_DMA_DESC_NUM_GENERIC * + sizeof(struct dwc2_dma_desc), + &hs_ep->desc_list_dma, GFP_KERNEL); + if (!hs_ep->desc_list) { + ret = -ENOMEM; + goto error2; + } + } + spin_lock_irqsave(&hsotg->lock, flags); epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK); @@ -2996,7 +3772,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, epctrl |= DXEPCTL_USBACTEP; /* update the endpoint state */ - dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in); + dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in); /* default, set to non-periodic */ hs_ep->isochronous = 0; @@ -3011,6 +3787,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, hs_ep->isochronous = 1; hs_ep->interval = 1 << (desc->bInterval - 1); hs_ep->target_frame = TARGET_FRAME_INITIAL; + hs_ep->isoc_chain_num = 0; + hs_ep->next_desc = 0; if (dir_in) { hs_ep->periodic = 1; mask = dwc2_readl(hsotg->regs + DIEPMSK); @@ -3067,7 +3845,7 @@ 
static int dwc2_hsotg_ep_enable(struct usb_ep *ep, dev_err(hsotg->dev, "%s: No suitable fifo found\n", __func__); ret = -ENOMEM; - goto error; + goto error1; } hsotg->fifo_map |= 1 << fifo_index; epctrl |= DXEPCTL_TXFNUM(fifo_index); @@ -3089,8 +3867,17 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, /* enable the endpoint interrupt */ dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1); -error: +error1: spin_unlock_irqrestore(&hsotg->lock, flags); + +error2: + if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { + dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + sizeof(struct dwc2_dma_desc), + hs_ep->desc_list, hs_ep->desc_list_dma); + hs_ep->desc_list = NULL; + } + return ret; } @@ -3115,11 +3902,23 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep) return -EINVAL; } + /* Remove DMA memory allocated for non-control Endpoints */ + if (using_desc_dma(hsotg)) { + dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + sizeof(struct dwc2_dma_desc), + hs_ep->desc_list, hs_ep->desc_list_dma); + hs_ep->desc_list = NULL; + } + epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); spin_lock_irqsave(&hsotg->lock, flags); ctrl = dwc2_readl(hsotg->regs + epctrl_reg); + + if (ctrl & DXEPCTL_EPENA) + dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep); + ctrl &= ~DXEPCTL_EPENA; ctrl &= ~DXEPCTL_USBACTEP; ctrl |= DXEPCTL_SNAK; @@ -3158,77 +3957,6 @@ static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test) return false; } -static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg, - u32 bit, u32 timeout) -{ - u32 i; - - for (i = 0; i < timeout; i++) { - if (dwc2_readl(hs_otg->regs + reg) & bit) - return 0; - udelay(1); - } - - return -ETIMEDOUT; -} - -static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, - struct dwc2_hsotg_ep *hs_ep) -{ - u32 epctrl_reg; - u32 epint_reg; - - epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) : - DOEPCTL(hs_ep->index); - epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) : - DOEPINT(hs_ep->index); - - dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__, - hs_ep->name); - if (hs_ep->dir_in) { - __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK); - /* Wait for Nak effect */ - if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, - DXEPINT_INEPNAKEFF, 100)) - dev_warn(hsotg->dev, - "%s: timeout DIEPINT.NAKEFF\n", __func__); - } else { - if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF)) - __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK); - - /* Wait for global nak to take effect */ - if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, - GINTSTS_GOUTNAKEFF, 100)) - dev_warn(hsotg->dev, - "%s: timeout GINTSTS.GOUTNAKEFF\n", __func__); - } - - /* Disable ep */ - __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK); - - /* Wait for ep to be disabled */ - if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100)) - dev_warn(hsotg->dev, - "%s: timeout DOEPCTL.EPDisable\n", __func__); - - if (hs_ep->dir_in) { - if (hsotg->dedicated_fifos) { - dwc2_writel(GRSTCTL_TXFNUM(hs_ep->fifo_index) | - GRSTCTL_TXFFLSH, hsotg->regs + GRSTCTL); - /* Wait for fifo flush */ - if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, - GRSTCTL_TXFFLSH, 100)) - dev_warn(hsotg->dev, - "%s: timeout flushing fifos\n", - __func__); - } - /* TODO: Flush shared tx fifo */ - } else { - /* Remove global NAKs */ - __bic32(hsotg->regs + DCTL, DCTL_SGOUTNAK); - } -} - /** * dwc2_hsotg_ep_dequeue - dequeue given endpoint * @ep: The endpoint to dequeue. 
@@ -3665,14 +4393,21 @@ static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg, hs_ep->parent = hsotg; hs_ep->ep.name = hs_ep->name; - usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT); + + if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW) + usb_ep_set_maxpacket_limit(&hs_ep->ep, 8); + else + usb_ep_set_maxpacket_limit(&hs_ep->ep, + epnum ? 1024 : EP0_MPS_LIMIT); hs_ep->ep.ops = &dwc2_hsotg_ep_ops; if (epnum == 0) { hs_ep->ep.caps.type_control = true; } else { - hs_ep->ep.caps.type_iso = true; - hs_ep->ep.caps.type_bulk = true; + if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) { + hs_ep->ep.caps.type_iso = true; + hs_ep->ep.caps.type_bulk = true; + } hs_ep->ep.caps.type_int = true; } @@ -3802,51 +4537,6 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg) #endif } -#ifdef CONFIG_OF -static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) -{ - struct device_node *np = hsotg->dev->of_node; - u32 len = 0; - u32 i = 0; - - /* Enable dma if requested in device tree */ - hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma"); - - /* - * Register TX periodic fifo size per endpoint. - * EP0 is excluded since it has no fifo configuration. - */ - if (!of_find_property(np, "g-tx-fifo-size", &len)) - goto rx_fifo; - - len /= sizeof(u32); - - /* Read tx fifo sizes other than ep0 */ - if (of_property_read_u32_array(np, "g-tx-fifo-size", - &hsotg->g_tx_fifo_sz[1], len)) - goto rx_fifo; - - /* Add ep0 */ - len++; - - /* Make remaining TX fifos unavailable */ - if (len < MAX_EPS_CHANNELS) { - for (i = len; i < MAX_EPS_CHANNELS; i++) - hsotg->g_tx_fifo_sz[i] = 0; - } - -rx_fifo: - /* Register RX fifo size */ - of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz); - - /* Register NPTX fifo size */ - of_property_read_u32(np, "g-np-tx-fifo-size", - &hsotg->g_np_g_tx_fifo_sz); -} -#else -static inline void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) { } -#endif - /** * dwc2_gadget_init - init function for gadget * @dwc2: The data structure for the DWC2 driver. @@ -3857,33 +4547,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) struct device *dev = hsotg->dev; int epnum; int ret; - int i; - u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE; - - /* Initialize to legacy fifo configuration values */ - hsotg->g_rx_fifo_sz = 2048; - hsotg->g_np_g_tx_fifo_sz = 1024; - memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo)); - /* Device tree specific probe */ - dwc2_hsotg_of_probe(hsotg); - - /* Check against largest possible value. 
*/ - if (hsotg->g_np_g_tx_fifo_sz > - hsotg->hw_params.dev_nperio_tx_fifo_size) { - dev_warn(dev, "Specified GNPTXFDEP=%d > %d\n", - hsotg->g_np_g_tx_fifo_sz, - hsotg->hw_params.dev_nperio_tx_fifo_size); - hsotg->g_np_g_tx_fifo_sz = - hsotg->hw_params.dev_nperio_tx_fifo_size; - } /* Dump fifo information */ dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", - hsotg->g_np_g_tx_fifo_sz); - dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz); - for (i = 0; i < MAX_EPS_CHANNELS; i++) - dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i, - hsotg->g_tx_fifo_sz[i]); + hsotg->params.g_np_tx_fifo_size); + dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size); hsotg->gadget.max_speed = USB_SPEED_HIGH; hsotg->gadget.ops = &dwc2_hsotg_gadget_ops; @@ -3909,6 +4577,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) if (!hsotg->ep0_buff) return -ENOMEM; + if (using_desc_dma(hsotg)) { + ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg); + if (ret < 0) + return ret; + } + ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED, dev_name(hsotg->dev), hsotg); if (ret < 0) { diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index df5a065780054f21841ca9f08b8ab118922c530b..911c3b36ac067028acdaa5d53bd0c9e8702f8942 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -79,9 +79,9 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) /* Enable the interrupts in the GINTMSK */ intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; - if (hsotg->core_params->dma_enable <= 0) + if (hsotg->params.host_dma <= 0) intmsk |= GINTSTS_RXFLVL; - if (hsotg->core_params->external_id_pin_ctl <= 0) + if (hsotg->params.external_id_pin_ctl <= 0) intmsk |= GINTSTS_CONIDSTSCHNG; intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | @@ -100,8 +100,8 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && - hsotg->core_params->ulpi_fs_ls > 0) || - hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { + hsotg->params.ulpi_fs_ls > 0) || + hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) { /* Full speed PHY */ val = HCFG_FSLSPCLKSEL_48_MHZ; } else { @@ -152,7 +152,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) if (dwc2_is_host_mode(hsotg)) dwc2_init_fs_ls_pclk_sel(hsotg); - if (hsotg->core_params->i2c_enable > 0) { + if (hsotg->params.i2c_enable > 0) { dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); /* Program GUSBCFG.OtgUtmiFsSel to I2C */ @@ -189,20 +189,20 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) * so only program the first time. Do a soft reset immediately after * setting phyif. 
*/ - switch (hsotg->core_params->phy_type) { + switch (hsotg->params.phy_type) { case DWC2_PHY_TYPE_PARAM_ULPI: /* ULPI interface */ dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); usbcfg |= GUSBCFG_ULPI_UTMI_SEL; usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); - if (hsotg->core_params->phy_ulpi_ddr > 0) + if (hsotg->params.phy_ulpi_ddr > 0) usbcfg |= GUSBCFG_DDRSEL; break; case DWC2_PHY_TYPE_PARAM_UTMI: /* UTMI+ interface */ dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); - if (hsotg->core_params->phy_utmi_width == 16) + if (hsotg->params.phy_utmi_width == 16) usbcfg |= GUSBCFG_PHYIF16; break; default: @@ -230,9 +230,10 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) u32 usbcfg; int retval = 0; - if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && - hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { - /* If FS mode with FS PHY */ + if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL || + hsotg->params.speed == DWC2_SPEED_PARAM_LOW) && + hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) { + /* If FS/LS mode with FS/LS PHY */ retval = dwc2_fs_phy_init(hsotg, select_phy); if (retval) return retval; @@ -245,7 +246,7 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && - hsotg->core_params->ulpi_fs_ls > 0) { + hsotg->params.ulpi_fs_ls > 0) { dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg |= GUSBCFG_ULPI_FS_LS; @@ -272,9 +273,9 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) case GHWCFG2_INT_DMA_ARCH: dev_dbg(hsotg->dev, "Internal DMA Mode\n"); - if (hsotg->core_params->ahbcfg != -1) { + if (hsotg->params.ahbcfg != -1) { ahbcfg &= GAHBCFG_CTRL_MASK; - ahbcfg |= hsotg->core_params->ahbcfg & + ahbcfg |= hsotg->params.ahbcfg & ~GAHBCFG_CTRL_MASK; } break; @@ -285,21 +286,21 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) break; } - dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", - hsotg->core_params->dma_enable, - hsotg->core_params->dma_desc_enable); + dev_dbg(hsotg->dev, "host_dma:%d dma_desc_enable:%d\n", + hsotg->params.host_dma, + hsotg->params.dma_desc_enable); - if (hsotg->core_params->dma_enable > 0) { - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->params.host_dma > 0) { + if (hsotg->params.dma_desc_enable > 0) dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); else dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); } else { dev_dbg(hsotg->dev, "Using Slave mode\n"); - hsotg->core_params->dma_desc_enable = 0; + hsotg->params.dma_desc_enable = 0; } - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) ahbcfg |= GAHBCFG_DMA_EN; dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); @@ -316,10 +317,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) switch (hsotg->hw_params.op_mode) { case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: - if (hsotg->core_params->otg_cap == + if (hsotg->params.otg_cap == DWC2_CAP_PARAM_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_HNPCAP; - if (hsotg->core_params->otg_cap != + if (hsotg->params.otg_cap != DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_SRPCAP; break; @@ -327,7 +328,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: - if (hsotg->core_params->otg_cap != + if (hsotg->params.otg_cap != 
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_SRPCAP; break; @@ -390,7 +391,7 @@ static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) */ static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) { - struct dwc2_core_params *params = hsotg->core_params; + struct dwc2_core_params *params = &hsotg->params; struct dwc2_hw_params *hw = &hsotg->hw_params; u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; @@ -449,7 +450,7 @@ static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) { - struct dwc2_core_params *params = hsotg->core_params; + struct dwc2_core_params *params = &hsotg->params; u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; if (!params->enable_dynamic_fifo) @@ -490,7 +491,7 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", dwc2_readl(hsotg->regs + HPTXFSIZ)); - if (hsotg->core_params->en_multiple_tx_fifo > 0 && + if (hsotg->params.en_multiple_tx_fifo > 0 && hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { /* * Global DFIFOCFG calculation for Host mode - @@ -598,7 +599,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) { #ifdef VERBOSE_DEBUG - int num_channels = hsotg->core_params->host_channels; + int num_channels = hsotg->params.host_channels; struct dwc2_qh *qh; u32 hcchar; u32 hcsplt; @@ -648,6 +649,35 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, #endif /* VERBOSE_DEBUG */ } +static int _dwc2_hcd_start(struct usb_hcd *hcd); + +static void dwc2_host_start(struct dwc2_hsotg *hsotg) +{ + struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); + + hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg); + _dwc2_hcd_start(hcd); +} + +static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg) +{ + struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); + + hcd->self.is_b_host = 0; +} + +static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, + int *hub_addr, int *hub_port) +{ + struct urb *urb = context; + + if (urb->dev->tt) + *hub_addr = urb->dev->tt->hub->devnum; + else + *hub_addr = 0; + *hub_port = urb->dev->ttport; +} + /* * ========================================================================= * Low Level Host Channel Access Functions @@ -741,7 +771,7 @@ static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, * For Descriptor DMA mode core halts the channel on AHB error. * Interrupt is not required. 
*/ - if (hsotg->core_params->dma_desc_enable <= 0) { + if (hsotg->params.dma_desc_enable <= 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "desc DMA disabled\n"); hcintmsk |= HCINTMSK_AHBERR; @@ -774,7 +804,7 @@ static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, { u32 intmsk; - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "DMA enabled\n"); dwc2_hc_enable_dma_ints(hsotg, chan); @@ -994,7 +1024,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, /* No need to set the bit in DDMA for disabling the channel */ /* TODO check it everywhere channel is disabled */ - if (hsotg->core_params->dma_desc_enable <= 0) { + if (hsotg->params.dma_desc_enable <= 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "desc DMA disabled\n"); hcchar |= HCCHAR_CHENA; @@ -1004,7 +1034,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, } hcchar |= HCCHAR_CHDIS; - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "DMA not enabled\n"); hcchar |= HCCHAR_CHENA; @@ -1143,7 +1173,7 @@ static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT; bytes_in_fifo = sizeof(u32) * - (hsotg->core_params->host_perio_tx_fifo_size - + (hsotg->params.host_perio_tx_fifo_size - fifo_space); /* @@ -1339,8 +1369,8 @@ static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) { - u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; - u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; + u32 max_hc_xfer_size = hsotg->params.max_transfer_size; + u16 max_hc_pkt_count = hsotg->params.max_packet_count; u32 hcchar; u32 hctsiz = 0; u16 num_packets; @@ -1350,7 +1380,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, dev_vdbg(hsotg->dev, "%s()\n", __func__); if (chan->do_ping) { - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "ping, no DMA\n"); dwc2_hc_do_ping(hsotg, chan); @@ -1478,7 +1508,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, TSIZ_SC_MC_PID_SHIFT); } - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { dwc2_writel((u32)chan->xfer_dma, hsotg->regs + HCDMA(chan->hc_num)); if (dbg_hc(chan)) @@ -1521,7 +1551,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, chan->xfer_started = 1; chan->requests++; - if (hsotg->core_params->dma_enable <= 0 && + if (hsotg->params.host_dma <= 0 && !chan->ep_is_in && chan->xfer_len > 0) /* Load OUT packet into the appropriate Tx FIFO */ dwc2_hc_write_packet(hsotg, chan); @@ -1799,12 +1829,12 @@ void dwc2_hcd_start(struct dwc2_hsotg *hsotg) /* Must be called with interrupt disabled and spinlock held */ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) { - int num_channels = hsotg->core_params->host_channels; + int num_channels = hsotg->params.host_channels; struct dwc2_host_chan *channel; u32 hcchar; int i; - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { /* Flush out any channel requests in slave mode */ for (i = 0; i < num_channels; i++) { channel = hsotg->hc_ptr_array[i]; @@ -1840,9 +1870,9 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) channel->qh = NULL; } /* All channels have been freed, mark them available 
*/ - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { hsotg->available_host_channels = - hsotg->core_params->host_channels; + hsotg->params.host_channels; } else { hsotg->non_periodic_channels = 0; hsotg->periodic_channels = 0; @@ -2077,7 +2107,7 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg, * Free the QTD and clean up the associated QH. Leave the QH in the * schedule if it has any remaining QTDs. */ - if (hsotg->core_params->dma_desc_enable <= 0) { + if (hsotg->params.dma_desc_enable <= 0) { u8 in_process = urb_qtd->in_process; dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); @@ -2185,13 +2215,13 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) /* Set ULPI External VBUS bit if needed */ usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; - if (hsotg->core_params->phy_ulpi_ext_vbus == + if (hsotg->params.phy_ulpi_ext_vbus == DWC2_PHY_ULPI_EXTERNAL_VBUS) usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; /* Set external TS Dline pulsing bit if needed */ usbcfg &= ~GUSBCFG_TERMSELDLPULSE; - if (hsotg->core_params->ts_dline > 0) + if (hsotg->params.ts_dline > 0) usbcfg |= GUSBCFG_TERMSELDLPULSE; dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); @@ -2230,10 +2260,10 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) /* Program the GOTGCTL register */ otgctl = dwc2_readl(hsotg->regs + GOTGCTL); otgctl &= ~GOTGCTL_OTGVER; - if (hsotg->core_params->otg_ver > 0) + if (hsotg->params.otg_ver > 0) otgctl |= GOTGCTL_OTGVER; dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); + dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->params.otg_ver); /* Clear the SRP success bit for FS-I2c */ hsotg->srp_success = 0; @@ -2277,7 +2307,8 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) /* Initialize Host Configuration Register */ dwc2_init_fs_ls_pclk_sel(hsotg); - if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { + if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || + hsotg->params.speed == DWC2_SPEED_PARAM_LOW) { hcfg = dwc2_readl(hsotg->regs + HCFG); hcfg |= HCFG_FSLSSUPP; dwc2_writel(hcfg, hsotg->regs + HCFG); @@ -2288,13 +2319,13 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) * runtime. This bit needs to be programmed during initial configuration * and its value must not be changed during runtime. 
*/ - if (hsotg->core_params->reload_ctl > 0) { + if (hsotg->params.reload_ctl > 0) { hfir = dwc2_readl(hsotg->regs + HFIR); hfir |= HFIR_RLDCTRL; dwc2_writel(hfir, hsotg->regs + HFIR); } - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { u32 op_mode = hsotg->hw_params.op_mode; if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || @@ -2306,7 +2337,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) "Hardware does not support descriptor DMA mode -\n"); dev_err(hsotg->dev, "falling back to buffer DMA mode.\n"); - hsotg->core_params->dma_desc_enable = 0; + hsotg->params.dma_desc_enable = 0; } else { hcfg = dwc2_readl(hsotg->regs + HCFG); hcfg |= HCFG_DESCDMA; @@ -2332,12 +2363,12 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) otgctl &= ~GOTGCTL_HSTSETHNPEN; dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - if (hsotg->core_params->dma_desc_enable <= 0) { + if (hsotg->params.dma_desc_enable <= 0) { int num_channels, i; u32 hcchar; /* Flush out any leftover queued requests */ - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; for (i = 0; i < num_channels; i++) { hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); hcchar &= ~HCCHAR_CHENA; @@ -2399,9 +2430,9 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg) hsotg->flags.d32 = 0; hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active; - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { hsotg->available_host_channels = - hsotg->core_params->host_channels; + hsotg->params.host_channels; } else { hsotg->non_periodic_channels = 0; hsotg->periodic_channels = 0; @@ -2415,7 +2446,7 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg) hc_list_entry) list_del_init(&chan->hc_list_entry); - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; for (i = 0; i < num_channels; i++) { chan = hsotg->hc_ptr_array[i]; list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); @@ -2457,7 +2488,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, chan->do_ping = 0; chan->ep_is_in = 0; chan->data_pid_start = DWC2_HC_PID_SETUP; - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) chan->xfer_dma = urb->setup_dma; else chan->xfer_buf = urb->setup_packet; @@ -2484,7 +2515,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, chan->do_ping = 0; chan->data_pid_start = DWC2_HC_PID_DATA1; chan->xfer_len = 0; - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) chan->xfer_dma = hsotg->status_buf_dma; else chan->xfer_buf = hsotg->status_buf; @@ -2502,13 +2533,13 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, case USB_ENDPOINT_XFER_ISOC: chan->ep_type = USB_ENDPOINT_XFER_ISOC; - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->params.dma_desc_enable > 0) break; frame_desc = &urb->iso_descs[qtd->isoc_frame_index]; frame_desc->status = 0; - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { chan->xfer_dma = urb->dma; chan->xfer_dma += frame_desc->offset + qtd->isoc_split_offset; @@ -2690,7 +2721,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) !dwc2_hcd_is_pipe_in(&urb->pipe_info)) urb->actual_length = urb->length; - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) chan->xfer_dma = urb->dma + urb->actual_length; else chan->xfer_buf = (u8 *)urb->buf + urb->actual_length; @@ -2715,7 +2746,7 @@ static int 
dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) */ chan->multi_count = dwc2_hb_mult(qh->maxp); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { chan->desc_list_addr = qh->desc_list_dma; chan->desc_list_sz = qh->desc_list_sz; } @@ -2752,7 +2783,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( while (qh_ptr != &hsotg->periodic_sched_ready) { if (list_empty(&hsotg->free_hc_list)) break; - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { if (hsotg->available_host_channels <= 1) break; hsotg->available_host_channels--; @@ -2776,17 +2807,17 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( * schedule. Some free host channels may not be used if they are * reserved for periodic transfers. */ - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; qh_ptr = hsotg->non_periodic_sched_inactive.next; while (qh_ptr != &hsotg->non_periodic_sched_inactive) { - if (hsotg->core_params->uframe_sched <= 0 && + if (hsotg->params.uframe_sched <= 0 && hsotg->non_periodic_channels >= num_channels - hsotg->periodic_channels) break; if (list_empty(&hsotg->free_hc_list)) break; qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { if (hsotg->available_host_channels < 1) break; hsotg->available_host_channels--; @@ -2808,7 +2839,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( else ret_val = DWC2_TRANSACTION_ALL; - if (hsotg->core_params->uframe_sched <= 0) + if (hsotg->params.uframe_sched <= 0) hsotg->non_periodic_channels++; } @@ -2847,8 +2878,8 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg, list_move_tail(&chan->split_order_list_entry, &hsotg->split_order); - if (hsotg->core_params->dma_enable > 0) { - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.host_dma > 0) { + if (hsotg->params.dma_desc_enable > 0) { if (!chan->xfer_started || chan->ep_type == USB_ENDPOINT_XFER_ISOC) { dwc2_hcd_start_xfer_ddma(hsotg, chan->qh); @@ -2957,7 +2988,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) * The flag prevents any halts to get into the request queue in * the middle of multiple high-bandwidth packets getting queued. */ - if (hsotg->core_params->dma_enable <= 0 && + if (hsotg->params.host_dma <= 0 && qh->channel->multi_count > 1) hsotg->queuing_high_bandwidth = 1; @@ -2976,7 +3007,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) * controller automatically handles multiple packets for * high-bandwidth transfers. 
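/*
 * Minimal standalone model (not driver code) of the two channel-accounting
 * schemes used by dwc2_hcd_select_transactions() above. With the microframe
 * scheduler (params.uframe_sched > 0) a single available_host_channels
 * counter is drawn down and one channel is always held back for
 * non-periodic traffic; without it, separate periodic/non-periodic counters
 * are compared against params.host_channels (the periodic-side legacy check
 * lives in dwc2_periodic_channel_available()). All names below are local to
 * this sketch.
 */
#include <stdbool.h>

struct chan_budget {
	bool uframe_sched;
	int host_channels;	/* total channels in the core */
	int available;		/* used when uframe_sched is set */
	int periodic;		/* used when it is not */
	int non_periodic;
};

static bool reserve_channel(struct chan_budget *b, bool periodic)
{
	if (b->uframe_sched) {
		/* periodic QHs must leave at least one channel free */
		if (periodic ? b->available <= 1 : b->available < 1)
			return false;
		b->available--;
		return true;
	}

	if (periodic) {
		if (b->periodic + b->non_periodic >= b->host_channels ||
		    b->periodic >= b->host_channels - 1)
			return false;
		b->periodic++;
	} else {
		if (b->non_periodic >= b->host_channels - b->periodic)
			return false;
		b->non_periodic++;
	}
	return true;
}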
*/ - if (hsotg->core_params->dma_enable > 0 || status == 0 || + if (hsotg->params.host_dma > 0 || status == 0 || qh->channel->requests == qh->channel->multi_count) { qh_ptr = qh_ptr->next; /* @@ -2993,7 +3024,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) exit: if (no_queue_space || no_fifo_space || - (hsotg->core_params->dma_enable <= 0 && + (hsotg->params.host_dma <= 0 && !list_empty(&hsotg->periodic_sched_assigned))) { /* * May need to queue more transactions as the request @@ -3073,7 +3104,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg) tx_status = dwc2_readl(hsotg->regs + GNPTXSTS); qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT; - if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) { + if (hsotg->params.host_dma <= 0 && qspcavail == 0) { no_queue_space = 1; break; } @@ -3106,7 +3137,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg) hsotg->non_periodic_qh_ptr->next; } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr); - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { tx_status = dwc2_readl(hsotg->regs + GNPTXSTS); qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT; @@ -3307,7 +3338,7 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex) * If hibernation is supported, Phy clock will be suspended * after registers are backuped. */ - if (!hsotg->core_params->hibernation) { + if (!hsotg->params.hibernation) { /* Suspend the Phy Clock */ pcgctl = dwc2_readl(hsotg->regs + PCGCTL); pcgctl |= PCGCTL_STOPPCLK; @@ -3342,7 +3373,7 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg) * If hibernation is supported, Phy clock is already resumed * after registers restore. */ - if (!hsotg->core_params->hibernation) { + if (!hsotg->params.hibernation) { pcgctl = dwc2_readl(hsotg->regs + PCGCTL); pcgctl &= ~PCGCTL_STOPPCLK; dwc2_writel(pcgctl, hsotg->regs + PCGCTL); @@ -3569,7 +3600,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, port_status |= USB_PORT_STAT_TEST; /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ - if (hsotg->core_params->dma_desc_fs_enable) { + if (hsotg->params.dma_desc_fs_enable) { /* * Enable descriptor DMA only if a full speed * device is connected. 
@@ -3583,7 +3614,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, u32 hcfg; dev_info(hsotg->dev, "Enabling descriptor DMA mode\n"); - hsotg->core_params->dma_desc_enable = 1; + hsotg->params.dma_desc_enable = 1; hcfg = dwc2_readl(hsotg->regs + HCFG); hcfg |= HCFG_DESCDMA; dwc2_writel(hcfg, hsotg->regs + HCFG); @@ -3824,7 +3855,7 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) u32 p_tx_status; int i; - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; dev_dbg(hsotg->dev, "\n"); dev_dbg(hsotg->dev, "************************************************************\n"); @@ -4020,35 +4051,6 @@ static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd) return p->hsotg; } -static int _dwc2_hcd_start(struct usb_hcd *hcd); - -void dwc2_host_start(struct dwc2_hsotg *hsotg) -{ - struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); - - hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg); - _dwc2_hcd_start(hcd); -} - -void dwc2_host_disconnect(struct dwc2_hsotg *hsotg) -{ - struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); - - hcd->self.is_b_host = 0; -} - -void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, - int *hub_port) -{ - struct urb *urb = context; - - if (urb->dev->tt) - *hub_addr = urb->dev->tt->hub->devnum; - else - *hub_addr = 0; - *hub_port = urb->dev->ttport; -} - /** * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context * @@ -4365,7 +4367,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (!HCD_HW_ACCESSIBLE(hcd)) goto unlock; - if (!hsotg->core_params->hibernation) + if (!hsotg->params.hibernation) goto skip_power_saving; /* @@ -4417,7 +4419,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd) if (hsotg->lx_state != DWC2_L2) goto unlock; - if (!hsotg->core_params->hibernation) { + if (!hsotg->params.hibernation) { hsotg->lx_state = DWC2_L0; goto unlock; } @@ -4510,9 +4512,6 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; - default: - pipetype = "UNKNOWN"; - break; } dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype, @@ -4609,8 +4608,6 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, case PIPE_INTERRUPT: ep_type = USB_ENDPOINT_XFER_INT; break; - default: - dev_warn(hsotg->dev, "Wrong ep type\n"); } dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets, @@ -4919,7 +4916,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg) } } - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { if (hsotg->status_buf) { dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, hsotg->status_buf, @@ -4999,16 +4996,16 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) hsotg->last_frame_num = HFNUM_MAX_FRNUM; /* Check if the bus driver or platform code has setup a dma_mask */ - if (hsotg->core_params->dma_enable > 0 && + if (hsotg->params.host_dma > 0 && hsotg->dev->dma_mask == NULL) { dev_warn(hsotg->dev, "dma_mask not set, disabling DMA\n"); - hsotg->core_params->dma_enable = 0; - hsotg->core_params->dma_desc_enable = 0; + hsotg->params.host_dma = 0; + hsotg->params.dma_desc_enable = 0; } /* Set device flags indicating whether the HCD supports DMA */ - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) dev_warn(hsotg->dev, "can't set DMA mask\n"); if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) @@ -5019,7 +5016,7 @@ int dwc2_hcd_init(struct 
dwc2_hsotg *hsotg, int irq) if (!hcd) goto error1; - if (hsotg->core_params->dma_enable <= 0) + if (hsotg->params.host_dma <= 0) hcd->self.uses_dma = 0; hcd->has_tt = 1; @@ -5067,7 +5064,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * in the controller. Initialize the channel descriptor array. */ INIT_LIST_HEAD(&hsotg->free_hc_list); - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array)); for (i = 0; i < num_channels; i++) { @@ -5091,7 +5088,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * done after usb_add_hcd since that function allocates the DMA buffer * pool. */ - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) hsotg->status_buf = dma_alloc_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, &hsotg->status_buf_dma, GFP_KERNEL); @@ -5107,10 +5104,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * DMA mode. * Alignment must be set to 512 bytes. */ - if (hsotg->core_params->dma_desc_enable || - hsotg->core_params->dma_desc_fs_enable) { + if (hsotg->params.dma_desc_enable || + hsotg->params.dma_desc_fs_enable) { hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc", - sizeof(struct dwc2_hcd_dma_desc) * + sizeof(struct dwc2_dma_desc) * MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA, NULL); if (!hsotg->desc_gen_cache) { @@ -5121,12 +5118,12 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * Disable descriptor dma mode since it will not be * usable. */ - hsotg->core_params->dma_desc_enable = 0; - hsotg->core_params->dma_desc_fs_enable = 0; + hsotg->params.dma_desc_enable = 0; + hsotg->params.dma_desc_fs_enable = 0; } hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc", - sizeof(struct dwc2_hcd_dma_desc) * + sizeof(struct dwc2_dma_desc) * MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL); if (!hsotg->desc_hsisoc_cache) { dev_err(hsotg->dev, @@ -5138,8 +5135,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * Disable descriptor dma mode since it will not be * usable. 
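/*
 * Size sanity check (standalone sketch) for the two descriptor caches
 * created above: a dwc2_dma_desc is two u32s, i.e. 8 bytes, so the generic
 * list is 64 * 8 = 512 bytes and the HS isochronous list is 256 * 8 = 2048
 * bytes. Both are whole multiples of the 512-byte alignment passed to
 * kmem_cache_create(), so the alignment itself adds no padding.
 */
#include <stdint.h>

struct dwc2_dma_desc_model { uint32_t status; uint32_t buf; };

_Static_assert(sizeof(struct dwc2_dma_desc_model) * 64 == 512,
	       "generic descriptor list fills one 512-byte aligned block");
_Static_assert(sizeof(struct dwc2_dma_desc_model) * 256 == 2048,
	       "HS isoc descriptor list fills four 512-byte blocks");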
*/ - hsotg->core_params->dma_desc_enable = 0; - hsotg->core_params->dma_desc_fs_enable = 0; + hsotg->params.dma_desc_enable = 0; + hsotg->params.dma_desc_fs_enable = 0; } } @@ -5184,7 +5181,6 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) error2: usb_put_hcd(hcd); error1: - kfree(hsotg->core_params); #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS kfree(hsotg->last_frame_num_array); @@ -5250,7 +5246,7 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) hr = &hsotg->hr_backup; hr->hcfg = dwc2_readl(hsotg->regs + HCFG); hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK); - for (i = 0; i < hsotg->core_params->host_channels; ++i) + for (i = 0; i < hsotg->params.host_channels; ++i) hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i)); hr->hprt0 = dwc2_read_hprt0(hsotg); @@ -5286,7 +5282,7 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) dwc2_writel(hr->hcfg, hsotg->regs + HCFG); dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK); - for (i = 0; i < hsotg->core_params->host_channels; ++i) + for (i = 0; i < hsotg->params.host_channels; ++i) dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i)); dwc2_writel(hr->hprt0, hsotg->regs + HPRT0); diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 7758bfb644ff89cea6334abb181395071b1d150a..1ed5fa2beff4a0b6a54486299419b70626ce89b5 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -348,7 +348,7 @@ struct dwc2_qh { struct list_head qtd_list; struct dwc2_host_chan *channel; struct list_head qh_list_entry; - struct dwc2_hcd_dma_desc *desc_list; + struct dwc2_dma_desc *desc_list; dma_addr_t desc_list_dma; u32 desc_list_sz; u32 *n_bytes; @@ -793,11 +793,6 @@ extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg); #define URB_SEND_ZERO_PACKET 0x2 /* Host driver callbacks */ - -extern void dwc2_host_start(struct dwc2_hsotg *hsotg); -extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg); -extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, - int *hub_addr, int *hub_port); extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context, gfp_t mem_flags, int *ttport); diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c index 0e1d42b5dec5289630c1eee94a3942c1c7b64820..cf0367768cb39b023ef4308c451953877945b6f0 100644 --- a/drivers/usb/dwc2/hcd_ddma.c +++ b/drivers/usb/dwc2/hcd_ddma.c @@ -95,7 +95,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, else desc_cache = hsotg->desc_gen_cache; - qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) * + qh->desc_list_sz = sizeof(struct dwc2_dma_desc) * dwc2_max_desc_num(qh); qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA); @@ -297,7 +297,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan = qh->channel; if (dwc2_qh_is_non_per(qh)) { - if (hsotg->core_params->uframe_sched > 0) + if (hsotg->params.uframe_sched > 0) hsotg->available_host_channels++; else hsotg->non_periodic_channels--; @@ -322,7 +322,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg, qh->ntd = 0; if (qh->desc_list) - memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) * + memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) * dwc2_max_desc_num(qh)); } @@ -404,7 +404,7 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC || qh->ep_type == USB_ENDPOINT_XFER_INT) && - (hsotg->core_params->uframe_sched > 0 || + (hsotg->params.uframe_sched > 0 || 
!hsotg->periodic_channels) && hsotg->frame_list) { dwc2_per_sched_disable(hsotg); dwc2_frame_list_free(hsotg); @@ -542,7 +542,7 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, u32 max_xfer_size, u16 idx) { - struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx]; + struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx]; struct dwc2_hcd_iso_packet_desc *frame_desc; memset(dma_desc, 0, sizeof(*dma_desc)); @@ -571,8 +571,8 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + - (idx * sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + (idx * sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); } @@ -645,8 +645,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, qh->desc_list[idx].status |= HOST_DMA_IOC; dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + (idx * - sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); } #else @@ -679,8 +679,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, qh->desc_list[idx].status |= HOST_DMA_IOC; dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + - (idx * sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + (idx * sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); #endif } @@ -690,11 +690,11 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, struct dwc2_qh *qh, int n_desc) { - struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc]; + struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc]; int len = chan->xfer_len; - if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1)) - len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1); + if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1)) + len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1); if (chan->ep_is_in) { int num_packets; @@ -721,8 +721,8 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + - (n_desc * sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + (n_desc * sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); /* @@ -778,8 +778,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + ((n_desc - 1) * - sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); } dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc); @@ -808,8 +808,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, n_desc - 1, &qh->desc_list[n_desc - 1]); dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + (n_desc - 1) * - sizeof(struct dwc2_hcd_dma_desc), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); if (n_desc > 1) { qh->desc_list[0].status |= HOST_DMA_A; @@ -817,7 +817,7 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, &qh->desc_list[0]); dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma, - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); } chan->ntd = n_desc; @@ -893,7 +893,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, struct dwc2_qh *qh, u16 idx) { - struct 
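/*
 * Standalone sketch of the length clamp used by dwc2_fill_host_dma_desc()
 * above. The clamp subtracts (max_packet - 1) from HOST_DMA_NBYTES_LIMIT
 * because the driver then programs IN transfers as a whole number of
 * max-packet buffers (the rounding itself falls outside this hunk);
 * reserving that slack up front keeps the rounded value inside the 17-bit
 * n_bytes field. Assumes max_packet >= 1; names are local to this sketch.
 */
#include <stdint.h>

#define NBYTES_LIMIT	131071u		/* HOST_DMA_NBYTES_LIMIT */

static uint32_t in_desc_len(uint32_t xfer_len, uint32_t max_packet)
{
	uint32_t len = xfer_len;
	uint32_t num_packets;

	if (len > NBYTES_LIMIT - (max_packet - 1))
		len = NBYTES_LIMIT - (max_packet - 1);

	/* a zero-length transfer still needs one packet */
	num_packets = len ? (len + max_packet - 1) / max_packet : 1;

	return num_packets * max_packet;	/* <= NBYTES_LIMIT by construction */
}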
dwc2_hcd_dma_desc *dma_desc; + struct dwc2_dma_desc *dma_desc; struct dwc2_hcd_iso_packet_desc *frame_desc; u16 remain = 0; int rc = 0; @@ -902,8 +902,8 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, return -EINVAL; dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx * - sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_FROM_DEVICE); dma_desc = &qh->desc_list[idx]; @@ -1066,7 +1066,7 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg, static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, struct dwc2_qtd *qtd, - struct dwc2_hcd_dma_desc *dma_desc, + struct dwc2_dma_desc *dma_desc, enum dwc2_halt_status halt_status, u32 n_bytes, int *xfer_done) { @@ -1154,7 +1154,7 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg, { struct dwc2_qh *qh = chan->qh; struct dwc2_hcd_urb *urb = qtd->urb; - struct dwc2_hcd_dma_desc *dma_desc; + struct dwc2_dma_desc *dma_desc; u32 n_bytes; int failed; @@ -1165,8 +1165,8 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg, dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (desc_num * - sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_FROM_DEVICE); dma_desc = &qh->desc_list[desc_num]; diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index 906f223542ee6436edb5680af1bd100529cb5d2a..b8f4b6aaf1d0fae7531c16b2cd03e5997c92479d 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -256,7 +256,7 @@ static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg) static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0, u32 *hprt0_modify) { - struct dwc2_core_params *params = hsotg->core_params; + struct dwc2_core_params *params = &hsotg->params; int do_reset = 0; u32 usbcfg; u32 prtspd; @@ -395,10 +395,10 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg) dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify); } else { hsotg->flags.b.port_enable_change = 1; - if (hsotg->core_params->dma_desc_fs_enable) { + if (hsotg->params.dma_desc_fs_enable) { u32 hcfg; - hsotg->core_params->dma_desc_enable = 0; + hsotg->params.dma_desc_enable = 0; hsotg->new_connection = false; hcfg = dwc2_readl(hsotg->regs + HCFG); hcfg &= ~HCFG_DESCDMA; @@ -604,7 +604,7 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state( /* Skip whole frame */ if (chan->qh->do_split && chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && - hsotg->core_params->dma_enable > 0) { + hsotg->params.host_dma > 0) { qtd->complete_split = 0; qtd->isoc_split_offset = 0; } @@ -743,7 +743,7 @@ static void dwc2_release_channel(struct dwc2_hsotg *hsotg, dwc2_hc_cleanup(hsotg, chan); list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { hsotg->available_host_channels++; } else { switch (chan->ep_type) { @@ -789,7 +789,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg, if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "%s()\n", __func__); - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "DMA enabled\n"); dwc2_release_channel(hsotg, chan, qtd, halt_status); @@ -915,6 +915,8 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, { struct dwc2_hcd_iso_packet_desc *frame_desc; u32 len; + u32 
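/*
 * Possible consolidation, sketched here rather than taken from the patch:
 * the "sync exactly one descriptor out of qh->desc_list" pattern repeats
 * throughout this file. A helper keeps the offset arithmetic in one place;
 * hsotg->dev and qh->desc_list_dma are the fields already used above, and
 * only the DMA_TO_DEVICE call sites are covered.
 */
static inline void dwc2_sync_desc_for_device(struct dwc2_hsotg *hsotg,
					     struct dwc2_qh *qh, int idx)
{
	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
					idx * sizeof(struct dwc2_dma_desc),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);
}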
hctsiz; + u32 pid; if (!qtd->urb) return 0; @@ -932,7 +934,10 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, qtd->isoc_split_offset += len; - if (frame_desc->actual_length >= frame_desc->length) { + hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); + pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT; + + if (frame_desc->actual_length >= frame_desc->length || pid == 0) { frame_desc->status = 0; qtd->isoc_frame_index++; qtd->complete_split = 0; @@ -974,7 +979,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg, pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status); if (pipe_type == USB_ENDPOINT_XFER_ISOC) /* Do not disable the interrupt, just clear it */ @@ -985,7 +990,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg, /* Handle xfer complete on CSPLIT */ if (chan->qh->do_split) { if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && - hsotg->core_params->dma_enable > 0) { + hsotg->params.host_dma > 0) { if (qtd->complete_split && dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum, qtd)) @@ -1097,7 +1102,7 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg, dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n", chnum); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, DWC2_HC_XFER_STALL); goto handle_stall_done; @@ -1207,7 +1212,7 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg, switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_BULK: - if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) { + if (hsotg->params.host_dma > 0 && chan->ep_is_in) { /* * NAK interrupts are enabled on bulk/control IN * transfers in DMA mode for the sole purpose of @@ -1353,7 +1358,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg, */ if (chan->do_split && chan->complete_split) { if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC && - hsotg->core_params->dma_enable > 0) { + hsotg->params.host_dma > 0) { qtd->complete_split = 0; qtd->isoc_split_offset = 0; qtd->isoc_frame_index++; @@ -1374,7 +1379,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh = chan->qh; bool past_end; - if (hsotg->core_params->uframe_sched <= 0) { + if (hsotg->params.uframe_sched <= 0) { int frnum = dwc2_hcd_get_frame_number(hsotg); /* Don't have num_hs_transfers; simple logic */ @@ -1467,7 +1472,7 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg, dwc2_hc_handle_tt_clear(hsotg, chan, qtd); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, DWC2_HC_XFER_BABBLE_ERR); goto disable_int; @@ -1572,7 +1577,7 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg, dev_err(hsotg->dev, " Interval: %d\n", urb->interval); /* Core halts the channel for Descriptor DMA mode */ - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, DWC2_HC_XFER_AHB_ERR); goto handle_ahberr_done; @@ -1604,7 +1609,7 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg, dwc2_hc_handle_tt_clear(hsotg, chan, qtd); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, 
chan, chnum, DWC2_HC_XFER_XACT_ERR); goto handle_xacterr_done; @@ -1798,8 +1803,8 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE || (chan->halt_status == DWC2_HC_XFER_AHB_ERR && - hsotg->core_params->dma_desc_enable <= 0)) { - if (hsotg->core_params->dma_desc_enable > 0) + hsotg->params.dma_desc_enable <= 0)) { + if (hsotg->params.dma_desc_enable > 0) dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, chan->halt_status); else @@ -1830,7 +1835,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, } else if (chan->hcint & HCINTMSK_STALL) { dwc2_hc_stall_intr(hsotg, chan, chnum, qtd); } else if ((chan->hcint & HCINTMSK_XACTERR) && - hsotg->core_params->dma_desc_enable <= 0) { + hsotg->params.dma_desc_enable <= 0) { if (out_nak_enh) { if (chan->hcint & (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) { @@ -1850,10 +1855,10 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, */ dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); } else if ((chan->hcint & HCINTMSK_XCS_XACT) && - hsotg->core_params->dma_desc_enable > 0) { + hsotg->params.dma_desc_enable > 0) { dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); } else if ((chan->hcint & HCINTMSK_AHBERR) && - hsotg->core_params->dma_desc_enable > 0) { + hsotg->params.dma_desc_enable > 0) { dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd); } else if (chan->hcint & HCINTMSK_BBLERR) { dwc2_hc_babble_intr(hsotg, chan, chnum, qtd); @@ -1946,7 +1951,7 @@ static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg, dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n", chnum); - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd); } else { if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd)) @@ -2023,7 +2028,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) * interrupt unmasked */ WARN_ON(hcint != HCINTMSK_CHHLTD); - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->params.dma_desc_enable > 0) dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, chan->halt_status); else @@ -2051,7 +2056,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd, qtd_list_entry); - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD) hcint &= ~HCINTMSK_CHHLTD; } @@ -2156,7 +2161,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg) } } - for (i = 0; i < hsotg->core_params->host_channels; i++) { + for (i = 0; i < hsotg->params.host_channels; i++) { if (haint & (1 << i)) dwc2_hc_n_intr(hsotg, i); } diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 13754353251f1a2252a1cf743d6c19523859c478..5713f03a4e56042942f51c77960a3dedc72145ac 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -75,7 +75,7 @@ static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg) int status; int num_channels; - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; if (hsotg->periodic_channels + hsotg->non_periodic_channels < num_channels && hsotg->periodic_channels < num_channels - 1) { @@ -355,6 +355,37 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period, } } +/** + * dwc2_get_ls_map() - Get the map used for the given qh + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. 
+ * + * We'll always get the periodic map out of our TT. Note that even if we're + * running the host straight in low speed / full speed mode it appears as if + * a TT is allocated for us, so we'll use it. If that ever changes we can + * add logic here to get a map out of "hsotg" if !qh->do_split. + * + * Returns: the map or NULL if a map couldn't be found. + */ +static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) +{ + unsigned long *map; + + /* Don't expect to be missing a TT and be doing low speed scheduling */ + if (WARN_ON(!qh->dwc_tt)) + return NULL; + + /* Get the map and adjust if this is a multi_tt hub */ + map = qh->dwc_tt->periodic_bitmaps; + if (qh->dwc_tt->usb_tt->multi) + map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; + + return map; +} + +#ifdef DWC2_PRINT_SCHEDULE /* * cat_printf() - A printf() + strcat() helper * @@ -454,35 +485,6 @@ static void pmap_print(unsigned long *map, int bits_per_period, } } -/** - * dwc2_get_ls_map() - Get the map used for the given qh - * - * @hsotg: The HCD state structure for the DWC OTG controller. - * @qh: QH for the periodic transfer. - * - * We'll always get the periodic map out of our TT. Note that even if we're - * running the host straight in low speed / full speed mode it appears as if - * a TT is allocated for us, so we'll use it. If that ever changes we can - * add logic here to get a map out of "hsotg" if !qh->do_split. - * - * Returns: the map or NULL if a map couldn't be found. - */ -static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, - struct dwc2_qh *qh) -{ - unsigned long *map; - - /* Don't expect to be missing a TT and be doing low speed scheduling */ - if (WARN_ON(!qh->dwc_tt)) - return NULL; - - /* Get the map and adjust if this is a multi_tt hub */ - map = qh->dwc_tt->periodic_bitmaps; - if (qh->dwc_tt->usb_tt->multi) - map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; - - return map; -} struct dwc2_qh_print_data { struct dwc2_hsotg *hsotg; @@ -519,9 +521,6 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, * If we don't have tracing turned on, don't run unless the special * define is turned on. */ -#ifndef DWC2_PRINT_SCHEDULE - return; -#endif if (qh->schedule_low_speed) { unsigned long *map = dwc2_get_ls_map(hsotg, qh); @@ -559,8 +558,12 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us", dwc2_qh_print, &print_data); } - + return; } +#else +static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) {}; +#endif /** * dwc2_ls_pmap_schedule() - Schedule a low speed QH @@ -1104,7 +1107,7 @@ static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) next_active_frame = earliest_frame; /* Get the "no microframe schduler" out of the way... 
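/*
 * Standalone sketch of the map selection performed by dwc2_get_ls_map()
 * above: a single-TT hub shares one low-speed schedule bitmap, while a
 * multi-TT hub advances the pointer by DWC2_ELEMENTS_PER_LS_BITMAP unsigned
 * longs per TT port so each port schedules independently. ELEMENTS_PER_MAP
 * below is a placeholder for that constant, not its real value.
 */
#include <stdbool.h>

#define ELEMENTS_PER_MAP	4	/* placeholder for DWC2_ELEMENTS_PER_LS_BITMAP */

static unsigned long *select_ls_map(unsigned long *periodic_bitmaps,
				    bool multi_tt, unsigned int ttport)
{
	return multi_tt ? periodic_bitmaps + ELEMENTS_PER_MAP * ttport
			: periodic_bitmaps;
}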
*/ - if (hsotg->core_params->uframe_sched <= 0) { + if (hsotg->params.uframe_sched <= 0) { if (qh->do_split) /* Splits are active at microframe 0 minus 1 */ next_active_frame |= 0x7; @@ -1197,7 +1200,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { int status; - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { status = dwc2_uframe_schedule(hsotg, qh); } else { status = dwc2_periodic_channel_available(hsotg); @@ -1218,7 +1221,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) return status; } - if (hsotg->core_params->uframe_sched <= 0) + if (hsotg->params.uframe_sched <= 0) /* Reserve periodic channel */ hsotg->periodic_channels++; @@ -1254,7 +1257,7 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) /* Update claimed usecs per (micro)frame */ hsotg->periodic_usecs -= qh->host_us; - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { dwc2_uframe_unschedule(hsotg, qh); } else { /* Release periodic channel reservation */ @@ -1328,7 +1331,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg, int status = 0; max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp); - max_channel_xfer_size = hsotg->core_params->max_transfer_size; + max_channel_xfer_size = hsotg->params.max_transfer_size; if (max_xfer_size > max_channel_xfer_size) { dev_err(hsotg->dev, @@ -1391,7 +1394,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) qh->unreserve_pending = 0; - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->params.dma_desc_enable > 0) /* Don't rely on SOF and start in ready schedule */ list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready); else @@ -1599,7 +1602,7 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, dwc2_qh_init(hsotg, qh, urb, mem_flags); - if (hsotg->core_params->dma_desc_enable > 0 && + if (hsotg->params.dma_desc_enable > 0 && dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) { dwc2_hcd_qh_free(hsotg, qh); return NULL; @@ -1711,7 +1714,7 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) dwc2_deschedule_periodic(hsotg, qh); hsotg->periodic_qh_count--; if (!hsotg->periodic_qh_count && - hsotg->core_params->dma_desc_enable <= 0) { + hsotg->params.dma_desc_enable <= 0) { intr_mask = dwc2_readl(hsotg->regs + GINTMSK); intr_mask &= ~GINTSTS_SOF; dwc2_writel(intr_mask, hsotg->regs + GINTMSK); diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h index 91058441e62abb1f68538aa2145018a097813dda..5be056b39e5c9139c1e7299ed71bab85f1fe322b 100644 --- a/drivers/usb/dwc2/hw.h +++ b/drivers/usb/dwc2/hw.h @@ -412,6 +412,7 @@ /* Device mode registers */ #define DCFG HSOTG_REG(0x800) +#define DCFG_DESCDMA_EN (1 << 23) #define DCFG_EPMISCNT_MASK (0x1f << 18) #define DCFG_EPMISCNT_SHIFT 18 #define DCFG_EPMISCNT_LIMIT 0x1f @@ -473,6 +474,7 @@ #define DIEPMSK_XFERCOMPLMSK (1 << 0) #define DOEPMSK HSOTG_REG(0x814) +#define DOEPMSK_BNAMSK (1 << 9) #define DOEPMSK_BACK2BACKSETUP (1 << 6) #define DOEPMSK_STSPHSERCVDMSK (1 << 5) #define DOEPMSK_OUTTKNEPDISMSK (1 << 4) @@ -790,7 +792,8 @@ #define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch)) /** - * struct dwc2_hcd_dma_desc - Host-mode DMA descriptor structure + * struct dwc2_dma_desc - DMA descriptor structure, + * used for both host and gadget modes * * @status: DMA descriptor status quadlet * @buf: DMA descriptor data buffer pointer @@ -798,10 +801,12 @@ * DMA Descriptor structure contains two 
quadlets: * Status quadlet and Data buffer pointer. */ -struct dwc2_hcd_dma_desc { +struct dwc2_dma_desc { u32 status; u32 buf; -}; +} __packed; + +/* Host Mode DMA descriptor status quadlet */ #define HOST_DMA_A (1 << 31) #define HOST_DMA_STS_MASK (0x3 << 28) @@ -817,8 +822,43 @@ struct dwc2_hcd_dma_desc { #define HOST_DMA_ISOC_NBYTES_SHIFT 0 #define HOST_DMA_NBYTES_MASK (0x1ffff << 0) #define HOST_DMA_NBYTES_SHIFT 0 +#define HOST_DMA_NBYTES_LIMIT 131071 + +/* Device Mode DMA descriptor status quadlet */ + +#define DEV_DMA_BUFF_STS_MASK (0x3 << 30) +#define DEV_DMA_BUFF_STS_SHIFT 30 +#define DEV_DMA_BUFF_STS_HREADY 0 +#define DEV_DMA_BUFF_STS_DMABUSY 1 +#define DEV_DMA_BUFF_STS_DMADONE 2 +#define DEV_DMA_BUFF_STS_HBUSY 3 +#define DEV_DMA_STS_MASK (0x3 << 28) +#define DEV_DMA_STS_SHIFT 28 +#define DEV_DMA_STS_SUCC 0 +#define DEV_DMA_STS_BUFF_FLUSH 1 +#define DEV_DMA_STS_BUFF_ERR 3 +#define DEV_DMA_L (1 << 27) +#define DEV_DMA_SHORT (1 << 26) +#define DEV_DMA_IOC (1 << 25) +#define DEV_DMA_SR (1 << 24) +#define DEV_DMA_MTRF (1 << 23) +#define DEV_DMA_ISOC_PID_MASK (0x3 << 23) +#define DEV_DMA_ISOC_PID_SHIFT 23 +#define DEV_DMA_ISOC_PID_DATA0 0 +#define DEV_DMA_ISOC_PID_DATA2 1 +#define DEV_DMA_ISOC_PID_DATA1 2 +#define DEV_DMA_ISOC_PID_MDATA 3 +#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12) +#define DEV_DMA_ISOC_FRNUM_SHIFT 12 +#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0) +#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff +#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0) +#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff +#define DEV_DMA_ISOC_NBYTES_SHIFT 0 +#define DEV_DMA_NBYTES_MASK (0xffff << 0) +#define DEV_DMA_NBYTES_SHIFT 0 +#define DEV_DMA_NBYTES_LIMIT 0xffff -#define MAX_DMA_DESC_SIZE 131071 #define MAX_DMA_DESC_NUM_GENERIC 64 #define MAX_DMA_DESC_NUM_HS_ISOC 256 diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c new file mode 100644 index 0000000000000000000000000000000000000000..a786256535b6a77392018aa93f78f785daf8a1b2 --- /dev/null +++ b/drivers/usb/dwc2/params.c @@ -0,0 +1,1435 @@ +/* + * Copyright (C) 2004-2016 Synopsys, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +#include "core.h" + +static const struct dwc2_core_params params_hi6220 = { + .otg_cap = 2, /* No HNP/SRP capable */ + .otg_ver = 0, /* 1.3 */ + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = 0, /* High Speed */ + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = 1, + .host_rx_fifo_size = 512, + .host_nperio_tx_fifo_size = 512, + .host_perio_tx_fifo_size = 512, + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = 16, + .phy_type = 1, /* UTMI */ + .phy_utmi_width = 8, + .phy_ulpi_ddr = 0, /* Single */ + .phy_ulpi_ext_vbus = 0, + .i2c_enable = 0, + .ulpi_fs_ls = 0, + .host_support_fs_ls_low_power = 0, + .host_ls_low_power_phy_clk = 0, /* 48 MHz */ + .ts_dline = 0, + .reload_ctl = 0, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = 0, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_bcm2835 = { + .otg_cap = 0, /* HNP/SRP capable */ + .otg_ver = 0, /* 1.3 */ + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = 0, /* High Speed */ + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = 1, + .host_rx_fifo_size = 774, /* 774 DWORDs */ + .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */ + .host_perio_tx_fifo_size = 512, /* 512 DWORDs */ + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = 8, + .phy_type = 1, /* UTMI */ + .phy_utmi_width = 8, /* 8 bits */ + .phy_ulpi_ddr = 0, /* Single */ + .phy_ulpi_ext_vbus = 0, + .i2c_enable = 0, + .ulpi_fs_ls = 0, + .host_support_fs_ls_low_power = 0, + .host_ls_low_power_phy_clk = 0, /* 48 MHz */ + .ts_dline = 0, + .reload_ctl = 0, + .ahbcfg = 0x10, + .uframe_sched = 0, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_rk3066 = { + .otg_cap = 2, /* non-HNP/non-SRP */ + .otg_ver = -1, + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = -1, + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = 525, /* 525 DWORDs */ + .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ + .host_perio_tx_fifo_size = 256, /* 256 DWORDs */ + .max_transfer_size = -1, + .max_packet_count = -1, + .host_channels = -1, + .phy_type = -1, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = -1, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = -1, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_ltq = { + .otg_cap = 2, /* non-HNP/non-SRP */ + .otg_ver = -1, + .dma_desc_enable = -1, + .dma_desc_fs_enable = -1, + .speed = -1, + .enable_dynamic_fifo = -1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = 288, /* 288 DWORDs */ + .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ + .host_perio_tx_fifo_size = 96, /* 96 DWORDs */ + .max_transfer_size 
= 65535, + .max_packet_count = 511, + .host_channels = -1, + .phy_type = -1, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = -1, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = -1, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_amlogic = { + .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE, + .otg_ver = -1, + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = DWC2_SPEED_PARAM_HIGH, + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = 512, + .host_nperio_tx_fifo_size = 500, + .host_perio_tx_fifo_size = 500, + .max_transfer_size = -1, + .max_packet_count = -1, + .host_channels = 16, + .phy_type = DWC2_PHY_TYPE_PARAM_UTMI, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = 1, + .ahbcfg = GAHBCFG_HBSTLEN_INCR8 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = 0, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_default = { + .otg_cap = -1, + .otg_ver = -1, + + /* + * Disable descriptor dma mode by default as the HW can support + * it, but does not support it for SPLIT transactions. + * Disable it for FS devices as well. + */ + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + + .speed = -1, + .enable_dynamic_fifo = -1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = -1, + .host_nperio_tx_fifo_size = -1, + .host_perio_tx_fifo_size = -1, + .max_transfer_size = -1, + .max_packet_count = -1, + .host_channels = -1, + .phy_type = -1, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = -1, + .ahbcfg = -1, + .uframe_sched = -1, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +const struct of_device_id dwc2_of_match_table[] = { + { .compatible = "brcm,bcm2835-usb", .data = ¶ms_bcm2835 }, + { .compatible = "hisilicon,hi6220-usb", .data = ¶ms_hi6220 }, + { .compatible = "rockchip,rk3066-usb", .data = ¶ms_rk3066 }, + { .compatible = "lantiq,arx100-usb", .data = ¶ms_ltq }, + { .compatible = "lantiq,xrx200-usb", .data = ¶ms_ltq }, + { .compatible = "snps,dwc2", .data = NULL }, + { .compatible = "samsung,s3c6400-hsotg", .data = NULL}, + { .compatible = "amlogic,meson8b-usb", .data = ¶ms_amlogic }, + { .compatible = "amlogic,meson-gxbb-usb", .data = ¶ms_amlogic }, + { .compatible = "amcc,dwc-otg", .data = NULL }, + {}, +}; +MODULE_DEVICE_TABLE(of, dwc2_of_match_table); + +static void dwc2_get_device_property(struct dwc2_hsotg *hsotg, + char *property, u8 size, u64 *value) +{ + u8 val8; + u16 val16; + u32 val32; + + switch (size) { + case 0: + *value = device_property_read_bool(hsotg->dev, property); + break; + case 1: + if (device_property_read_u8(hsotg->dev, property, &val8)) + return; + + *value = val8; + break; + case 2: + if (device_property_read_u16(hsotg->dev, property, &val16)) + return; + + *value = val16; + break; + case 4: + if (device_property_read_u32(hsotg->dev, property, &val32)) + return; + + *value = val32; + break; + case 8: + if (device_property_read_u64(hsotg->dev, property, value)) + return; + + break; + default: + 
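/*
 * Standalone sketch of how a table like dwc2_of_match_table above is
 * typically consumed: the matched compatible string selects a const
 * parameter block that seeds the live configuration before the
 * per-parameter validation further down runs. The lookup helper and the
 * "NULL data means keep the defaults" convention are assumptions of this
 * sketch, not code from the patch; the bcm2835 numbers are the ones shown
 * above.
 */
#include <string.h>

struct params_model { int host_rx_fifo_size; int host_channels; };

struct match_model {
	const char *compatible;
	const struct params_model *data;
};

static const struct params_model bcm2835_model = { 774, 8 };

static const struct match_model match_table_model[] = {
	{ "brcm,bcm2835-usb",	&bcm2835_model },
	{ "snps,dwc2",		NULL },		/* no platform overrides */
	{ NULL,			NULL },
};

static const struct params_model *
pick_params(const char *compatible, const struct params_model *defaults)
{
	const struct match_model *m;

	for (m = match_table_model; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data ? m->data : defaults;

	return defaults;
}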
/* + * The size is checked by the only function that calls + * this so this should never happen. + */ + WARN_ON(1); + return; + } +} + +static void dwc2_set_core_param(void *param, u8 size, u64 value) +{ + switch (size) { + case 0: + *((bool *)param) = !!value; + break; + case 1: + *((u8 *)param) = (u8)value; + break; + case 2: + *((u16 *)param) = (u16)value; + break; + case 4: + *((u32 *)param) = (u32)value; + break; + case 8: + *((u64 *)param) = (u64)value; + break; + default: + /* + * The size is checked by the only function that calls + * this so this should never happen. + */ + WARN_ON(1); + return; + } +} + +/** + * dwc2_set_param() - Set a core parameter + * + * @hsotg: Programming view of the DWC_otg controller + * @param: Pointer to the parameter to set + * @lookup: True if the property should be looked up + * @property: The device property to read + * @legacy: The param value to set if @property is not available. This + * will typically be the legacy value set in the static + * params structure. + * @def: The default value + * @min: The minimum value + * @max: The maximum value + * @size: The size of the core parameter in bytes, or 0 for bool. + * + * This function looks up @property and sets the @param to that value. + * If the property doesn't exist it uses the passed-in @value. It will + * verify that the value falls between @min and @max. If it doesn't, + * it will output an error and set the parameter to either @def or, + * failing that, to @min. + * + * The @size is used to write to @param and to query the device + * properties so that this same function can be used with different + * types of parameters. + */ +static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param, + bool lookup, char *property, u64 legacy, + u64 def, u64 min, u64 max, u8 size) +{ + u64 sizemax; + u64 value; + + if (WARN_ON(!hsotg || !param || !property)) + return; + + if (WARN((size > 8) || ((size & (size - 1)) != 0), + "Invalid size %d for %s\n", size, property)) + return; + + dev_vdbg(hsotg->dev, "%s: Setting %s: legacy=%llu, def=%llu, min=%llu, max=%llu, size=%d\n", + __func__, property, legacy, def, min, max, size); + + sizemax = (1ULL << (size * 8)) - 1; + value = legacy; + + /* Override legacy settings. */ + if (lookup) + dwc2_get_device_property(hsotg, property, size, &value); + + /* + * While the value is not valid, try setting it to the default + * value, and failing that, set it to the minimum. + */ + while ((value < min) || (value > max)) { + /* Print an error unless the value is set to auto. */ + if (value != sizemax) + dev_err(hsotg->dev, "Invalid value %llu for param %s\n", + value, property); + + /* + * If we are already the default, just set it to the + * minimum. + */ + if (value == def) { + dev_vdbg(hsotg->dev, "%s: setting value to min=%llu\n", + __func__, min); + value = min; + break; + } + + /* Try the default value */ + dev_vdbg(hsotg->dev, "%s: setting value to default=%llu\n", + __func__, def); + value = def; + } + + dev_dbg(hsotg->dev, "Setting %s to %llu\n", property, value); + dwc2_set_core_param(param, size, value); +} + +/** + * dwc2_set_param_u16() - Set a u16 parameter + * + * See dwc2_set_param(). + */ +static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param, + bool lookup, char *property, u16 legacy, + u16 def, u16 min, u16 max) +{ + dwc2_set_param(hsotg, param, lookup, property, + legacy, def, min, max, 2); +} + +/** + * dwc2_set_param_bool() - Set a bool parameter + * + * See dwc2_set_param(). 
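/*
 * Standalone model of the fallback loop inside dwc2_set_param() above: a
 * value outside [min, max] is first replaced by the default and, if the
 * default is itself out of range, by the minimum. "sizemax" (all bits set
 * for the parameter's width) is treated as "auto-detect" and does not
 * trigger the error print. Names are local to this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_param(uint64_t value, uint64_t def, uint64_t min,
			    uint64_t max, uint64_t sizemax, const char *name)
{
	while (value < min || value > max) {
		if (value != sizemax)
			fprintf(stderr, "invalid value %llu for %s\n",
				(unsigned long long)value, name);

		if (value == def) {
			value = min;	/* default is invalid too: fall back to minimum */
			break;
		}

		value = def;		/* retry with the default */
	}

	return value;
}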
+ * + * Note: there is no 'legacy' argument here because there is no legacy + * source of bool params. + */ +static void dwc2_set_param_bool(struct dwc2_hsotg *hsotg, bool *param, + bool lookup, char *property, + bool def, bool min, bool max) +{ + dwc2_set_param(hsotg, param, lookup, property, + def, def, min, max, 0); +} + +#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) + +/* Parameter access functions */ +static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + switch (val) { + case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: + if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) + valid = 0; + break; + case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: + switch (hsotg->hw_params.op_mode) { + case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: + case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: + break; + default: + valid = 0; + break; + } + break; + case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: + /* always valid */ + break; + default: + valid = 0; + break; + } + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for otg_cap parameter. Check HW configuration.\n", + val); + switch (hsotg->hw_params.op_mode) { + case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: + val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; + break; + case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: + val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; + break; + default: + val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; + break; + } + dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); + } + + hsotg->params.otg_cap = val; +} + +static void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val > 0 && (hsotg->params.host_dma <= 0 || + !hsotg->hw_params.dma_desc_enable)) + valid = 0; + if (val < 0) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for dma_desc_enable parameter. Check HW configuration.\n", + val); + val = (hsotg->params.host_dma > 0 && + hsotg->hw_params.dma_desc_enable); + dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); + } + + hsotg->params.dma_desc_enable = val; +} + +static void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val > 0 && (hsotg->params.host_dma <= 0 || + !hsotg->hw_params.dma_desc_enable)) + valid = 0; + if (val < 0) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n", + val); + val = (hsotg->params.host_dma > 0 && + hsotg->hw_params.dma_desc_enable); + } + + hsotg->params.dma_desc_fs_enable = val; + dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); +} + +static void +dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, + int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "Wrong value for host_support_fs_low_power\n"); + dev_err(hsotg->dev, + "host_support_fs_low_power must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, + "Setting host_support_fs_low_power to %d\n", val); + } + + hsotg->params.host_support_fs_ls_low_power = val; +} + +static void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) + valid = 0; + if (val < 0) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for enable_dynamic_fifo parameter. 
Check HW configuration.\n", + val); + val = hsotg->hw_params.enable_dynamic_fifo; + dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); + } + + hsotg->params.enable_dynamic_fifo = val; +} + +static void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val < 16 || val > hsotg->hw_params.rx_fifo_size) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_rx_fifo_size. Check HW configuration.\n", + val); + val = hsotg->hw_params.rx_fifo_size; + dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); + } + + hsotg->params.host_rx_fifo_size = val; +} + +static void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", + val); + val = hsotg->hw_params.host_nperio_tx_fifo_size; + dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", + val); + } + + hsotg->params.host_nperio_tx_fifo_size = val; +} + +static void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n", + val); + val = hsotg->hw_params.host_perio_tx_fifo_size; + dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", + val); + } + + hsotg->params.host_perio_tx_fifo_size = val; +} + +static void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val < 2047 || val > hsotg->hw_params.max_transfer_size) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for max_transfer_size. Check HW configuration.\n", + val); + val = hsotg->hw_params.max_transfer_size; + dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); + } + + hsotg->params.max_transfer_size = val; +} + +static void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val < 15 || val > hsotg->hw_params.max_packet_count) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for max_packet_count. Check HW configuration.\n", + val); + val = hsotg->hw_params.max_packet_count; + dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); + } + + hsotg->params.max_packet_count = val; +} + +static void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val < 1 || val > hsotg->hw_params.host_channels) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_channels. 
Check HW configuration.\n", + val); + val = hsotg->hw_params.host_channels; + dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); + } + + hsotg->params.host_channels = val; +} + +static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 0; + u32 hs_phy_type, fs_phy_type; + + if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, + DWC2_PHY_TYPE_PARAM_ULPI)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for phy_type\n"); + dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); + } + + valid = 0; + } + + hs_phy_type = hsotg->hw_params.hs_phy_type; + fs_phy_type = hsotg->hw_params.fs_phy_type; + if (val == DWC2_PHY_TYPE_PARAM_UTMI && + (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) + valid = 1; + else if (val == DWC2_PHY_TYPE_PARAM_ULPI && + (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) + valid = 1; + else if (val == DWC2_PHY_TYPE_PARAM_FS && + fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) + valid = 1; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for phy_type. Check HW configuration.\n", + val); + val = DWC2_PHY_TYPE_PARAM_FS; + if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { + if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) + val = DWC2_PHY_TYPE_PARAM_UTMI; + else + val = DWC2_PHY_TYPE_PARAM_ULPI; + } + dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); + } + + hsotg->params.phy_type = val; +} + +static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) +{ + return hsotg->params.phy_type; +} + +static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, 0, 2)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for speed parameter\n"); + dev_err(hsotg->dev, "max_speed parameter must be 0, 1, or 2\n"); + } + valid = 0; + } + + if (dwc2_is_hs_iot(hsotg) && + val == DWC2_SPEED_PARAM_LOW) + valid = 0; + + if (val == DWC2_SPEED_PARAM_HIGH && + dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for speed parameter. Check HW configuration.\n", + val); + val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? + DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; + dev_dbg(hsotg->dev, "Setting speed to %d\n", val); + } + + hsotg->params.speed = val; +} + +static void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, + DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { + if (val >= 0) { + dev_err(hsotg->dev, + "Wrong value for host_ls_low_power_phy_clk parameter\n"); + dev_err(hsotg->dev, + "host_ls_low_power_phy_clk must be 0 or 1\n"); + } + valid = 0; + } + + if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && + dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", + val); + val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS + ? 
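/*
 * Standalone sketch of the cross-checks in dwc2_set_param_speed() above: a
 * high-speed request is rejected when only a full-speed PHY is configured,
 * a low-speed request is rejected on HS IOT cores, and the fallback follows
 * the PHY type. The enum values below are local to this sketch, not the
 * driver's DWC2_SPEED_PARAM_* encoding.
 */
#include <stdbool.h>

enum speed_model { SPEED_MODEL_HIGH, SPEED_MODEL_FULL, SPEED_MODEL_LOW };

static enum speed_model pick_speed(enum speed_model requested,
				   bool phy_is_fs, bool is_hs_iot)
{
	bool valid = true;

	if (requested == SPEED_MODEL_HIGH && phy_is_fs)
		valid = false;		/* FS PHY cannot run high speed */
	if (requested == SPEED_MODEL_LOW && is_hs_iot)
		valid = false;		/* low speed not supported on HS IOT cores */

	if (valid)
		return requested;

	return phy_is_fs ? SPEED_MODEL_FULL : SPEED_MODEL_HIGH;
}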
DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ + : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; + dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", + val); + } + + hsotg->params.host_ls_low_power_phy_clk = val; +} + +static void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); + dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val); + } + + hsotg->params.phy_ulpi_ddr = val; +} + +static void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "Wrong value for phy_ulpi_ext_vbus\n"); + dev_err(hsotg->dev, + "phy_ulpi_ext_vbus must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); + } + + hsotg->params.phy_ulpi_ext_vbus = val; +} + +static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 0; + + switch (hsotg->hw_params.utmi_phy_data_width) { + case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: + valid = (val == 8); + break; + case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: + valid = (val == 16); + break; + case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: + valid = (val == 8 || val == 16); + break; + } + + if (!valid) { + if (val >= 0) { + dev_err(hsotg->dev, + "%d invalid for phy_utmi_width. Check HW configuration.\n", + val); + } + val = (hsotg->hw_params.utmi_phy_data_width == + GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16; + dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); + } + + hsotg->params.phy_utmi_width = val; +} + +static void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); + dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); + } + + hsotg->params.ulpi_fs_ls = val; +} + +static void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for ts_dline\n"); + dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); + } + + hsotg->params.ts_dline = val; +} + +static void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); + dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); + } + + valid = 0; + } + + if (val == 1 && !(hsotg->hw_params.i2c_enable)) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for i2c_enable. Check HW configuration.\n", + val); + val = hsotg->hw_params.i2c_enable; + dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); + } + + hsotg->params.i2c_enable = val; +} + +static void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "Wrong value for en_multiple_tx_fifo,\n"); + dev_err(hsotg->dev, + "en_multiple_tx_fifo must be 0 or 1\n"); + } + valid = 0; + } + + if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for parameter en_multiple_tx_fifo. 
Check HW configuration.\n", + val); + val = hsotg->hw_params.en_multiple_tx_fifo; + dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); + } + + hsotg->params.en_multiple_tx_fifo = val; +} + +static void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter reload_ctl\n", val); + dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); + } + valid = 0; + } + + if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for parameter reload_ctl. Check HW configuration.\n", + val); + val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; + dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); + } + + hsotg->params.reload_ctl = val; +} + +static void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) +{ + if (val != -1) + hsotg->params.ahbcfg = val; + else + hsotg->params.ahbcfg = GAHBCFG_HBSTLEN_INCR4 << + GAHBCFG_HBSTLEN_SHIFT; +} + +static void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter otg_ver\n", val); + dev_err(hsotg->dev, + "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); + } + + hsotg->params.otg_ver = val; +} + +static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter uframe_sched\n", + val); + dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); + } + val = 1; + dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); + } + + hsotg->params.uframe_sched = val; +} + +static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, + int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter external_id_pin_ctl\n", + val); + dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); + } + + hsotg->params.external_id_pin_ctl = val; +} + +static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, + int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter hibernation\n", + val); + dev_err(hsotg->dev, "hibernation must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val); + } + + hsotg->params.hibernation = val; +} + +static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg) +{ + int i; + int num; + char *property = "g-tx-fifo-size"; + struct dwc2_core_params *p = &hsotg->params; + + memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size)); + + /* Read tx fifo sizes */ + num = device_property_read_u32_array(hsotg->dev, property, NULL, 0); + + if (num > 0) { + device_property_read_u32_array(hsotg->dev, property, + &p->g_tx_fifo_size[1], + num); + } else { + u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE; + + memcpy(&p->g_tx_fifo_size[1], + p_tx_fifo, + sizeof(p_tx_fifo)); + + num = ARRAY_SIZE(p_tx_fifo); + } + + for (i = 0; i < num; i++) { + if ((i + 1) >= ARRAY_SIZE(p->g_tx_fifo_size)) + break; + + dev_dbg(hsotg->dev, "Setting %s[%d] to %d\n", + property, i + 1, p->g_tx_fifo_size[i + 1]); + } +} + +static void dwc2_set_gadget_dma(struct dwc2_hsotg 
*hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + struct dwc2_core_params *p = &hsotg->params; + bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); + + /* Buffer DMA */ + dwc2_set_param_bool(hsotg, &p->g_dma, + false, "gadget-dma", + true, false, + dma_capable); + + /* DMA Descriptor */ + dwc2_set_param_bool(hsotg, &p->g_dma_desc, false, + "gadget-dma-desc", + p->g_dma, false, + !!hw->dma_desc_enable); +} + +/** + * dwc2_set_parameters() - Set all core parameters. + * + * @hsotg: Programming view of the DWC_otg controller + * @params: The parameters to set + */ +static void dwc2_set_parameters(struct dwc2_hsotg *hsotg, + const struct dwc2_core_params *params) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + struct dwc2_core_params *p = &hsotg->params; + bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); + + dwc2_set_param_otg_cap(hsotg, params->otg_cap); + if ((hsotg->dr_mode == USB_DR_MODE_HOST) || + (hsotg->dr_mode == USB_DR_MODE_OTG)) { + dev_dbg(hsotg->dev, "Setting HOST parameters\n"); + + dwc2_set_param_bool(hsotg, &p->host_dma, + false, "host-dma", + true, false, + dma_capable); + } + dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); + dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); + + dwc2_set_param_host_support_fs_ls_low_power(hsotg, + params->host_support_fs_ls_low_power); + dwc2_set_param_enable_dynamic_fifo(hsotg, + params->enable_dynamic_fifo); + dwc2_set_param_host_rx_fifo_size(hsotg, + params->host_rx_fifo_size); + dwc2_set_param_host_nperio_tx_fifo_size(hsotg, + params->host_nperio_tx_fifo_size); + dwc2_set_param_host_perio_tx_fifo_size(hsotg, + params->host_perio_tx_fifo_size); + dwc2_set_param_max_transfer_size(hsotg, + params->max_transfer_size); + dwc2_set_param_max_packet_count(hsotg, + params->max_packet_count); + dwc2_set_param_host_channels(hsotg, params->host_channels); + dwc2_set_param_phy_type(hsotg, params->phy_type); + dwc2_set_param_speed(hsotg, params->speed); + dwc2_set_param_host_ls_low_power_phy_clk(hsotg, + params->host_ls_low_power_phy_clk); + dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr); + dwc2_set_param_phy_ulpi_ext_vbus(hsotg, + params->phy_ulpi_ext_vbus); + dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width); + dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls); + dwc2_set_param_ts_dline(hsotg, params->ts_dline); + dwc2_set_param_i2c_enable(hsotg, params->i2c_enable); + dwc2_set_param_en_multiple_tx_fifo(hsotg, + params->en_multiple_tx_fifo); + dwc2_set_param_reload_ctl(hsotg, params->reload_ctl); + dwc2_set_param_ahbcfg(hsotg, params->ahbcfg); + dwc2_set_param_otg_ver(hsotg, params->otg_ver); + dwc2_set_param_uframe_sched(hsotg, params->uframe_sched); + dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl); + dwc2_set_param_hibernation(hsotg, params->hibernation); + + /* + * Set devicetree-only parameters. These parameters do not + * take any values from @params. + */ + if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) || + (hsotg->dr_mode == USB_DR_MODE_OTG)) { + dev_dbg(hsotg->dev, "Setting peripheral device properties\n"); + + dwc2_set_gadget_dma(hsotg); + + /* + * The values for g_rx_fifo_size (2048) and + * g_np_tx_fifo_size (1024) come from the legacy s3c + * gadget driver. These defaults have been hard-coded + * for some time so many platforms depend on these + * values. Leave them as defaults for now and only + * auto-detect if the hardware does not support the + * default. 
+ */ + dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size, + true, "g-rx-fifo-size", 2048, + hw->rx_fifo_size, + 16, hw->rx_fifo_size); + + dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size, + true, "g-np-tx-fifo-size", 1024, + hw->dev_nperio_tx_fifo_size, + 16, hw->dev_nperio_tx_fifo_size); + + dwc2_set_param_tx_fifo_sizes(hsotg); + } +} + +/* + * Gets host hardware parameters. Forces host mode if not currently in + * host mode. Should be called immediately after a core soft reset in + * order to get the reset values. + */ +static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + u32 gnptxfsiz; + u32 hptxfsiz; + bool forced; + + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) + return; + + forced = dwc2_force_mode_if_needed(hsotg, true); + + gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); + hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); + dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); + dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); + + if (forced) + dwc2_clear_force_mode(hsotg); + + hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; + hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; +} + +/* + * Gets device hardware parameters. Forces device mode if not + * currently in device mode. Should be called immediately after a core + * soft reset in order to get the reset values. + */ +static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + bool forced; + u32 gnptxfsiz; + + if (hsotg->dr_mode == USB_DR_MODE_HOST) + return; + + forced = dwc2_force_mode_if_needed(hsotg, false); + + gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); + dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); + + if (forced) + dwc2_clear_force_mode(hsotg); + + hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; +} + +/** + * During device initialization, read various hardware configuration + * registers and interpret the contents. + */ +int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + unsigned int width; + u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; + u32 grxfsiz; + + /* + * Attempt to ensure this device is really a DWC_otg Controller. + * Read and verify the GSNPSID register contents. The value should be + * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3", + * as in "OTG version 2.xx" or "OTG version 3.xx". + */ + hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID); + if ((hw->snpsid & 0xfffff000) != 0x4f542000 && + (hw->snpsid & 0xfffff000) != 0x4f543000 && + (hw->snpsid & 0xffff0000) != 0x55310000 && + (hw->snpsid & 0xffff0000) != 0x55320000) { + dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", + hw->snpsid); + return -ENODEV; + } + + dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n", + hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, + hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); + + hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1); + hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); + hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); + hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); + grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); + + dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1); + dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2); + dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3); + dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); + dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); + + /* + * Host specific hardware parameters. 
Reading these parameters + * requires the controller to be in host mode. The mode will + * be forced, if necessary, to read these values. + */ + dwc2_get_host_hwparams(hsotg); + dwc2_get_dev_hwparams(hsotg); + + /* hwcfg1 */ + hw->dev_ep_dirs = hwcfg1; + + /* hwcfg2 */ + hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> + GHWCFG2_OP_MODE_SHIFT; + hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >> + GHWCFG2_ARCHITECTURE_SHIFT; + hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO); + hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >> + GHWCFG2_NUM_HOST_CHAN_SHIFT); + hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >> + GHWCFG2_HS_PHY_TYPE_SHIFT; + hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> + GHWCFG2_FS_PHY_TYPE_SHIFT; + hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >> + GHWCFG2_NUM_DEV_EP_SHIFT; + hw->nperio_tx_q_depth = + (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >> + GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1; + hw->host_perio_tx_q_depth = + (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >> + GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1; + hw->dev_token_q_depth = + (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >> + GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT; + + /* hwcfg3 */ + width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> + GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; + hw->max_transfer_size = (1 << (width + 11)) - 1; + width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> + GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; + hw->max_packet_count = (1 << (width + 4)) - 1; + hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C); + hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >> + GHWCFG3_DFIFO_DEPTH_SHIFT; + + /* hwcfg4 */ + hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); + hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> + GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; + hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); + hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); + hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> + GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT; + + /* fifo sizes */ + hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> + GRXFSIZ_DEPTH_SHIFT; + + dev_dbg(hsotg->dev, "Detected values from hardware:\n"); + dev_dbg(hsotg->dev, " op_mode=%d\n", + hw->op_mode); + dev_dbg(hsotg->dev, " arch=%d\n", + hw->arch); + dev_dbg(hsotg->dev, " dma_desc_enable=%d\n", + hw->dma_desc_enable); + dev_dbg(hsotg->dev, " power_optimized=%d\n", + hw->power_optimized); + dev_dbg(hsotg->dev, " i2c_enable=%d\n", + hw->i2c_enable); + dev_dbg(hsotg->dev, " hs_phy_type=%d\n", + hw->hs_phy_type); + dev_dbg(hsotg->dev, " fs_phy_type=%d\n", + hw->fs_phy_type); + dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n", + hw->utmi_phy_data_width); + dev_dbg(hsotg->dev, " num_dev_ep=%d\n", + hw->num_dev_ep); + dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n", + hw->num_dev_perio_in_ep); + dev_dbg(hsotg->dev, " host_channels=%d\n", + hw->host_channels); + dev_dbg(hsotg->dev, " max_transfer_size=%d\n", + hw->max_transfer_size); + dev_dbg(hsotg->dev, " max_packet_count=%d\n", + hw->max_packet_count); + dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n", + hw->nperio_tx_q_depth); + dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n", + hw->host_perio_tx_q_depth); + dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n", + hw->dev_token_q_depth); + dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n", + hw->enable_dynamic_fifo); + dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n", + hw->en_multiple_tx_fifo); + dev_dbg(hsotg->dev, " total_fifo_size=%d\n", + 
hw->total_fifo_size); + dev_dbg(hsotg->dev, " rx_fifo_size=%d\n", + hw->rx_fifo_size); + dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n", + hw->host_nperio_tx_fifo_size); + dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n", + hw->host_perio_tx_fifo_size); + dev_dbg(hsotg->dev, "\n"); + + return 0; +} + +int dwc2_init_params(struct dwc2_hsotg *hsotg) +{ + const struct of_device_id *match; + struct dwc2_core_params params; + + match = of_match_device(dwc2_of_match_table, hsotg->dev); + if (match && match->data) + params = *((struct dwc2_core_params *)match->data); + else + params = params_default; + + if (dwc2_is_fs_iot(hsotg)) { + params.speed = DWC2_SPEED_PARAM_FULL; + params.phy_type = DWC2_PHY_TYPE_PARAM_FS; + } + + dwc2_set_parameters(hsotg, ¶ms); + + return 0; +} diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c index ae419615a176a5e1da5ad13f8167c3bcddf0cdfe..a23329e3d7cd209f6d251c8a209f3a4a538e529d 100644 --- a/drivers/usb/dwc2/pci.c +++ b/drivers/usb/dwc2/pci.c @@ -62,6 +62,20 @@ struct dwc2_pci_glue { struct platform_device *phy; }; +static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2) +{ + if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS && + pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) { + struct property_entry properties[] = { + { }, + }; + + return platform_device_add_properties(dwc2, properties); + } + + return 0; +} + static void dwc2_pci_remove(struct pci_dev *pci) { struct dwc2_pci_glue *glue = pci_get_drvdata(pci); @@ -122,6 +136,10 @@ static int dwc2_pci_probe(struct pci_dev *pci, return PTR_ERR(phy); } + ret = dwc2_pci_quirks(pci, dwc2); + if (ret) + goto err; + ret = platform_device_add(dwc2); if (ret) { dev_err(dev, "failed to register dwc2 device\n"); diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 8e1728b39a497edb08bf6232b75c63b31c72d40f..4fc8c603afb8b476266da434b4cd4a0625f7f30b 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -55,165 +55,6 @@ static const char dwc2_driver_name[] = "dwc2"; -static const struct dwc2_core_params params_hi6220 = { - .otg_cap = 2, /* No HNP/SRP capable */ - .otg_ver = 0, /* 1.3 */ - .dma_enable = 1, - .dma_desc_enable = 0, - .dma_desc_fs_enable = 0, - .speed = 0, /* High Speed */ - .enable_dynamic_fifo = 1, - .en_multiple_tx_fifo = 1, - .host_rx_fifo_size = 512, - .host_nperio_tx_fifo_size = 512, - .host_perio_tx_fifo_size = 512, - .max_transfer_size = 65535, - .max_packet_count = 511, - .host_channels = 16, - .phy_type = 1, /* UTMI */ - .phy_utmi_width = 8, - .phy_ulpi_ddr = 0, /* Single */ - .phy_ulpi_ext_vbus = 0, - .i2c_enable = 0, - .ulpi_fs_ls = 0, - .host_support_fs_ls_low_power = 0, - .host_ls_low_power_phy_clk = 0, /* 48 MHz */ - .ts_dline = 0, - .reload_ctl = 0, - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << - GAHBCFG_HBSTLEN_SHIFT, - .uframe_sched = 0, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - -static const struct dwc2_core_params params_bcm2835 = { - .otg_cap = 0, /* HNP/SRP capable */ - .otg_ver = 0, /* 1.3 */ - .dma_enable = 1, - .dma_desc_enable = 0, - .dma_desc_fs_enable = 0, - .speed = 0, /* High Speed */ - .enable_dynamic_fifo = 1, - .en_multiple_tx_fifo = 1, - .host_rx_fifo_size = 774, /* 774 DWORDs */ - .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */ - .host_perio_tx_fifo_size = 512, /* 512 DWORDs */ - .max_transfer_size = 65535, - .max_packet_count = 511, - .host_channels = 8, - .phy_type = 1, /* UTMI */ - .phy_utmi_width = 8, /* 8 bits */ - .phy_ulpi_ddr = 0, /* Single */ - .phy_ulpi_ext_vbus = 0, - .i2c_enable = 0, - 
.ulpi_fs_ls = 0, - .host_support_fs_ls_low_power = 0, - .host_ls_low_power_phy_clk = 0, /* 48 MHz */ - .ts_dline = 0, - .reload_ctl = 0, - .ahbcfg = 0x10, - .uframe_sched = 0, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - -static const struct dwc2_core_params params_rk3066 = { - .otg_cap = 2, /* non-HNP/non-SRP */ - .otg_ver = -1, - .dma_enable = -1, - .dma_desc_enable = 0, - .dma_desc_fs_enable = 0, - .speed = -1, - .enable_dynamic_fifo = 1, - .en_multiple_tx_fifo = -1, - .host_rx_fifo_size = 525, /* 525 DWORDs */ - .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ - .host_perio_tx_fifo_size = 256, /* 256 DWORDs */ - .max_transfer_size = -1, - .max_packet_count = -1, - .host_channels = -1, - .phy_type = -1, - .phy_utmi_width = -1, - .phy_ulpi_ddr = -1, - .phy_ulpi_ext_vbus = -1, - .i2c_enable = -1, - .ulpi_fs_ls = -1, - .host_support_fs_ls_low_power = -1, - .host_ls_low_power_phy_clk = -1, - .ts_dline = -1, - .reload_ctl = -1, - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << - GAHBCFG_HBSTLEN_SHIFT, - .uframe_sched = -1, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - -static const struct dwc2_core_params params_ltq = { - .otg_cap = 2, /* non-HNP/non-SRP */ - .otg_ver = -1, - .dma_enable = -1, - .dma_desc_enable = -1, - .dma_desc_fs_enable = -1, - .speed = -1, - .enable_dynamic_fifo = -1, - .en_multiple_tx_fifo = -1, - .host_rx_fifo_size = 288, /* 288 DWORDs */ - .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ - .host_perio_tx_fifo_size = 96, /* 96 DWORDs */ - .max_transfer_size = 65535, - .max_packet_count = 511, - .host_channels = -1, - .phy_type = -1, - .phy_utmi_width = -1, - .phy_ulpi_ddr = -1, - .phy_ulpi_ext_vbus = -1, - .i2c_enable = -1, - .ulpi_fs_ls = -1, - .host_support_fs_ls_low_power = -1, - .host_ls_low_power_phy_clk = -1, - .ts_dline = -1, - .reload_ctl = -1, - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << - GAHBCFG_HBSTLEN_SHIFT, - .uframe_sched = -1, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - -static const struct dwc2_core_params params_amlogic = { - .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE, - .otg_ver = -1, - .dma_enable = 1, - .dma_desc_enable = 0, - .dma_desc_fs_enable = 0, - .speed = DWC2_SPEED_PARAM_HIGH, - .enable_dynamic_fifo = 1, - .en_multiple_tx_fifo = -1, - .host_rx_fifo_size = 512, - .host_nperio_tx_fifo_size = 500, - .host_perio_tx_fifo_size = 500, - .max_transfer_size = -1, - .max_packet_count = -1, - .host_channels = 16, - .phy_type = DWC2_PHY_TYPE_PARAM_UTMI, - .phy_utmi_width = -1, - .phy_ulpi_ddr = -1, - .phy_ulpi_ext_vbus = -1, - .i2c_enable = -1, - .ulpi_fs_ls = -1, - .host_support_fs_ls_low_power = -1, - .host_ls_low_power_phy_clk = -1, - .ts_dline = -1, - .reload_ctl = 1, - .ahbcfg = GAHBCFG_HBSTLEN_INCR8 << - GAHBCFG_HBSTLEN_SHIFT, - .uframe_sched = 0, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - /* * Check the dr_mode against the module configuration and hardware * capabilities. 
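
The hunks above delete the per-platform dwc2_core_params tables (hi6220, bcm2835, rk3066, ltq, amlogic) and the of_device_id table from platform.c; dwc2_init_params() in params.c now resolves the platform parameters through dwc2_of_match_table and falls back to params_default when the matched entry carries no data. A minimal sketch of what a platform entry looks like under the new layout, assuming the tables now live next to dwc2_init_params() in params.c — the board name, compatible string, and field values are placeholders, not part of the patch:

static const struct dwc2_core_params params_myboard = {
	/*
	 * -1 means "auto-detect"; the dwc2_set_param_*() validators
	 * replace any out-of-range value with what GHWCFG* reports.
	 */
	.otg_cap			= 2,		/* no HNP/SRP */
	.speed				= -1,
	.dma_desc_enable		= 0,
	.host_rx_fifo_size		= 512,		/* DWORDs */
	.host_nperio_tx_fifo_size	= 512,
	.host_perio_tx_fifo_size	= 512,
	.host_channels			= -1,
	.phy_type			= DWC2_PHY_TYPE_PARAM_UTMI,
	.ahbcfg				= GAHBCFG_HBSTLEN_INCR4 <<
					  GAHBCFG_HBSTLEN_SHIFT,
	/* real tables initialize every remaining field explicitly */
};

static const struct of_device_id dwc2_of_match_table[] = {
	{ .compatible = "vendor,myboard-usb", .data = &params_myboard },
	{ .compatible = "snps,dwc2", .data = NULL },	/* NULL -> params_default */
	{},
};

At probe time dwc2_init_params() copies the matched table (or params_default), applies the FS IoT speed/phy override, and passes the result to dwc2_set_parameters(), which runs each value through its validator before committing it to hsotg->params.
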
@@ -510,20 +351,6 @@ static void dwc2_driver_shutdown(struct platform_device *dev) disable_irq(hsotg->irq); } -static const struct of_device_id dwc2_of_match_table[] = { - { .compatible = "brcm,bcm2835-usb", .data = ¶ms_bcm2835 }, - { .compatible = "hisilicon,hi6220-usb", .data = ¶ms_hi6220 }, - { .compatible = "rockchip,rk3066-usb", .data = ¶ms_rk3066 }, - { .compatible = "lantiq,arx100-usb", .data = ¶ms_ltq }, - { .compatible = "lantiq,xrx200-usb", .data = ¶ms_ltq }, - { .compatible = "snps,dwc2", .data = NULL }, - { .compatible = "samsung,s3c6400-hsotg", .data = NULL}, - { .compatible = "amlogic,meson8b-usb", .data = ¶ms_amlogic }, - { .compatible = "amlogic,meson-gxbb-usb", .data = ¶ms_amlogic }, - {}, -}; -MODULE_DEVICE_TABLE(of, dwc2_of_match_table); - /** * dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg * driver @@ -538,30 +365,10 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table); */ static int dwc2_driver_probe(struct platform_device *dev) { - const struct of_device_id *match; - const struct dwc2_core_params *params; - struct dwc2_core_params defparams; struct dwc2_hsotg *hsotg; struct resource *res; int retval; - match = of_match_device(dwc2_of_match_table, &dev->dev); - if (match && match->data) { - params = match->data; - } else { - /* Default all params to autodetect */ - dwc2_set_all_params(&defparams, -1); - params = &defparams; - - /* - * Disable descriptor dma mode by default as the HW can support - * it, but does not support it for SPLIT transactions. - * Disable it for FS devices as well. - */ - defparams.dma_desc_enable = 0; - defparams.dma_desc_fs_enable = 0; - } - hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL); if (!hsotg) return -ENOMEM; @@ -591,13 +398,6 @@ static int dwc2_driver_probe(struct platform_device *dev) spin_lock_init(&hsotg->lock); - hsotg->core_params = devm_kzalloc(&dev->dev, - sizeof(*hsotg->core_params), GFP_KERNEL); - if (!hsotg->core_params) - return -ENOMEM; - - dwc2_set_all_params(hsotg->core_params, -1); - hsotg->irq = platform_get_irq(dev, 0); if (hsotg->irq < 0) { dev_err(&dev->dev, "missing IRQ resource\n"); @@ -631,11 +431,12 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) goto error; - /* Validate parameter values */ - dwc2_set_parameters(hsotg, params); - dwc2_force_dr_mode(hsotg); + retval = dwc2_init_params(hsotg); + if (retval) + goto error; + if (hsotg->dr_mode != USB_DR_MODE_HOST) { retval = dwc2_gadget_init(hsotg, hsotg->irq); if (retval) diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index b97cde76914daae6290af9d101a3efa757072a00..c5aa235863e8c7e88b30544f8e1aba98c9a18d03 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -62,7 +62,7 @@ config USB_DWC3_OMAP config USB_DWC3_EXYNOS tristate "Samsung Exynos Platform" - depends on ARCH_EXYNOS && OF || COMPILE_TEST + depends on (ARCH_EXYNOS || COMPILE_TEST) && OF default USB_DWC3 help Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside, @@ -70,7 +70,7 @@ config USB_DWC3_EXYNOS config USB_DWC3_PCI tristate "PCIe-based Platforms" - depends on PCI + depends on PCI && ACPI default USB_DWC3 help If you're using the DesignWare Core IP with a PCIe, please say @@ -98,7 +98,7 @@ config USB_DWC3_OF_SIMPLE config USB_DWC3_ST tristate "STMicroelectronics Platforms" - depends on ARCH_STI && OF + depends on (ARCH_STI || COMPILE_TEST) && OF default USB_DWC3 help STMicroelectronics SoCs with one DesignWare Core USB3 IP diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile index 
22420e17d68bc5d708655ed164af528f2b8b9100..ffca34029b21ff6c3bdb3e44ae5531c74d78856a 100644 --- a/drivers/usb/dwc3/Makefile +++ b/drivers/usb/dwc3/Makefile @@ -3,7 +3,11 @@ CFLAGS_trace.o := -I$(src) obj-$(CONFIG_USB_DWC3) += dwc3.o -dwc3-y := core.o debug.o trace.o +dwc3-y := core.o + +ifneq ($(CONFIG_FTRACE),) + dwc3-y += trace.o +endif ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),) dwc3-y += host.o diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 7287a763cd0cc4ca5a114ed687e6c34b0bdd643d..369bab16a824599f2295dbdf91e825e1a69e4446 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -169,33 +169,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) return -ETIMEDOUT; } -/** - * dwc3_soft_reset - Issue soft reset - * @dwc: Pointer to our controller context structure - */ -static int dwc3_soft_reset(struct dwc3 *dwc) -{ - unsigned long timeout; - u32 reg; - - timeout = jiffies + msecs_to_jiffies(500); - dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST); - do { - reg = dwc3_readl(dwc->regs, DWC3_DCTL); - if (!(reg & DWC3_DCTL_CSFTRST)) - break; - - if (time_after(jiffies, timeout)) { - dev_err(dwc->dev, "Reset Timed Out\n"); - return -ETIMEDOUT; - } - - cpu_relax(); - } while (true); - - return 0; -} - /* * dwc3_frame_length_adjustment - Adjusts frame length if required * @dwc3: Pointer to our controller context structure @@ -229,7 +202,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc) static void dwc3_free_one_event_buffer(struct dwc3 *dwc, struct dwc3_event_buffer *evt) { - dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma); + dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma); } /** @@ -251,7 +224,11 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc, evt->dwc = dwc; evt->length = length; - evt->buf = dma_alloc_coherent(dwc->dev, length, + evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL); + if (!evt->cache) + return ERR_PTR(-ENOMEM); + + evt->buf = dma_alloc_coherent(dwc->sysdev, length, &evt->dma, GFP_KERNEL); if (!evt->buf) return ERR_PTR(-ENOMEM); @@ -305,13 +282,7 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc) struct dwc3_event_buffer *evt; evt = dwc->ev_buf; - dwc3_trace(trace_dwc3_core, - "Event buf %p dma %08llx length %d\n", - evt->buf, (unsigned long long) evt->dma, - evt->length); - evt->lpos = 0; - dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), lower_32_bits(evt->dma)); dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), @@ -370,11 +341,11 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc) if (!WARN_ON(dwc->scratchbuf)) return 0; - scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf, + scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf, dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dwc->dev, scratch_addr)) { - dev_err(dwc->dev, "failed to map scratch buffer\n"); + if (dma_mapping_error(dwc->sysdev, scratch_addr)) { + dev_err(dwc->sysdev, "failed to map scratch buffer\n"); ret = -EFAULT; goto err0; } @@ -398,7 +369,7 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc) return 0; err1: - dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch * + dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); err0: @@ -417,7 +388,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc) if (!WARN_ON(dwc->scratchbuf)) return; - dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch * + dma_unmap_single(dwc->sysdev, 
dwc->scratch_addr, dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); kfree(dwc->scratchbuf); } @@ -428,9 +399,6 @@ static void dwc3_core_num_eps(struct dwc3 *dwc) dwc->num_in_eps = DWC3_NUM_IN_EPS(parms); dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps; - - dwc3_trace(trace_dwc3_core, "found %d IN and %d OUT endpoints", - dwc->num_in_eps, dwc->num_out_eps); } static void dwc3_cache_hwparams(struct dwc3 *dwc) @@ -524,13 +492,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc) } /* FALLTHROUGH */ case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: - /* Making sure the interface and PHY are operational */ - ret = dwc3_soft_reset(dwc); - if (ret) - return ret; - - udelay(1); - ret = dwc3_ulpi_init(dwc); if (ret) return ret; @@ -594,19 +555,12 @@ static void dwc3_core_exit(struct dwc3 *dwc) phy_power_off(dwc->usb3_generic_phy); } -/** - * dwc3_core_init - Low-level initialization of DWC3 Core - * @dwc: Pointer to our controller context structure - * - * Returns 0 on success otherwise negative errno. - */ -static int dwc3_core_init(struct dwc3 *dwc) +static bool dwc3_core_is_valid(struct dwc3 *dwc) { - u32 hwparams4 = dwc->hwparams.hwparams4; - u32 reg; - int ret; + u32 reg; reg = dwc3_readl(dwc->regs, DWC3_GSNPSID); + /* This should read as U3 followed by revision number */ if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) { /* Detected DWC_usb3 IP */ @@ -616,36 +570,16 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER); dwc->revision |= DWC3_REVISION_IS_DWC31; } else { - dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); - ret = -ENODEV; - goto err0; + return false; } - /* - * Write Linux Version Code to our GUID register so it's easy to figure - * out which kernel version a bug was found. - */ - dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); - - /* Handle USB2.0-only core configuration */ - if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == - DWC3_GHWPARAMS3_SSPHY_IFC_DIS) { - if (dwc->maximum_speed == USB_SPEED_SUPER) - dwc->maximum_speed = USB_SPEED_HIGH; - } - - /* issue device SoftReset too */ - ret = dwc3_soft_reset(dwc); - if (ret) - goto err0; - - ret = dwc3_core_soft_reset(dwc); - if (ret) - goto err0; + return true; +} - ret = dwc3_phy_setup(dwc); - if (ret) - goto err0; +static void dwc3_core_setup_global_control(struct dwc3 *dwc) +{ + u32 hwparams4 = dwc->hwparams.hwparams4; + u32 reg; reg = dwc3_readl(dwc->regs, DWC3_GCTL); reg &= ~DWC3_GCTL_SCALEDOWN_MASK; @@ -683,13 +617,13 @@ static int dwc3_core_init(struct dwc3 *dwc) reg |= DWC3_GCTL_GBLHIBERNATIONEN; break; default: - dwc3_trace(trace_dwc3_core, "No power optimization available\n"); + /* nothing */ + break; } /* check if current dwc3 is on simulation board */ if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) { - dwc3_trace(trace_dwc3_core, - "running on FPGA platform\n"); + dev_info(dwc->dev, "Running with FPGA optmizations\n"); dwc->is_fpga = true; } @@ -714,7 +648,47 @@ static int dwc3_core_init(struct dwc3 *dwc) reg |= DWC3_GCTL_U2RSTECN; dwc3_writel(dwc->regs, DWC3_GCTL, reg); +} + +/** + * dwc3_core_init - Low-level initialization of DWC3 Core + * @dwc: Pointer to our controller context structure + * + * Returns 0 on success otherwise negative errno. 
+ */ +static int dwc3_core_init(struct dwc3 *dwc) +{ + u32 reg; + int ret; + + if (!dwc3_core_is_valid(dwc)) { + dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); + ret = -ENODEV; + goto err0; + } + + /* + * Write Linux Version Code to our GUID register so it's easy to figure + * out which kernel version a bug was found. + */ + dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); + + /* Handle USB2.0-only core configuration */ + if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == + DWC3_GHWPARAMS3_SSPHY_IFC_DIS) { + if (dwc->maximum_speed == USB_SPEED_SUPER) + dwc->maximum_speed = USB_SPEED_HIGH; + } + + ret = dwc3_core_soft_reset(dwc); + if (ret) + goto err0; + ret = dwc3_phy_setup(dwc); + if (ret) + goto err0; + + dwc3_core_setup_global_control(dwc); dwc3_core_num_eps(dwc); ret = dwc3_setup_scratch_buffers(dwc); @@ -766,18 +740,27 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); } + /* + * Enable hardware control of sending remote wakeup in HS when + * the device is in the L1 state. + */ + if (dwc->revision >= DWC3_REVISION_290A) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL1); + reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW; + dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); + } + return 0; err4: - phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); err3: - phy_power_off(dwc->usb3_generic_phy); + phy_power_off(dwc->usb2_generic_phy); err2: usb_phy_set_suspend(dwc->usb2_phy, 1); usb_phy_set_suspend(dwc->usb3_phy, 1); - dwc3_core_exit(dwc); err1: usb_phy_shutdown(dwc->usb2_phy); @@ -920,57 +903,13 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc) } } -#define DWC3_ALIGN_MASK (16 - 1) - -static int dwc3_probe(struct platform_device *pdev) +static void dwc3_get_properties(struct dwc3 *dwc) { - struct device *dev = &pdev->dev; - struct resource *res; - struct dwc3 *dwc; + struct device *dev = dwc->dev; u8 lpm_nyet_threshold; u8 tx_de_emphasis; u8 hird_threshold; - int ret; - - void __iomem *regs; - void *mem; - - mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL); - if (!mem) - return -ENOMEM; - - dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1); - dwc->mem = mem; - dwc->dev = dev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing memory resource\n"); - return -ENODEV; - } - - dwc->xhci_resources[0].start = res->start; - dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + - DWC3_XHCI_REGS_END; - dwc->xhci_resources[0].flags = res->flags; - dwc->xhci_resources[0].name = res->name; - - res->start += DWC3_GLOBALS_REGS_START; - - /* - * Request memory region but exclude xHCI regs, - * since it will be requested by the xhci-plat driver. 
- */ - regs = devm_ioremap_resource(dev, res); - if (IS_ERR(regs)) { - ret = PTR_ERR(regs); - goto err0; - } - - dwc->regs = regs; - dwc->regs_size = resource_size(res); - /* default to highest possible threshold */ lpm_nyet_threshold = 0xff; @@ -987,6 +926,13 @@ static int dwc3_probe(struct platform_device *pdev) dwc->dr_mode = usb_get_dr_mode(dev); dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); + dwc->sysdev_is_parent = device_property_read_bool(dev, + "linux,sysdev_is_parent"); + if (dwc->sysdev_is_parent) + dwc->sysdev = dwc->dev->parent; + else + dwc->sysdev = dwc->dev; + dwc->has_lpm_erratum = device_property_read_bool(dev, "snps,has-lpm-erratum"); device_property_read_u8(dev, "snps,lpm-nyet-threshold", @@ -1042,6 +988,112 @@ static int dwc3_probe(struct platform_device *pdev) dwc->hird_threshold = hird_threshold | (dwc->is_utmi_l1_suspend << 4); + dwc->imod_interval = 0; +} + +/* check whether the core supports IMOD */ +bool dwc3_has_imod(struct dwc3 *dwc) +{ + return ((dwc3_is_usb3(dwc) && + dwc->revision >= DWC3_REVISION_300A) || + (dwc3_is_usb31(dwc) && + dwc->revision >= DWC3_USB31_REVISION_120A)); +} + +static void dwc3_check_params(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + + /* Check for proper value of imod_interval */ + if (dwc->imod_interval && !dwc3_has_imod(dwc)) { + dev_warn(dwc->dev, "Interrupt moderation not supported\n"); + dwc->imod_interval = 0; + } + + /* + * Workaround for STAR 9000961433 which affects only version + * 3.00a of the DWC_usb3 core. This prevents the controller + * interrupt from being masked while handling events. IMOD + * allows us to work around this issue. Enable it for the + * affected version. + */ + if (!dwc->imod_interval && + (dwc->revision == DWC3_REVISION_300A)) + dwc->imod_interval = 1; + + /* Check the maximum_speed parameter */ + switch (dwc->maximum_speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + break; + default: + dev_err(dev, "invalid maximum_speed parameter %d\n", + dwc->maximum_speed); + /* fall through */ + case USB_SPEED_UNKNOWN: + /* default to superspeed */ + dwc->maximum_speed = USB_SPEED_SUPER; + + /* + * default to superspeed plus if we are capable. + */ + if (dwc3_is_usb31(dwc) && + (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == + DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) + dwc->maximum_speed = USB_SPEED_SUPER_PLUS; + + break; + } +} + +static int dwc3_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct dwc3 *dwc; + + int ret; + + void __iomem *regs; + + dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL); + if (!dwc) + return -ENOMEM; + + dwc->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "missing memory resource\n"); + return -ENODEV; + } + + dwc->xhci_resources[0].start = res->start; + dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + + DWC3_XHCI_REGS_END; + dwc->xhci_resources[0].flags = res->flags; + dwc->xhci_resources[0].name = res->name; + + res->start += DWC3_GLOBALS_REGS_START; + + /* + * Request memory region but exclude xHCI regs, + * since it will be requested by the xhci-plat driver. 
+ */ + regs = devm_ioremap_resource(dev, res); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto err0; + } + + dwc->regs = regs; + dwc->regs_size = resource_size(res); + + dwc3_get_properties(dwc); + platform_set_drvdata(pdev, dwc); dwc3_cache_hwparams(dwc); @@ -1051,12 +1103,6 @@ static int dwc3_probe(struct platform_device *pdev) spin_lock_init(&dwc->lock); - if (!dev->dma_mask) { - dev->dma_mask = dev->parent->dma_mask; - dev->dma_parms = dev->parent->dma_parms; - dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask); - } - pm_runtime_set_active(dev); pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY); @@ -1088,32 +1134,7 @@ static int dwc3_probe(struct platform_device *pdev) goto err4; } - /* Check the maximum_speed parameter */ - switch (dwc->maximum_speed) { - case USB_SPEED_LOW: - case USB_SPEED_FULL: - case USB_SPEED_HIGH: - case USB_SPEED_SUPER: - case USB_SPEED_SUPER_PLUS: - break; - default: - dev_err(dev, "invalid maximum_speed parameter %d\n", - dwc->maximum_speed); - /* fall through */ - case USB_SPEED_UNKNOWN: - /* default to superspeed */ - dwc->maximum_speed = USB_SPEED_SUPER; - - /* - * default to superspeed plus if we are capable. - */ - if (dwc3_is_usb31(dwc) && - (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == - DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) - dwc->maximum_speed = USB_SPEED_SUPER_PLUS; - - break; - } + dwc3_check_params(dwc); ret = dwc3_core_init_mode(dwc); if (ret) diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 6b60e42626a235e2f8a7197749c9e43c1b2723ea..de5a8570be04213cd3fd39c774662c9173907e34 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -37,6 +38,7 @@ #define DWC3_MSG_MAX 500 /* Global constants */ +#define DWC3_PULL_UP_TIMEOUT 500 /* ms */ #define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */ #define DWC3_EP0_BOUNCE_SIZE 512 #define DWC3_ENDPOINTS_NUM 32 @@ -65,6 +67,7 @@ #define DWC3_DEVICE_EVENT_OVERFLOW 11 #define DWC3_GEVNTCOUNT_MASK 0xfffc +#define DWC3_GEVNTCOUNT_EHB (1 << 31) #define DWC3_GSNPSID_MASK 0xffff0000 #define DWC3_GSNPSREV_MASK 0xffff @@ -147,6 +150,8 @@ #define DWC3_DEPCMDPAR0 0x08 #define DWC3_DEPCMD 0x0c +#define DWC3_DEV_IMOD(n) (0xca00 + (n * 0x4)) + /* OTG Registers */ #define DWC3_OCFG 0xcc00 #define DWC3_OCTL 0xcc04 @@ -198,6 +203,9 @@ #define DWC3_GCTL_GBLHIBERNATIONEN (1 << 1) #define DWC3_GCTL_DSBLCLKGTNG (1 << 0) +/* Global User Control 1 Register */ +#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW (1 << 24) + /* Global USB2 PHY Configuration Register */ #define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31) #define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS (1 << 30) @@ -450,6 +458,8 @@ #define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0) #define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0) +#define DWC3_DEPCMD_CMD(x) ((x) & 0xf) + /* The EP number goes 0..31 so ep0 is always out and ep1 is always in */ #define DWC3_DALEPENA_EP(n) (1 << n) @@ -458,6 +468,11 @@ #define DWC3_DEPCMD_TYPE_BULK 2 #define DWC3_DEPCMD_TYPE_INTR 3 +#define DWC3_DEV_IMOD_COUNT_SHIFT 16 +#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16) +#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0 +#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0) + /* Structures */ struct dwc3_trb; @@ -465,6 +480,7 @@ struct dwc3_trb; /** * struct dwc3_event_buffer - Software event buffer representation * @buf: _THE_ buffer + * @cache: The buffer cache used in the threaded interrupt * @length: size of this buffer * @lpos: event offset * @count: 
cache of last read event count register @@ -474,6 +490,7 @@ struct dwc3_trb; */ struct dwc3_event_buffer { void *buf; + void *cache; unsigned length; unsigned int lpos; unsigned int count; @@ -499,6 +516,7 @@ struct dwc3_event_buffer { * @endpoint: usb endpoint * @pending_list: list of pending requests for this endpoint * @started_list: list of started requests on this endpoint + * @wait_end_transfer: wait_queue_head_t for waiting on End Transfer complete * @lock: spinlock for endpoint request queue traversal * @regs: pointer to first endpoint register * @trb_pool: array of transaction buffers @@ -524,12 +542,13 @@ struct dwc3_ep { struct list_head pending_list; struct list_head started_list; + wait_queue_head_t wait_end_transfer; + spinlock_t lock; void __iomem *regs; struct dwc3_trb *trb_pool; dma_addr_t trb_pool_dma; - const struct usb_ss_ep_comp_descriptor *comp_desc; struct dwc3 *dwc; u32 saved_state; @@ -540,6 +559,8 @@ struct dwc3_ep { #define DWC3_EP_BUSY (1 << 4) #define DWC3_EP_PENDING_REQUEST (1 << 5) #define DWC3_EP_MISSED_ISOC (1 << 6) +#define DWC3_EP_END_TRANSFER_PENDING (1 << 7) +#define DWC3_EP_TRANSFER_STARTED (1 << 8) /* This last one is specific to EP0 */ #define DWC3_EP0_DIR_IN (1 << 31) @@ -703,7 +724,7 @@ struct dwc3_hwparams { * @dep: struct dwc3_ep owning this request * @sg: pointer to first incomplete sg * @num_pending_sgs: counter to pending sgs - * @first_trb_index: index to first trb used by this request + * @remaining: amount of data remaining * @epnum: endpoint number to which this request refers * @trb: pointer to struct dwc3_trb * @trb_dma: DMA address of @trb @@ -718,7 +739,7 @@ struct dwc3_request { struct scatterlist *sg; unsigned num_pending_sgs; - u8 first_trb_index; + unsigned remaining; u8 epnum; struct dwc3_trb *trb; dma_addr_t trb_dma; @@ -748,6 +769,7 @@ struct dwc3_scratchpad_array { * @ep0_usb_req: dummy req used while handling STD USB requests * @ep0_bounce_addr: dma address of ep0_bounce * @scratch_addr: dma address of scratchbuf + * @ep0_in_setup: one control transfer is completed and enter setup phase * @lock: for synchronizing * @dev: pointer to our struct device * @xhci: pointer to our xHCI child @@ -784,7 +806,6 @@ struct dwc3_scratchpad_array { * @ep0state: state of endpoint zero * @link_state: link state * @speed: device speed (super, high, full, low) - * @mem: points to start of memory which is used for this struct. * @hwparams: copy of hwparams registers * @root: debugfs root folder pointer * @regset: debugfs pointer to regdump file @@ -798,6 +819,7 @@ struct dwc3_scratchpad_array { * @ep0_bounced: true when we used bounce buffer * @ep0_expect_in: true when we expect a DATA IN transfer * @has_hibernation: true when dwc3 was configured with Hibernation + * @sysdev_is_parent: true when dwc3 device has a parent driver * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that * there's now way for software to detect this in runtime. * @is_utmi_l1_suspend: the core asserts output signal @@ -833,6 +855,8 @@ struct dwc3_scratchpad_array { * 1 - -3.5dB de-emphasis * 2 - No de-emphasis * 3 - Reserved + * @imod_interval: set the interrupt moderation interval in 250ns + * increments or 0 to disable. 
*/ struct dwc3 { struct usb_ctrlrequest *ctrl_req; @@ -846,11 +870,13 @@ struct dwc3 { dma_addr_t ep0_bounce_addr; dma_addr_t scratch_addr; struct dwc3_request ep0_usb_req; + struct completion ep0_in_setup; /* device lock */ spinlock_t lock; struct device *dev; + struct device *sysdev; struct platform_device *xhci; struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM]; @@ -909,6 +935,7 @@ struct dwc3 { #define DWC3_REVISION_260A 0x5533260a #define DWC3_REVISION_270A 0x5533270a #define DWC3_REVISION_280A 0x5533280a +#define DWC3_REVISION_290A 0x5533290a #define DWC3_REVISION_300A 0x5533300a #define DWC3_REVISION_310A 0x5533310a @@ -918,6 +945,7 @@ struct dwc3 { */ #define DWC3_REVISION_IS_DWC31 0x80000000 #define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31) +#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31) enum dwc3_ep0_next ep0_next_event; enum dwc3_ep0_state ep0state; @@ -934,8 +962,6 @@ struct dwc3 { u8 num_out_eps; u8 num_in_eps; - void *mem; - struct dwc3_hwparams hwparams; struct dentry *root; struct debugfs_regset32 *regset; @@ -952,6 +978,7 @@ struct dwc3 { unsigned ep0_bounced:1; unsigned ep0_expect_in:1; unsigned has_hibernation:1; + unsigned sysdev_is_parent:1; unsigned has_lpm_erratum:1; unsigned is_utmi_l1_suspend:1; unsigned is_fpga:1; @@ -978,6 +1005,8 @@ struct dwc3 { unsigned tx_de_emphasis_quirk:1; unsigned tx_de_emphasis:2; + + u16 imod_interval; }; /* -------------------------------------------------------------------------- */ @@ -1039,12 +1068,16 @@ struct dwc3_event_depevt { /* Control-only Status */ #define DEPEVT_STATUS_CONTROL_DATA 1 #define DEPEVT_STATUS_CONTROL_STATUS 2 +#define DEPEVT_STATUS_CONTROL_PHASE(n) ((n) & 3) /* In response to Start Transfer */ #define DEPEVT_TRANSFER_NO_RESOURCE 1 #define DEPEVT_TRANSFER_BUS_EXPIRY 2 u32 parameters:16; + +/* For Command Complete Events */ +#define DEPEVT_PARAMETER_CMD(n) (((n) & (0xf << 8)) >> 8) } __packed; /** @@ -1133,12 +1166,20 @@ struct dwc3_gadget_ep_cmd_params { void dwc3_set_mode(struct dwc3 *dwc, u32 mode); u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); +/* check whether we are on the DWC_usb3 core */ +static inline bool dwc3_is_usb3(struct dwc3 *dwc) +{ + return !(dwc->revision & DWC3_REVISION_IS_DWC31); +} + /* check whether we are on the DWC_usb31 core */ static inline bool dwc3_is_usb31(struct dwc3 *dwc) { return !!(dwc->revision & DWC3_REVISION_IS_DWC31); } +bool dwc3_has_imod(struct dwc3 *dwc); + #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_host_init(struct dwc3 *dwc); void dwc3_host_exit(struct dwc3 *dwc); diff --git a/drivers/usb/dwc3/debug.c b/drivers/usb/dwc3/debug.c deleted file mode 100644 index 0be6885bc37006364860e0aeb9e2b644cad72a8f..0000000000000000000000000000000000000000 --- a/drivers/usb/dwc3/debug.c +++ /dev/null @@ -1,32 +0,0 @@ -/** - * debug.c - DesignWare USB3 DRD Controller Debug/Trace Support - * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com - * - * Author: Felipe Balbi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 of - * the License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include "debug.h" - -void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - vaf.fmt = fmt; - vaf.va = &args; - - trace(&vaf); - - va_end(args); -} diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index 33ab2a203c1bdc4a9d42d7bed9869ad83d2b4cb8..eeed4ffd81312c9922c4fe2e4b83627a8c41059c 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -124,6 +124,22 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state) } } +static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state) +{ + switch (state) { + case EP0_UNCONNECTED: + return "Unconnected"; + case EP0_SETUP_PHASE: + return "Setup Phase"; + case EP0_DATA_PHASE: + return "Data Phase"; + case EP0_STATUS_PHASE: + return "Status Phase"; + default: + return "UNKNOWN"; + } +} + /** * dwc3_gadget_event_string - returns event name * @event: the event code @@ -184,10 +200,11 @@ dwc3_gadget_event_string(const struct dwc3_event_devt *event) * @event: then event code */ static inline const char * -dwc3_ep_event_string(const struct dwc3_event_depevt *event) +dwc3_ep_event_string(const struct dwc3_event_depevt *event, u32 ep0state) { u8 epnum = event->endpoint_number; static char str[256]; + size_t len; int status; int ret; @@ -199,6 +216,10 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event) switch (event->endpoint_event) { case DWC3_DEPEVT_XFERCOMPLETE: strcat(str, "Transfer Complete"); + len = strlen(str); + + if (epnum <= 1) + sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state)); break; case DWC3_DEPEVT_XFERINPROGRESS: strcat(str, "Transfer In-Progress"); @@ -207,6 +228,19 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event) strcat(str, "Transfer Not Ready"); status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; strcat(str, status ? " (Active)" : " (Not Active)"); + + /* Control Endpoints */ + if (epnum <= 1) { + int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status); + + switch (phase) { + case DEPEVT_STATUS_CONTROL_DATA: + strcat(str, " [Data Phase]"); + break; + case DEPEVT_STATUS_CONTROL_STATUS: + strcat(str, " [Status Phase]"); + } + } break; case DWC3_DEPEVT_RXTXFIFOEVT: strcat(str, "FIFO"); @@ -270,14 +304,14 @@ static inline const char *dwc3_gadget_event_type_string(u8 event) } } -static inline const char *dwc3_decode_event(u32 event) +static inline const char *dwc3_decode_event(u32 event, u32 ep0state) { const union dwc3_event evt = (union dwc3_event) event; if (evt.type.is_devspec) return dwc3_gadget_event_string(&evt.devt); else - return dwc3_ep_event_string(&evt.depevt); + return dwc3_ep_event_string(&evt.depevt, ep0state); } static inline const char *dwc3_ep_cmd_status_string(int status) @@ -310,7 +344,6 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status) } } -void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...); #ifdef CONFIG_DEBUG_FS extern void dwc3_debugfs_init(struct dwc3 *); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 2f1fb7e7aa548f831fb9cfa63d24513c292d15b4..e27899bb57064b94811fae851587b06b743128ff 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -117,15 +116,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev) if (!exynos) return -ENOMEM; - /* - * Right now device-tree probed devices don't get dma_mask set. 
- * Since shared usb code relies on it, set it here for now. - * Once we move to full device tree support this will vanish off. - */ - ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); - if (ret) - return ret; - platform_set_drvdata(pdev, exynos); exynos->dev = dev; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 6df0f5dad9a4cdecd4c1841c6a277de167e1960e..2b73339f286ba109d74e465aa55189bf84463398 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -39,6 +39,27 @@ #define PCI_DEVICE_ID_INTEL_APL 0x5aaa #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 +#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" +#define PCI_INTEL_BXT_FUNC_PMU_PWR 4 +#define PCI_INTEL_BXT_STATE_D0 0 +#define PCI_INTEL_BXT_STATE_D3 3 + +/** + * struct dwc3_pci - Driver private structure + * @dwc3: child dwc3 platform_device + * @pci: our link to PCI bus + * @uuid: _DSM UUID + * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM + */ +struct dwc3_pci { + struct platform_device *dwc3; + struct pci_dev *pci; + + u8 uuid[16]; + + unsigned int has_dsm_for_pm:1; +}; + static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; @@ -48,8 +69,21 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { { }, }; -static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3) +static int dwc3_pci_quirks(struct dwc3_pci *dwc) { + struct platform_device *dwc3 = dwc->dwc3; + struct pci_dev *pdev = dwc->pci; + int ret; + + struct property_entry sysdev_property[] = { + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), + { }, + }; + + ret = platform_device_add_properties(dwc3, sysdev_property); + if (ret) + return ret; + if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == PCI_DEVICE_ID_AMD_NL_USB) { struct property_entry properties[] = { @@ -89,6 +123,12 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3) if (ret < 0) return ret; + if (pdev->device == PCI_DEVICE_ID_INTEL_BXT || + pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) { + acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid); + dwc->has_dsm_for_pm = true; + } + if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) { struct gpio_desc *gpio; @@ -139,8 +179,8 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3) static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) { + struct dwc3_pci *dwc; struct resource res[2]; - struct platform_device *dwc3; int ret; struct device *dev = &pci->dev; @@ -152,11 +192,13 @@ static int dwc3_pci_probe(struct pci_dev *pci, pci_set_master(pci); - dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO); - if (!dwc3) { - dev_err(dev, "couldn't allocate dwc3 device\n"); + dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL); + if (!dwc) + return -ENOMEM; + + dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO); + if (!dwc->dwc3) return -ENOMEM; - } memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res)); @@ -169,20 +211,21 @@ static int dwc3_pci_probe(struct pci_dev *pci, res[1].name = "dwc_usb3"; res[1].flags = IORESOURCE_IRQ; - ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res)); + ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res)); if (ret) { dev_err(dev, "couldn't add resources to dwc3 device\n"); return ret; } - dwc3->dev.parent = dev; - ACPI_COMPANION_SET(&dwc3->dev, ACPI_COMPANION(dev)); + dwc->pci = pci; + dwc->dwc3->dev.parent = dev; + 
ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev)); - ret = dwc3_pci_quirks(pci, dwc3); + ret = dwc3_pci_quirks(dwc); if (ret) goto err; - ret = platform_device_add(dwc3); + ret = platform_device_add(dwc->dwc3); if (ret) { dev_err(dev, "failed to register dwc3 device\n"); goto err; @@ -190,21 +233,23 @@ static int dwc3_pci_probe(struct pci_dev *pci, device_init_wakeup(dev, true); device_set_run_wake(dev, true); - pci_set_drvdata(pci, dwc3); + pci_set_drvdata(pci, dwc); pm_runtime_put(dev); return 0; err: - platform_device_put(dwc3); + platform_device_put(dwc->dwc3); return ret; } static void dwc3_pci_remove(struct pci_dev *pci) { + struct dwc3_pci *dwc = pci_get_drvdata(pci); + device_init_wakeup(&pci->dev, false); pm_runtime_get(&pci->dev); acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev)); - platform_device_unregister(pci_get_drvdata(pci)); + platform_device_unregister(dwc->dwc3); } static const struct pci_device_id dwc3_pci_id_table[] = { @@ -234,40 +279,75 @@ static const struct pci_device_id dwc3_pci_id_table[] = { }; MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); +#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP) +static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param) +{ + union acpi_object *obj; + union acpi_object tmp; + union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp); + + if (!dwc->has_dsm_for_pm) + return 0; + + tmp.type = ACPI_TYPE_INTEGER; + tmp.integer.value = param; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid, + 1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4); + if (!obj) { + dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n"); + return -EIO; + } + + ACPI_FREE(obj); + + return 0; +} +#endif /* CONFIG_PM || CONFIG_PM_SLEEP */ + #ifdef CONFIG_PM static int dwc3_pci_runtime_suspend(struct device *dev) { + struct dwc3_pci *dwc = dev_get_drvdata(dev); + if (device_run_wake(dev)) - return 0; + return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3); return -EBUSY; } static int dwc3_pci_runtime_resume(struct device *dev) { - struct platform_device *dwc3 = dev_get_drvdata(dev); + struct dwc3_pci *dwc = dev_get_drvdata(dev); + struct platform_device *dwc3 = dwc->dwc3; + int ret; + + ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0); + if (ret) + return ret; return pm_runtime_get(&dwc3->dev); } #endif /* CONFIG_PM */ #ifdef CONFIG_PM_SLEEP -static int dwc3_pci_pm_dummy(struct device *dev) +static int dwc3_pci_suspend(struct device *dev) { - /* - * There's nothing to do here. No, seriously. Everything is either taken - * care either by PCI subsystem or dwc3/core.c, so we have nothing - * missing here. 
- * - * So you'd think we didn't need this at all, but PCI subsystem will - * bail out if we don't have a valid callback :-s - */ - return 0; + struct dwc3_pci *dwc = dev_get_drvdata(dev); + + return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3); +} + +static int dwc3_pci_resume(struct device *dev) +{ + struct dwc3_pci *dwc = dev_get_drvdata(dev); + + return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0); } #endif /* CONFIG_PM_SLEEP */ static struct dev_pm_ops dwc3_pci_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy) + SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume) SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume, NULL) }; diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 89a2f712fdfe32f5fc0a6fc0681b0d8db005e2a2..dfbf464eb88c8f379b06a3f21d61ac8c5335a163 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "core.h" @@ -218,7 +219,6 @@ static int st_dwc3_probe(struct platform_device *pdev) if (IS_ERR(regmap)) return PTR_ERR(regmap); - dma_set_coherent_mask(dev, dev->coherent_dma_mask); dwc3_data->dev = dev; dwc3_data->regmap = regmap; diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index fe79d771dee4c4c936ae72756020e047e5500db4..4878d187c7d4a4b3b3851c29624ff04bb32df020 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -39,22 +39,6 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep); static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req); -static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state) -{ - switch (state) { - case EP0_UNCONNECTED: - return "Unconnected"; - case EP0_SETUP_PHASE: - return "Setup Phase"; - case EP0_DATA_PHASE: - return "Data Phase"; - case EP0_STATUS_PHASE: - return "Status Phase"; - default: - return "UNKNOWN"; - } -} - static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, u32 len, u32 type, bool chain) { @@ -65,10 +49,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, int ret; dep = dwc->eps[epnum]; - if (dep->flags & DWC3_EP_BUSY) { - dwc3_trace(trace_dwc3_ep0, "%s still busy", dep->name); + if (dep->flags & DWC3_EP_BUSY) return 0; - } trb = &dwc->ep0_trb[dep->trb_enqueue]; @@ -99,11 +81,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, trace_dwc3_prepare_trb(dep, trb); ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, ¶ms); - if (ret < 0) { - dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed", - dep->name); + if (ret < 0) return ret; - } dep->flags |= DWC3_EP_BUSY; dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); @@ -163,9 +142,6 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep, if (dwc->ep0state == EP0_STATUS_PHASE) __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]); - else - dwc3_trace(trace_dwc3_ep0, - "too early for delayed status"); return 0; } @@ -229,9 +205,8 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, spin_lock_irqsave(&dwc->lock, flags); if (!dep->endpoint.desc) { - dwc3_trace(trace_dwc3_ep0, - "trying to queue request %p to disabled %s", - request, dep->name); + dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", + dep->name); ret = -ESHUTDOWN; goto out; } @@ -242,11 +217,6 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, goto out; } - dwc3_trace(trace_dwc3_ep0, - 
"queueing request %p to %s length %d state '%s'", - request, dep->name, request->length, - dwc3_ep0_state_string(dwc->ep0state)); - ret = __dwc3_gadget_ep0_queue(dep, req); out: @@ -308,6 +278,8 @@ void dwc3_ep0_out_start(struct dwc3 *dwc) { int ret; + complete(&dwc->ep0_in_setup); + ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8, DWC3_TRBCTL_CONTROL_SETUP, false); WARN_ON(ret < 0); @@ -395,126 +367,198 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req); } -static int dwc3_ep0_handle_feature(struct dwc3 *dwc, +static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state, + int set) +{ + u32 reg; + + if (state != USB_STATE_CONFIGURED) + return -EINVAL; + if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && + (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) + return -EINVAL; + + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + if (set) + reg |= DWC3_DCTL_INITU1ENA; + else + reg &= ~DWC3_DCTL_INITU1ENA; + dwc3_writel(dwc->regs, DWC3_DCTL, reg); + + return 0; +} + +static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state, + int set) +{ + u32 reg; + + + if (state != USB_STATE_CONFIGURED) + return -EINVAL; + if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && + (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) + return -EINVAL; + + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + if (set) + reg |= DWC3_DCTL_INITU2ENA; + else + reg &= ~DWC3_DCTL_INITU2ENA; + dwc3_writel(dwc->regs, DWC3_DCTL, reg); + + return 0; +} + +static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state, + u32 wIndex, int set) +{ + if ((wIndex & 0xff) != 0) + return -EINVAL; + if (!set) + return -EINVAL; + + switch (wIndex >> 8) { + case TEST_J: + case TEST_K: + case TEST_SE0_NAK: + case TEST_PACKET: + case TEST_FORCE_EN: + dwc->test_mode_nr = wIndex >> 8; + dwc->test_mode = true; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int dwc3_ep0_handle_device(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl, int set) { - struct dwc3_ep *dep; - u32 recip; + enum usb_device_state state; u32 wValue; u32 wIndex; - u32 reg; - int ret; - enum usb_device_state state; + int ret = 0; wValue = le16_to_cpu(ctrl->wValue); wIndex = le16_to_cpu(ctrl->wIndex); - recip = ctrl->bRequestType & USB_RECIP_MASK; state = dwc->gadget.state; - switch (recip) { - case USB_RECIP_DEVICE: + switch (wValue) { + case USB_DEVICE_REMOTE_WAKEUP: + break; + /* + * 9.4.1 says only only for SS, in AddressState only for + * default control pipe + */ + case USB_DEVICE_U1_ENABLE: + ret = dwc3_ep0_handle_u1(dwc, state, set); + break; + case USB_DEVICE_U2_ENABLE: + ret = dwc3_ep0_handle_u2(dwc, state, set); + break; + case USB_DEVICE_LTM_ENABLE: + ret = -EINVAL; + break; + case USB_DEVICE_TEST_MODE: + ret = dwc3_ep0_handle_test(dwc, state, wIndex, set); + break; + default: + ret = -EINVAL; + } - switch (wValue) { - case USB_DEVICE_REMOTE_WAKEUP: - break; + return ret; +} + +static int dwc3_ep0_handle_intf(struct dwc3 *dwc, + struct usb_ctrlrequest *ctrl, int set) +{ + enum usb_device_state state; + u32 wValue; + u32 wIndex; + int ret = 0; + + wValue = le16_to_cpu(ctrl->wValue); + wIndex = le16_to_cpu(ctrl->wIndex); + state = dwc->gadget.state; + + switch (wValue) { + case USB_INTRF_FUNC_SUSPEND: /* - * 9.4.1 says only only for SS, in AddressState only for - * default control pipe + * REVISIT: Ideally we would enable some low power mode here, + * however it's unclear what we should be doing here. 
+ * + * For now, we're not doing anything, just making sure we return + * 0 so USB Command Verifier tests pass without any errors. */ - case USB_DEVICE_U1_ENABLE: - if (state != USB_STATE_CONFIGURED) - return -EINVAL; - if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && - (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) - return -EINVAL; + break; + default: + ret = -EINVAL; + } - reg = dwc3_readl(dwc->regs, DWC3_DCTL); - if (set) - reg |= DWC3_DCTL_INITU1ENA; - else - reg &= ~DWC3_DCTL_INITU1ENA; - dwc3_writel(dwc->regs, DWC3_DCTL, reg); - break; + return ret; +} - case USB_DEVICE_U2_ENABLE: - if (state != USB_STATE_CONFIGURED) - return -EINVAL; - if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && - (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) - return -EINVAL; +static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc, + struct usb_ctrlrequest *ctrl, int set) +{ + struct dwc3_ep *dep; + enum usb_device_state state; + u32 wValue; + u32 wIndex; + int ret; - reg = dwc3_readl(dwc->regs, DWC3_DCTL); - if (set) - reg |= DWC3_DCTL_INITU2ENA; - else - reg &= ~DWC3_DCTL_INITU2ENA; - dwc3_writel(dwc->regs, DWC3_DCTL, reg); - break; + wValue = le16_to_cpu(ctrl->wValue); + wIndex = le16_to_cpu(ctrl->wIndex); + state = dwc->gadget.state; - case USB_DEVICE_LTM_ENABLE: + switch (wValue) { + case USB_ENDPOINT_HALT: + dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex); + if (!dep) return -EINVAL; - case USB_DEVICE_TEST_MODE: - if ((wIndex & 0xff) != 0) - return -EINVAL; - if (!set) - return -EINVAL; - - switch (wIndex >> 8) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: - dwc->test_mode_nr = wIndex >> 8; - dwc->test_mode = true; - break; - default: - return -EINVAL; - } + if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) break; - default: + + ret = __dwc3_gadget_ep_set_halt(dep, set, true); + if (ret) return -EINVAL; - } break; + default: + return -EINVAL; + } + return 0; +} + +static int dwc3_ep0_handle_feature(struct dwc3 *dwc, + struct usb_ctrlrequest *ctrl, int set) +{ + u32 recip; + int ret; + enum usb_device_state state; + + recip = ctrl->bRequestType & USB_RECIP_MASK; + state = dwc->gadget.state; + + switch (recip) { + case USB_RECIP_DEVICE: + ret = dwc3_ep0_handle_device(dwc, ctrl, set); + break; case USB_RECIP_INTERFACE: - switch (wValue) { - case USB_INTRF_FUNC_SUSPEND: - if (wIndex & USB_INTRF_FUNC_SUSPEND_LP) - /* XXX enable Low power suspend */ - ; - if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) - /* XXX enable remote wakeup */ - ; - break; - default: - return -EINVAL; - } + ret = dwc3_ep0_handle_intf(dwc, ctrl, set); break; - case USB_RECIP_ENDPOINT: - switch (wValue) { - case USB_ENDPOINT_HALT: - dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex); - if (!dep) - return -EINVAL; - if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) - break; - ret = __dwc3_gadget_ep_set_halt(dep, set, true); - if (ret) - return -EINVAL; - break; - default: - return -EINVAL; - } + ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set); break; - default: - return -EINVAL; + ret = -EINVAL; } - return 0; + return ret; } static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) @@ -525,13 +569,12 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) addr = le16_to_cpu(ctrl->wValue); if (addr > 127) { - dwc3_trace(trace_dwc3_ep0, "invalid device address %d", addr); + dev_err(dwc->dev, "invalid device address %d\n", addr); return -EINVAL; } if (state == USB_STATE_CONFIGURED) { - dwc3_trace(trace_dwc3_ep0, - "trying to set address when configured"); + dev_err(dwc->dev, "can't 
SetAddress() from Configured State\n"); return -EINVAL; } @@ -716,35 +759,27 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) switch (ctrl->bRequest) { case USB_REQ_GET_STATUS: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_GET_STATUS"); ret = dwc3_ep0_handle_status(dwc, ctrl); break; case USB_REQ_CLEAR_FEATURE: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_CLEAR_FEATURE"); ret = dwc3_ep0_handle_feature(dwc, ctrl, 0); break; case USB_REQ_SET_FEATURE: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_FEATURE"); ret = dwc3_ep0_handle_feature(dwc, ctrl, 1); break; case USB_REQ_SET_ADDRESS: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ADDRESS"); ret = dwc3_ep0_set_address(dwc, ctrl); break; case USB_REQ_SET_CONFIGURATION: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_CONFIGURATION"); ret = dwc3_ep0_set_config(dwc, ctrl); break; case USB_REQ_SET_SEL: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_SEL"); ret = dwc3_ep0_set_sel(dwc, ctrl); break; case USB_REQ_SET_ISOCH_DELAY: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); break; default: - dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); ret = dwc3_ep0_delegate_req(dwc, ctrl); break; } @@ -820,9 +855,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) { dwc->setup_packet_pending = true; - - dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); - if (r) dwc3_gadget_giveback(ep0, r, -ECONNRESET); @@ -912,7 +944,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc, ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr); if (ret < 0) { - dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d", + dev_err(dwc->dev, "invalid test #%d\n", dwc->test_mode_nr); dwc3_ep0_stall_and_restart(dwc); return; @@ -920,10 +952,8 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc, } status = DWC3_TRB_SIZE_TRBSTS(trb->size); - if (status == DWC3_TRBSTS_SETUP_PENDING) { + if (status == DWC3_TRBSTS_SETUP_PENDING) dwc->setup_packet_pending = true; - dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); - } dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); @@ -940,17 +970,14 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc, switch (dwc->ep0state) { case EP0_SETUP_PHASE: - dwc3_trace(trace_dwc3_ep0, "Setup Phase"); dwc3_ep0_inspect_setup(dwc, event); break; case EP0_DATA_PHASE: - dwc3_trace(trace_dwc3_ep0, "Data Phase"); dwc3_ep0_complete_data(dwc, event); break; case EP0_STATUS_PHASE: - dwc3_trace(trace_dwc3_ep0, "Status Phase"); dwc3_ep0_complete_status(dwc, event); break; default: @@ -974,12 +1001,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, u32 transfer_size = 0; u32 maxpacket; - ret = usb_gadget_map_request(&dwc->gadget, &req->request, - dep->number); - if (ret) { - dwc3_trace(trace_dwc3_ep0, "failed to map request"); + ret = usb_gadget_map_request_by_dev(dwc->sysdev, + &req->request, dep->number); + if (ret) return; - } maxpacket = dep->endpoint.maxpacket; @@ -1002,12 +1027,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, dwc->ep0_bounce_addr, transfer_size, DWC3_TRBCTL_CONTROL_DATA, false); } else { - ret = usb_gadget_map_request(&dwc->gadget, &req->request, - dep->number); - if (ret) { - dwc3_trace(trace_dwc3_ep0, "failed to map request"); + ret = usb_gadget_map_request_by_dev(dwc->sysdev, + &req->request, dep->number); + if (ret) return; - } ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma, req->request.length, DWC3_TRBCTL_CONTROL_DATA, @@ -1065,8 +1088,6 @@ 
static void dwc3_ep0_xfernotready(struct dwc3 *dwc, { switch (event->status) { case DEPEVT_STATUS_CONTROL_DATA: - dwc3_trace(trace_dwc3_ep0, "Control Data"); - /* * We already have a DATA transfer in the controller's cache, * if we receive a XferNotReady(DATA) we will ignore it, unless @@ -1079,8 +1100,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, if (dwc->ep0_expect_in != event->endpoint_number) { struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in]; - dwc3_trace(trace_dwc3_ep0, - "Wrong direction for Data phase"); + dev_err(dwc->dev, "unexpected direction for Data Phase\n"); dwc3_ep0_end_control_data(dwc, dep); dwc3_ep0_stall_and_restart(dwc); return; @@ -1092,13 +1112,10 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) return; - dwc3_trace(trace_dwc3_ep0, "Control Status"); - dwc->ep0state = EP0_STATUS_PHASE; if (dwc->delayed_status) { WARN_ON_ONCE(event->endpoint_number != 1); - dwc3_trace(trace_dwc3_ep0, "Delayed Status"); return; } @@ -1109,10 +1126,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { - dwc3_trace(trace_dwc3_ep0, "%s: state '%s'", - dwc3_ep_event_string(event), - dwc3_ep0_state_string(dwc->ep0state)); - switch (event->endpoint_event) { case DWC3_DEPEVT_XFERCOMPLETE: dwc3_ep0_xfer_complete(dwc, event); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 1dfa56a5f1c511a6a40f38cb976ed1096af3c5db..efddaf5d11d12639867ce8cb7c9a24ae2c7333c9 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -139,9 +139,6 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) udelay(5); } - dwc3_trace(trace_dwc3_gadget, - "link state change request timed out"); - return -ETIMEDOUT; } @@ -178,6 +175,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, req->started = false; list_del(&req->list); req->trb = NULL; + req->remaining = 0; if (req->request.status == -EINPROGRESS) req->request.status = status; @@ -185,8 +183,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, if (dwc->ep0_bounced && dep->number == 0) dwc->ep0_bounced = false; else - usb_gadget_unmap_request(&dwc->gadget, &req->request, - req->direction); + usb_gadget_unmap_request_by_dev(dwc->sysdev, + &req->request, req->direction); trace_dwc3_gadget_giveback(req); @@ -216,7 +214,7 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) ret = -EINVAL; break; } - } while (timeout--); + } while (--timeout); if (!timeout) { ret = -ETIMEDOUT; @@ -233,6 +231,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc); int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) { + const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; u32 timeout = 500; u32 reg; @@ -258,7 +257,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, } } - if (cmd == DWC3_DEPCMD_STARTTRANSFER) { + if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { int needs_wakeup; needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || @@ -276,7 +275,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); - dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT); + /* + * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're + * not 
relying on XferNotReady, we can make use of a special "No + * Response Update Transfer" command where we should clear both CmdAct + * and CmdIOC bits. + * + * With this, we don't need to wait for command completion and can + * straight away issue further commands to the endpoint. + * + * NOTICE: We're making an assumption that control endpoints will never + * make use of Update Transfer command. This is a safe assumption + * because we can never have more than one request at a time with + * Control Endpoints. If anybody changes that assumption, this chunk + * needs to be updated accordingly. + */ + if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER && + !usb_endpoint_xfer_isoc(desc)) + cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT); + else + cmd |= DWC3_DEPCMD_CMDACT; + + dwc3_writel(dep->regs, DWC3_DEPCMD, cmd); do { reg = dwc3_readl(dep->regs, DWC3_DEPCMD); if (!(reg & DWC3_DEPCMD_CMDACT)) { @@ -318,6 +338,20 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); + if (ret == 0) { + switch (DWC3_DEPCMD_CMD(cmd)) { + case DWC3_DEPCMD_STARTTRANSFER: + dep->flags |= DWC3_EP_TRANSFER_STARTED; + break; + case DWC3_DEPCMD_ENDTRANSFER: + dep->flags &= ~DWC3_EP_TRANSFER_STARTED; + break; + default: + /* nothing */ + break; + } + } + if (unlikely(susphy)) { reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); reg |= DWC3_GUSB2PHYCFG_SUSPHY; @@ -365,7 +399,7 @@ static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) if (dep->trb_pool) return 0; - dep->trb_pool = dma_alloc_coherent(dwc->dev, + dep->trb_pool = dma_alloc_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, &dep->trb_pool_dma, GFP_KERNEL); if (!dep->trb_pool) { @@ -381,7 +415,7 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; - dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, + dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, dep->trb_pool, dep->trb_pool_dma); dep->trb_pool = NULL; @@ -454,16 +488,19 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) } static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, - const struct usb_endpoint_descriptor *desc, - const struct usb_ss_ep_comp_descriptor *comp_desc, bool modify, bool restore) { + const struct usb_ss_ep_comp_descriptor *comp_desc; + const struct usb_endpoint_descriptor *desc; struct dwc3_gadget_ep_cmd_params params; if (dev_WARN_ONCE(dwc->dev, modify && restore, "Can't modify and restore\n")) return -EINVAL; + comp_desc = dep->endpoint.comp_desc; + desc = dep->endpoint.desc; + memset(¶ms, 0x00, sizeof(params)); params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) @@ -542,24 +579,21 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) * Caller should take care of locking */ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, - const struct usb_endpoint_descriptor *desc, - const struct usb_ss_ep_comp_descriptor *comp_desc, bool modify, bool restore) { + const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; + u32 reg; int ret; - dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); - if (!(dep->flags & DWC3_EP_ENABLED)) { ret = dwc3_gadget_start_config(dwc, dep); if (ret) return ret; } - ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify, - restore); + ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore); if (ret) return ret; @@ -567,17 +601,18 @@ static int 
__dwc3_gadget_ep_enable(struct dwc3_ep *dep, struct dwc3_trb *trb_st_hw; struct dwc3_trb *trb_link; - dep->endpoint.desc = desc; - dep->comp_desc = comp_desc; dep->type = usb_endpoint_type(desc); dep->flags |= DWC3_EP_ENABLED; + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); reg |= DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); + init_waitqueue_head(&dep->wait_end_transfer); + if (usb_endpoint_xfer_control(desc)) - return 0; + goto out; /* Initialize the TRB ring */ dep->trb_dequeue = 0; @@ -595,6 +630,39 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, trb_link->ctrl |= DWC3_TRB_CTRL_HWO; } + /* + * Issue StartTransfer here with no-op TRB so we can always rely on No + * Response Update Transfer command. + */ + if (usb_endpoint_xfer_bulk(desc)) { + struct dwc3_gadget_ep_cmd_params params; + struct dwc3_trb *trb; + dma_addr_t trb_dma; + u32 cmd; + + memset(¶ms, 0, sizeof(params)); + trb = &dep->trb_pool[0]; + trb_dma = dwc3_trb_dma_offset(dep, trb); + + params.param0 = upper_32_bits(trb_dma); + params.param1 = lower_32_bits(trb_dma); + + cmd = DWC3_DEPCMD_STARTTRANSFER; + + ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); + if (ret < 0) + return ret; + + dep->flags |= DWC3_EP_BUSY; + + dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); + WARN_ON_ONCE(!dep->resource_index); + } + + +out: + trace_dwc3_gadget_ep_enable(dep); + return 0; } @@ -632,7 +700,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) struct dwc3 *dwc = dep->dwc; u32 reg; - dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name); + trace_dwc3_gadget_ep_disable(dep); dwc3_remove_requests(dwc, dep); @@ -645,10 +713,14 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); dep->stream_capable = false; - dep->endpoint.desc = NULL; - dep->comp_desc = NULL; dep->type = 0; - dep->flags = 0; + dep->flags &= DWC3_EP_END_TRANSFER_PENDING; + + /* Clear out the ep descriptors for non-ep0 */ + if (dep->number > 1) { + dep->endpoint.comp_desc = NULL; + dep->endpoint.desc = NULL; + } return 0; } @@ -695,7 +767,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep, return 0; spin_lock_irqsave(&dwc->lock, flags); - ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); + ret = __dwc3_gadget_ep_enable(dep, false, false); spin_unlock_irqrestore(&dwc->lock, flags); return ret; @@ -771,10 +843,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, unsigned length, unsigned chain, unsigned node) { struct dwc3_trb *trb; - - dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s", - dep->name, req, (unsigned long long) dma, - length, chain ? 
" chain" : ""); + struct dwc3 *dwc = dep->dwc; + struct usb_gadget *gadget = &dwc->gadget; + enum usb_device_speed speed = gadget->speed; trb = &dep->trb_pool[dep->trb_enqueue]; @@ -782,7 +853,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, dwc3_gadget_move_started_request(req); req->trb = trb; req->trb_dma = dwc3_trb_dma_offset(dep, trb); - req->first_trb_index = dep->trb_enqueue; dep->queued_requests++; } @@ -798,10 +868,16 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, break; case USB_ENDPOINT_XFER_ISOC: - if (!node) + if (!node) { trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; - else + + if (speed == USB_SPEED_HIGH) { + struct usb_ep *ep = &dep->endpoint; + trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1); + } + } else { trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; + } /* always enable Interrupt on Missed ISOC */ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; @@ -816,15 +892,21 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, * This is only possible with faulty memory because we * checked it already :) */ - BUG(); + dev_WARN(dwc->dev, "Unknown endpoint type %d\n", + usb_endpoint_type(dep->endpoint.desc)); } /* always enable Continue on Short Packet */ - trb->ctrl |= DWC3_TRB_CTRL_CSP; + if (usb_endpoint_dir_out(dep->endpoint.desc)) { + trb->ctrl |= DWC3_TRB_CTRL_CSP; + + if (req->request.short_not_ok) + trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; + } if ((!req->request.no_interrupt && !chain) || (dwc3_calc_trbs_left(dep) == 0)) - trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; + trb->ctrl |= DWC3_TRB_CTRL_IOC; if (chain) trb->ctrl |= DWC3_TRB_CTRL_CHN; @@ -859,6 +941,7 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) { struct dwc3_trb *tmp; + struct dwc3 *dwc = dep->dwc; u8 trbs_left; /* @@ -870,7 +953,8 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) */ if (dep->trb_enqueue == dep->trb_dequeue) { tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); - if (tmp->ctrl & DWC3_TRB_CTRL_HWO) + if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO, + "%s No TRBS left\n", dep->name)) return 0; return DWC3_TRB_NUM - 1; @@ -941,6 +1025,24 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep) if (!dwc3_calc_trbs_left(dep)) return; + /* + * We can get in a situation where there's a request in the started list + * but there weren't enough TRBs to fully kick it in the first time + * around, so it has been waiting for more TRBs to be freed up. + * + * In that case, we should check if we have a request with pending_sgs + * in the started list and prepare TRBs for that request first, + * otherwise we will prepare TRBs completely out of order and that will + * break things. + */ + list_for_each_entry(req, &dep->started_list, list) { + if (req->num_pending_sgs > 0) + dwc3_prepare_one_trb_sg(dep, req); + + if (!dwc3_calc_trbs_left(dep)) + return; + } + list_for_each_entry_safe(req, n, &dep->pending_list, list) { if (req->num_pending_sgs > 0) dwc3_prepare_one_trb_sg(dep, req); @@ -956,7 +1058,6 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) { struct dwc3_gadget_ep_cmd_params params; struct dwc3_request *req; - struct dwc3 *dwc = dep->dwc; int starting; int ret; u32 cmd; @@ -989,9 +1090,10 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) * here and stop, unmap, free and del each of the linked * requests instead of what we do now. 
*/ - usb_gadget_unmap_request(&dwc->gadget, &req->request, - req->direction); - list_del(&req->list); + if (req->trb) + memset(req->trb, 0, sizeof(struct dwc3_trb)); + dep->queued_requests--; + dwc3_gadget_giveback(dep, req, ret); return ret; } @@ -1005,14 +1107,21 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) return 0; } +static int __dwc3_gadget_get_frame(struct dwc3 *dwc) +{ + u32 reg; + + reg = dwc3_readl(dwc->regs, DWC3_DSTS); + return DWC3_DSTS_SOFFN(reg); +} + static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, struct dwc3_ep *dep, u32 cur_uf) { u32 uf; if (list_empty(&dep->pending_list)) { - dwc3_trace(trace_dwc3_gadget, - "ISOC ep %s run out for requests", + dev_info(dwc->dev, "%s: ran out of requests\n", dep->name); dep->flags |= DWC3_EP_PENDING_REQUEST; return; @@ -1041,16 +1150,15 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) int ret; if (!dep->endpoint.desc) { - dwc3_trace(trace_dwc3_gadget, - "trying to queue request %p to disabled %s", - &req->request, dep->endpoint.name); + dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", + dep->name); return -ESHUTDOWN; } if (WARN(req->dep != dep, "request %p belongs to '%s'\n", &req->request, req->dep->name)) { - dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'", - &req->request, req->dep->name); + dev_err(dwc->dev, "%s: request %p belongs to '%s'\n", + dep->name, &req->request, req->dep->name); return -EINVAL; } @@ -1063,8 +1171,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) trace_dwc3_ep_queue(req); - ret = usb_gadget_map_request(&dwc->gadget, &req->request, - dep->direction); + ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request, + dep->direction); if (ret) return ret; @@ -1082,10 +1190,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) * errors which will force us issue EndTransfer command. 
*/ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - if ((dep->flags & DWC3_EP_PENDING_REQUEST) && - list_empty(&dep->started_list)) { - dwc3_stop_active_transfer(dwc, dep->number, true); - dep->flags = DWC3_EP_ENABLED; + if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { + if (dep->flags & DWC3_EP_TRANSFER_STARTED) { + dwc3_stop_active_transfer(dwc, dep->number, true); + dep->flags = DWC3_EP_ENABLED; + } else { + u32 cur_uf; + + cur_uf = __dwc3_gadget_get_frame(dwc); + __dwc3_gadget_start_isoc(dwc, dep, cur_uf); + dep->flags &= ~DWC3_EP_PENDING_REQUEST; + } } return 0; } @@ -1094,10 +1209,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) return 0; ret = __dwc3_gadget_kick_transfer(dep, 0); - if (ret && ret != -EBUSY) - dwc3_trace(trace_dwc3_gadget, - "%s: failed to kick transfers", - dep->name); if (ret == -EBUSY) ret = 0; @@ -1116,7 +1227,6 @@ static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep) struct usb_request *request; struct usb_ep *ep = &dep->endpoint; - dwc3_trace(trace_dwc3_gadget, "queueing ZLP"); request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC); if (!request) return -ENOMEM; @@ -1235,9 +1345,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) if (!protocol && ((dep->direction && transfer_in_flight) || (!dep->direction && started))) { - dwc3_trace(trace_dwc3_gadget, - "%s: pending request, cannot halt", - dep->name); return -EAGAIN; } @@ -1331,10 +1438,8 @@ static const struct usb_ep_ops dwc3_gadget_ep_ops = { static int dwc3_gadget_get_frame(struct usb_gadget *g) { struct dwc3 *dwc = gadget_to_dwc(g); - u32 reg; - reg = dwc3_readl(dwc->regs, DWC3_DSTS); - return DWC3_DSTS_SOFFN(reg); + return __dwc3_gadget_get_frame(dwc); } static int __dwc3_gadget_wakeup(struct dwc3 *dwc) @@ -1357,10 +1462,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) speed = reg & DWC3_DSTS_CONNECTSPD; if ((speed == DWC3_DSTS_SUPERSPEED) || - (speed == DWC3_DSTS_SUPERSPEED_PLUS)) { - dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed"); + (speed == DWC3_DSTS_SUPERSPEED_PLUS)) return 0; - } link_state = DWC3_DSTS_USBLNKST(reg); @@ -1369,9 +1472,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ break; default: - dwc3_trace(trace_dwc3_gadget, - "can't wakeup from '%s'", - dwc3_gadget_link_string(link_state)); return -EINVAL; } @@ -1476,11 +1576,6 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) if (!timeout) return -ETIMEDOUT; - dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", - dwc->gadget_driver - ? dwc->gadget_driver->function : "no-function", - is_on ? "connect" : "disconnect"); - return 0; } @@ -1492,6 +1587,21 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) is_on = !!is_on; + /* + * Per databook, when we want to stop the gadget, if a control transfer + * is still in process, complete it and get the core into setup phase. 
+ */ + if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) { + reinit_completion(&dwc->ep0_in_setup); + + ret = wait_for_completion_timeout(&dwc->ep0_in_setup, + msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT)); + if (ret == 0) { + dev_err(dwc->dev, "timed out waiting for SETUP phase\n"); + return -ETIMEDOUT; + } + } + spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_gadget_run_stop(dwc, is_on, false); spin_unlock_irqrestore(&dwc->lock, flags); @@ -1509,11 +1619,13 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc) DWC3_DEVTEN_CMDCMPLTEN | DWC3_DEVTEN_ERRTICERREN | DWC3_DEVTEN_WKUPEVTEN | - DWC3_DEVTEN_ULSTCNGEN | DWC3_DEVTEN_CONNECTDONEEN | DWC3_DEVTEN_USBRSTEN | DWC3_DEVTEN_DISCONNEVTEN); + if (dwc->revision < DWC3_REVISION_250A) + reg |= DWC3_DEVTEN_ULSTCNGEN; + dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); } @@ -1573,6 +1685,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) int ret = 0; u32 reg; + /* + * Use IMOD if enabled via dwc->imod_interval. Otherwise, if + * the core supports IMOD, disable it. + */ + if (dwc->imod_interval) { + dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); + } else if (dwc3_has_imod(dwc)) { + dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0); + } + reg = dwc3_readl(dwc->regs, DWC3_DCFG); reg &= ~(DWC3_DCFG_SPEED_MASK); @@ -1633,16 +1756,14 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); dep = dwc->eps[0]; - ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, - false); + ret = __dwc3_gadget_ep_enable(dep, false, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); goto err0; } dep = dwc->eps[1]; - ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, - false); + ret = __dwc3_gadget_ep_enable(dep, false, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); goto err1; @@ -1708,9 +1829,6 @@ static int dwc3_gadget_start(struct usb_gadget *g, static void __dwc3_gadget_stop(struct dwc3 *dwc) { - if (pm_runtime_suspended(dwc->dev)) - return; - dwc3_gadget_disable_irq(dwc); __dwc3_gadget_ep_disable(dwc->eps[0]); __dwc3_gadget_ep_disable(dwc->eps[1]); @@ -1720,9 +1838,30 @@ static int dwc3_gadget_stop(struct usb_gadget *g) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; + int epnum; spin_lock_irqsave(&dwc->lock, flags); + + if (pm_runtime_suspended(dwc->dev)) + goto out; + __dwc3_gadget_stop(dwc); + + for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { + struct dwc3_ep *dep = dwc->eps[epnum]; + + if (!dep) + continue; + + if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) + continue; + + wait_event_lock_irq(dep->wait_end_transfer, + !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), + dwc->lock); + } + +out: dwc->gadget_driver = NULL; spin_unlock_irqrestore(&dwc->lock, flags); @@ -1765,9 +1904,13 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, (epnum & 1) ? 
"in" : "out"); dep->endpoint.name = dep->name; - spin_lock_init(&dep->lock); - dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); + if (!(dep->number > 1)) { + dep->endpoint.desc = &dwc3_gadget_ep0_desc; + dep->endpoint.comp_desc = NULL; + } + + spin_lock_init(&dep->lock); if (epnum == 0 || epnum == 1) { usb_ep_set_maxpacket_limit(&dep->endpoint, 512); @@ -1815,15 +1958,13 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); if (ret < 0) { - dwc3_trace(trace_dwc3_gadget, - "failed to allocate OUT endpoints"); + dev_err(dwc->dev, "failed to initialize OUT endpoints\n"); return ret; } ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); if (ret < 0) { - dwc3_trace(trace_dwc3_gadget, - "failed to allocate IN endpoints"); + dev_err(dwc->dev, "failed to initialize IN endpoints\n"); return ret; } @@ -1892,15 +2033,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, return 1; count = trb->size & DWC3_TRB_SIZE_MASK; - req->request.actual += count; + req->remaining += count; if (dep->direction) { if (count) { trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { - dwc3_trace(trace_dwc3_gadget, - "%s: incomplete IN transfer", - dep->name); /* * If missed isoc occurred and there is * no request queued then issue END @@ -1946,11 +2084,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req, *n; struct dwc3_trb *trb; bool ioc = false; - int ret; + int ret = 0; list_for_each_entry_safe(req, n, &dep->started_list, list) { unsigned length; - unsigned actual; int chain; length = req->request.length; @@ -1964,6 +2101,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, for_each_sg(sg, s, pending, i) { trb = &dep->trb_pool[dep->trb_dequeue]; + if (trb->ctrl & DWC3_TRB_CTRL_HWO) + break; + req->sg = sg_next(s); req->num_pending_sgs--; @@ -1978,17 +2118,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, event, status, chain); } - /* - * We assume here we will always receive the entire data block - * which we should receive. Meaning, if we program RX to - * receive 4K but we receive only 2K, we assume that's all we - * should receive and we simply bounce the request back to the - * gadget driver for further processing. 
- */ - actual = length - req->request.actual; - req->request.actual = actual; + req->request.actual = length - req->remaining; - if (ret && chain && (actual < length) && req->num_pending_sgs) + if ((req->request.actual < length) && req->num_pending_sgs) return __dwc3_gadget_kick_transfer(dep, 0); dwc3_gadget_giveback(dep, req, status); @@ -2096,10 +2228,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, { struct dwc3_ep *dep; u8 epnum = event->endpoint_number; + u8 cmd; dep = dwc->eps[epnum]; - if (!(dep->flags & DWC3_EP_ENABLED)) + if (!(dep->flags & DWC3_EP_ENABLED) && + !(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) return; if (epnum == 0 || epnum == 1) { @@ -2112,9 +2246,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, dep->resource_index = 0; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - dwc3_trace(trace_dwc3_gadget, - "%s is an Isochronous endpoint", - dep->name); + dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n"); return; } @@ -2127,22 +2259,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { dwc3_gadget_start_isoc(dwc, dep, event); } else { - int active; int ret; - active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; - - dwc3_trace(trace_dwc3_gadget, "%s: reason %s", - dep->name, active ? "Transfer Active" - : "Transfer Not Active"); - ret = __dwc3_gadget_kick_transfer(dep, 0); if (!ret || ret == -EBUSY) return; - - dwc3_trace(trace_dwc3_gadget, - "%s: failed to kick transfers", - dep->name); } break; @@ -2152,26 +2273,16 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, dep->name); return; } + break; + case DWC3_DEPEVT_EPCMDCMPLT: + cmd = DEPEVT_PARAMETER_CMD(event->parameters); - switch (event->status) { - case DEPEVT_STREAMEVT_FOUND: - dwc3_trace(trace_dwc3_gadget, - "Stream %d found and started", - event->parameters); - - break; - case DEPEVT_STREAMEVT_NOTFOUND: - /* FALLTHROUGH */ - default: - dwc3_trace(trace_dwc3_gadget, - "unable to find suitable stream"); + if (cmd == DWC3_DEPCMD_ENDTRANSFER) { + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; + wake_up(&dep->wait_end_transfer); } break; case DWC3_DEPEVT_RXTXFIFOEVT: - dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name); - break; - case DWC3_DEPEVT_EPCMDCMPLT: - dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete"); break; } } @@ -2224,7 +2335,8 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) dep = dwc->eps[epnum]; - if (!dep->resource_index) + if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || + !dep->resource_index) return; /* @@ -2268,25 +2380,9 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) dep->resource_index = 0; dep->flags &= ~DWC3_EP_BUSY; - if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) + if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) { + dep->flags |= DWC3_EP_END_TRANSFER_PENDING; udelay(100); -} - -static void dwc3_stop_active_transfers(struct dwc3 *dwc) -{ - u32 epnum; - - for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { - struct dwc3_ep *dep; - - dep = dwc->eps[epnum]; - if (!dep) - continue; - - if (!(dep->flags & DWC3_EP_ENABLED)) - continue; - - dwc3_remove_requests(dwc, dep); } } @@ -2375,8 +2471,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) reg &= ~DWC3_DCTL_TSTCTRL_MASK; dwc3_writel(dwc->regs, DWC3_DCTL, reg); dwc->test_mode = false; - - dwc3_stop_active_transfers(dwc); dwc3_clear_stall_all_ep(dwc); /* Reset device address to zero */ @@ -2385,32 +2479,6 @@ static void 
dwc3_gadget_reset_interrupt(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_DCFG, reg); } -static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) -{ - u32 reg; - u32 usb30_clock = DWC3_GCTL_CLK_BUS; - - /* - * We change the clock only at SS but I dunno why I would want to do - * this. Maybe it becomes part of the power saving plan. - */ - - if ((speed != DWC3_DSTS_SUPERSPEED) && - (speed != DWC3_DSTS_SUPERSPEED_PLUS)) - return; - - /* - * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed - * each time on Connect Done. - */ - if (!usb30_clock) - return; - - reg = dwc3_readl(dwc->regs, DWC3_GCTL); - reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); - dwc3_writel(dwc->regs, DWC3_GCTL, reg); -} - static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) { struct dwc3_ep *dep; @@ -2422,7 +2490,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) speed = reg & DWC3_DSTS_CONNECTSPD; dwc->speed = speed; - dwc3_update_ram_clk_sel(dwc, speed); + /* + * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed + * each time on Connect Done. + * + * Currently we always use the reset value. If any platform + * wants to set this to a different value, we need to add a + * setting and update GCTL.RAMCLKSEL here. + */ switch (speed) { case DWC3_DSTS_SUPERSPEED_PLUS: @@ -2491,7 +2566,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) */ WARN_ONCE(dwc->revision < DWC3_REVISION_240A && dwc->has_lpm_erratum, - "LPM Erratum not available on dwc3 revisisions < 2.40a\n"); + "LPM Erratum not available on dwc3 revisions < 2.40a\n"); if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A) reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold); @@ -2504,16 +2579,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) } dep = dwc->eps[0]; - ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, - false); + ret = __dwc3_gadget_ep_enable(dep, true, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); return; } dep = dwc->eps[1]; - ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, - false); + ret = __dwc3_gadget_ep_enable(dep, true, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); return; @@ -2570,8 +2643,6 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { if ((dwc->link_state == DWC3_LINK_STATE_U3) && (next == DWC3_LINK_STATE_RESUME)) { - dwc3_trace(trace_dwc3_gadget, - "ignoring transition U3 -> Resume"); return; } } @@ -2705,11 +2776,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, break; case DWC3_DEVICE_EVENT_EOPF: /* It changed to be suspend event for version 2.30a and above */ - if (dwc->revision < DWC3_REVISION_230A) { - dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame"); - } else { - dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event"); - + if (dwc->revision >= DWC3_REVISION_230A) { /* * Ignore suspend event until the gadget enters into * USB_STATE_CONFIGURED state. 
@@ -2720,16 +2787,9 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, } break; case DWC3_DEVICE_EVENT_SOF: - dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame"); - break; case DWC3_DEVICE_EVENT_ERRATIC_ERROR: - dwc3_trace(trace_dwc3_gadget, "Erratic Error"); - break; case DWC3_DEVICE_EVENT_CMD_CMPL: - dwc3_trace(trace_dwc3_gadget, "Command Complete"); - break; case DWC3_DEVICE_EVENT_OVERFLOW: - dwc3_trace(trace_dwc3_gadget, "Overflow"); break; default: dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); @@ -2739,7 +2799,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, static void dwc3_process_event_entry(struct dwc3 *dwc, const union dwc3_event *event) { - trace_dwc3_event(event->raw); + trace_dwc3_event(event->raw, dwc); /* Endpoint IRQ, handle it and return early */ if (event->type.is_devspec == 0) { @@ -2772,7 +2832,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) while (left > 0) { union dwc3_event event; - event.raw = *(u32 *) (evt->buf + evt->lpos); + event.raw = *(u32 *) (evt->cache + evt->lpos); dwc3_process_event_entry(dwc, &event); @@ -2785,10 +2845,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) * boundary so I worry about that once we try to handle * that. */ - evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; + evt->lpos = (evt->lpos + 4) % evt->length; left -= 4; - - dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4); } evt->count = 0; @@ -2800,6 +2858,11 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) reg &= ~DWC3_GEVNTSIZ_INTMASK; dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); + if (dwc->imod_interval) { + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); + dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); + } + return ret; } @@ -2820,6 +2883,7 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) { struct dwc3 *dwc = evt->dwc; + u32 amount; u32 count; u32 reg; @@ -2843,6 +2907,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) reg |= DWC3_GEVNTSIZ_INTMASK; dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); + amount = min(count, evt->length - evt->lpos); + memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); + + if (amount < count) + memcpy(evt->cache, evt->buf, count - amount); + + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); + return IRQ_WAKE_THREAD; } @@ -2853,6 +2925,39 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt) return dwc3_check_event_buf(evt); } +static int dwc3_gadget_get_irq(struct dwc3 *dwc) +{ + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + int irq; + + irq = platform_get_irq_byname(dwc3_pdev, "peripheral"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq(dwc3_pdev, 0); + if (irq > 0) + goto out; + + if (irq != -EPROBE_DEFER) + dev_err(dwc->dev, "missing peripheral IRQ\n"); + + if (!irq) + irq = -EINVAL; + +out: + return irq; +} + /** * dwc3_gadget_init - Initializes gadget related registers * @dwc: pointer to our controller context structure @@ -2861,35 +2966,18 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt) */ int dwc3_gadget_init(struct dwc3 *dwc) { - int ret, irq; - struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + int ret; + int irq; - irq = platform_get_irq_byname(dwc3_pdev, 
"peripheral"); - if (irq == -EPROBE_DEFER) - return irq; - - if (irq <= 0) { - irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); - if (irq == -EPROBE_DEFER) - return irq; - - if (irq <= 0) { - irq = platform_get_irq(dwc3_pdev, 0); - if (irq <= 0) { - if (irq != -EPROBE_DEFER) { - dev_err(dwc->dev, - "missing peripheral IRQ\n"); - } - if (!irq) - irq = -EINVAL; - return irq; - } - } + irq = dwc3_gadget_get_irq(dwc); + if (irq < 0) { + ret = irq; + goto err0; } dwc->irq_gadget = irq; - dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), + dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), &dwc->ctrl_req_addr, GFP_KERNEL); if (!dwc->ctrl_req) { dev_err(dwc->dev, "failed to allocate ctrl request\n"); @@ -2897,8 +2985,9 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err0; } - dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, - &dwc->ep0_trb_addr, GFP_KERNEL); + dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, + sizeof(*dwc->ep0_trb) * 2, + &dwc->ep0_trb_addr, GFP_KERNEL); if (!dwc->ep0_trb) { dev_err(dwc->dev, "failed to allocate ep0 trb\n"); ret = -ENOMEM; @@ -2911,7 +3000,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err2; } - dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, + dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, GFP_KERNEL); if (!dwc->ep0_bounce) { @@ -2926,6 +3015,8 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err4; } + init_completion(&dwc->ep0_in_setup); + dwc->gadget.ops = &dwc3_gadget_ops; dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->gadget.sg_supported = true; @@ -2949,8 +3040,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) * composite.c that we are USB 2.0 + LPM ECN. */ if (dwc->revision < DWC3_REVISION_220A) - dwc3_trace(trace_dwc3_gadget, - "Changing max_speed on rev %08x", + dev_info(dwc->dev, "changing max_speed on rev %08x\n", dwc->revision); dwc->gadget.max_speed = dwc->maximum_speed; @@ -2983,18 +3073,18 @@ int dwc3_gadget_init(struct dwc3 *dwc) err4: dwc3_gadget_free_endpoints(dwc); - dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, + dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, dwc->ep0_bounce, dwc->ep0_bounce_addr); err3: kfree(dwc->setup_buf); err2: - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, + dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); err1: - dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), + dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), dwc->ctrl_req, dwc->ctrl_req_addr); err0: @@ -3009,16 +3099,16 @@ void dwc3_gadget_exit(struct dwc3 *dwc) dwc3_gadget_free_endpoints(dwc); - dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, + dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, dwc->ep0_bounce, dwc->ep0_bounce_addr); kfree(dwc->setup_buf); kfree(dwc->zlp_buf); - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, + dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); - dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), + dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), dwc->ctrl_req, dwc->ctrl_req_addr); } diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index e4a1d974a5aea209fbea745280bb70cada700883..3129bcf74d7d8de7ffe8d23923a7ba34d4e5cff9 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -62,10 +62,7 @@ struct dwc3; static inline struct dwc3_request *next_request(struct list_head *list) { - if (list_empty(list)) - return NULL; - - return list_first_entry(list, struct dwc3_request, 
list); + return list_first_entry_or_null(list, struct dwc3_request, list); } static inline void dwc3_gadget_move_started_request(struct dwc3_request *req) diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index f6533c68fed1e0dcc73b1af4b2f7948a6313fb7e..487f0ff6ae258aeee4cf22029e213fdcdf75631a 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -19,6 +19,39 @@ #include "core.h" +static int dwc3_host_get_irq(struct dwc3 *dwc) +{ + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + int irq; + + irq = platform_get_irq_byname(dwc3_pdev, "host"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq(dwc3_pdev, 0); + if (irq > 0) + goto out; + + if (irq != -EPROBE_DEFER) + dev_err(dwc->dev, "missing host IRQ\n"); + + if (!irq) + irq = -EINVAL; + +out: + return irq; +} + int dwc3_host_init(struct dwc3 *dwc) { struct property_entry props[2]; @@ -27,39 +60,18 @@ int dwc3_host_init(struct dwc3 *dwc) struct resource *res; struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); - irq = platform_get_irq_byname(dwc3_pdev, "host"); - if (irq == -EPROBE_DEFER) + irq = dwc3_host_get_irq(dwc); + if (irq < 0) return irq; - if (irq <= 0) { - irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); - if (irq == -EPROBE_DEFER) - return irq; - - if (irq <= 0) { - irq = platform_get_irq(dwc3_pdev, 0); - if (irq <= 0) { - if (irq != -EPROBE_DEFER) { - dev_err(dwc->dev, - "missing host IRQ\n"); - } - if (!irq) - irq = -EINVAL; - return irq; - } else { - res = platform_get_resource(dwc3_pdev, - IORESOURCE_IRQ, 0); - } - } else { - res = platform_get_resource_byname(dwc3_pdev, - IORESOURCE_IRQ, - "dwc_usb3"); - } - - } else { + res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host"); + if (!res) res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, - "host"); - } + "dwc_usb3"); + if (!res) + res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0); + if (!res) + return -ENOMEM; dwc->xhci_resources[1].start = irq; dwc->xhci_resources[1].end = irq; @@ -72,11 +84,7 @@ int dwc3_host_init(struct dwc3 *dwc) return -ENOMEM; } - dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask); - xhci->dev.parent = dwc->dev; - xhci->dev.dma_mask = dwc->dev->dma_mask; - xhci->dev.dma_parms = dwc->dev->dma_parms; dwc->xhci = xhci; @@ -99,9 +107,9 @@ int dwc3_host_init(struct dwc3 *dwc) } phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(&xhci->dev)); + dev_name(dwc->dev)); phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(&xhci->dev)); + dev_name(dwc->dev)); ret = platform_device_add(xhci); if (ret) { @@ -112,9 +120,9 @@ int dwc3_host_init(struct dwc3 *dwc) return 0; err2: phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(&xhci->dev)); + dev_name(dwc->dev)); phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(&xhci->dev)); + dev_name(dwc->dev)); err1: platform_device_put(xhci); return ret; @@ -123,8 +131,8 @@ int dwc3_host_init(struct dwc3 *dwc) void dwc3_host_exit(struct dwc3 *dwc) { phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(&dwc->xhci->dev)); + dev_name(dwc->dev)); phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(&dwc->xhci->dev)); + dev_name(dwc->dev)); platform_device_unregister(dwc->xhci); } diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h index 
a06f9a8fecc701c99c370150416bdf5351b4d3d8..c69b066968245d5149de5ed2068160e46ae0a097 100644 --- a/drivers/usb/dwc3/io.h +++ b/drivers/usb/dwc3/io.h @@ -40,8 +40,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset) * documentation, so we revert it back to the proper addresses, the * same way they are described on SNPS documentation */ - dwc3_trace(trace_dwc3_readl, "addr %p value %08x", - base - DWC3_GLOBALS_REGS_START + offset, value); + trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value); return value; } @@ -60,8 +59,7 @@ static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value) * documentation, so we revert it back to the proper addresses, the * same way they are described on SNPS documentation */ - dwc3_trace(trace_dwc3_writel, "addr %p value %08x", - base - DWC3_GLOBALS_REGS_START + offset, value); + trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value); } #endif /* __DRIVERS_USB_DWC3_IO_H */ diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h index d24cefd191b558e8452beab674130b7ff49b893d..2b124f94d858487ec9694683f618c0b1448e64d4 100644 --- a/drivers/usb/dwc3/trace.h +++ b/drivers/usb/dwc3/trace.h @@ -37,47 +37,66 @@ DECLARE_EVENT_CLASS(dwc3_log_msg, TP_printk("%s", __get_str(msg)) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_readl, +DEFINE_EVENT(dwc3_log_msg, dwc3_gadget, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_writel, +DEFINE_EVENT(dwc3_log_msg, dwc3_core, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_gadget, +DEFINE_EVENT(dwc3_log_msg, dwc3_ep0, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_core, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) +DECLARE_EVENT_CLASS(dwc3_log_io, + TP_PROTO(void *base, u32 offset, u32 value), + TP_ARGS(base, offset, value), + TP_STRUCT__entry( + __field(void *, base) + __field(u32, offset) + __field(u32, value) + ), + TP_fast_assign( + __entry->base = base; + __entry->offset = offset; + __entry->value = value; + ), + TP_printk("addr %p value %08x", __entry->base + __entry->offset, + __entry->value) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_ep0, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) +DEFINE_EVENT(dwc3_log_io, dwc3_readl, + TP_PROTO(void *base, u32 offset, u32 value), + TP_ARGS(base, offset, value) +); + +DEFINE_EVENT(dwc3_log_io, dwc3_writel, + TP_PROTO(void *base, u32 offset, u32 value), + TP_ARGS(base, offset, value) ); DECLARE_EVENT_CLASS(dwc3_log_event, - TP_PROTO(u32 event), - TP_ARGS(event), + TP_PROTO(u32 event, struct dwc3 *dwc), + TP_ARGS(event, dwc), TP_STRUCT__entry( __field(u32, event) + __field(u32, ep0state) ), TP_fast_assign( __entry->event = event; + __entry->ep0state = dwc->ep0state; ), TP_printk("event (%08x): %s", __entry->event, - dwc3_decode_event(__entry->event)) + dwc3_decode_event(__entry->event, __entry->ep0state)) ); DEFINE_EVENT(dwc3_log_event, dwc3_event, - TP_PROTO(u32 event), - TP_ARGS(event) + TP_PROTO(u32 event, struct dwc3 *dwc), + TP_ARGS(event, dwc) ); DECLARE_EVENT_CLASS(dwc3_log_ctrl, @@ -237,6 +256,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, __field(u32, bph) __field(u32, size) __field(u32, ctrl) + __field(u32, type) ), TP_fast_assign( snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name); @@ -247,11 +267,31 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, __entry->bph = trb->bph; __entry->size = trb->size; __entry->ctrl = trb->ctrl; + __entry->type = usb_endpoint_type(dep->endpoint.desc); ), - TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x 
(%c%c%c%c:%c%c:%s)", + TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)", __get_str(name), __entry->queued, __entry->allocated, __entry->trb, __entry->bph, __entry->bpl, - __entry->size, __entry->ctrl, + ({char *s; + int pcm = ((__entry->size >> 24) & 3) + 1; + switch (__entry->type) { + case USB_ENDPOINT_XFER_INT: + case USB_ENDPOINT_XFER_ISOC: + switch (pcm) { + case 1: + s = "1x "; + break; + case 2: + s = "2x "; + break; + case 3: + s = "3x "; + break; + } + default: + s = ""; + } s; }), + DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl, __entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h', __entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l', __entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c', @@ -301,6 +341,57 @@ DEFINE_EVENT(dwc3_log_trb, dwc3_complete_trb, TP_ARGS(dep, trb) ); +DECLARE_EVENT_CLASS(dwc3_log_ep, + TP_PROTO(struct dwc3_ep *dep), + TP_ARGS(dep), + TP_STRUCT__entry( + __dynamic_array(char, name, DWC3_MSG_MAX) + __field(unsigned, maxpacket) + __field(unsigned, maxpacket_limit) + __field(unsigned, max_streams) + __field(unsigned, maxburst) + __field(unsigned, flags) + __field(unsigned, direction) + __field(u8, trb_enqueue) + __field(u8, trb_dequeue) + ), + TP_fast_assign( + snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name); + __entry->maxpacket = dep->endpoint.maxpacket; + __entry->maxpacket_limit = dep->endpoint.maxpacket_limit; + __entry->max_streams = dep->endpoint.max_streams; + __entry->maxburst = dep->endpoint.maxburst; + __entry->flags = dep->flags; + __entry->direction = dep->direction; + __entry->trb_enqueue = dep->trb_enqueue; + __entry->trb_dequeue = dep->trb_dequeue; + ), + TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c", + __get_str(name), __entry->maxpacket, + __entry->maxpacket_limit, __entry->max_streams, + __entry->maxburst, __entry->trb_enqueue, + __entry->trb_dequeue, + __entry->flags & DWC3_EP_ENABLED ? 'E' : 'e', + __entry->flags & DWC3_EP_STALL ? 'S' : 's', + __entry->flags & DWC3_EP_WEDGE ? 'W' : 'w', + __entry->flags & DWC3_EP_BUSY ? 'B' : 'b', + __entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p', + __entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm', + __entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e', + __entry->direction ? 
'<' : '>' + ) +); + +DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_enable, + TP_PROTO(struct dwc3_ep *dep), + TP_ARGS(dep) +); + +DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_disable, + TP_PROTO(struct dwc3_ep *dep), + TP_ARGS(dep) +); + #endif /* __DWC3_TRACE_H */ /* this part has to be here */ diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 32176f779861624578b47be6dd4975a269b7f664..41ab61f9b6e0b24be1dd950afdae9f4f3a179d50 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -201,7 +201,12 @@ int config_ep_by_speed(struct usb_gadget *g, _ep->desc = chosen_desc; _ep->comp_desc = NULL; _ep->maxburst = 0; - _ep->mult = 0; + _ep->mult = 1; + + if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) || + usb_endpoint_xfer_int(_ep->desc))) + _ep->mult = usb_endpoint_maxp_mult(_ep->desc); + if (!want_comp_desc) return 0; @@ -218,7 +223,7 @@ int config_ep_by_speed(struct usb_gadget *g, switch (usb_endpoint_type(_ep->desc)) { case USB_ENDPOINT_XFER_ISOC: /* mult: bits 1:0 of bmAttributes */ - _ep->mult = comp_desc->bmAttributes & 0x3; + _ep->mult = (comp_desc->bmAttributes & 0x3) + 1; case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT: _ep->maxburst = comp_desc->bMaxBurst + 1; @@ -2382,18 +2387,8 @@ EXPORT_SYMBOL_GPL(usb_composite_setup_continue); static char *composite_default_mfr(struct usb_gadget *gadget) { - char *mfr; - int len; - - len = snprintf(NULL, 0, "%s %s with %s", init_utsname()->sysname, - init_utsname()->release, gadget->name); - len++; - mfr = kmalloc(len, GFP_KERNEL); - if (!mfr) - return NULL; - snprintf(mfr, len, "%s %s with %s", init_utsname()->sysname, - init_utsname()->release, gadget->name); - return mfr; + return kasprintf(GFP_KERNEL, "%s %s with %s", init_utsname()->sysname, + init_utsname()->release, gadget->name); } void usb_composite_overwrite_options(struct usb_composite_dev *cdev, diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 3984787f8e9720933830864200196b32930e10b4..78c44979dde382ca8c58e9f9f84024180ce9663d 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -408,7 +408,7 @@ static int config_usb_cfg_link( return ret; } -static int config_usb_cfg_unlink( +static void config_usb_cfg_unlink( struct config_item *usb_cfg_ci, struct config_item *usb_func_ci) { @@ -437,12 +437,11 @@ static int config_usb_cfg_unlink( list_del(&f->list); usb_put_function(f); mutex_unlock(&gi->lock); - return 0; + return; } } mutex_unlock(&gi->lock); WARN(1, "Unable to locate function to unbind\n"); - return 0; } static struct configfs_item_operations gadget_config_item_ops = { @@ -865,7 +864,7 @@ static int os_desc_link(struct config_item *os_desc_ci, return ret; } -static int os_desc_unlink(struct config_item *os_desc_ci, +static void os_desc_unlink(struct config_item *os_desc_ci, struct config_item *usb_cfg_ci) { struct gadget_info *gi = container_of(to_config_group(os_desc_ci), @@ -878,7 +877,6 @@ static int os_desc_unlink(struct config_item *os_desc_ci, cdev->os_desc_config = NULL; WARN_ON(gi->composite.gadget_driver.udc_name); mutex_unlock(&gi->lock); - return 0; } static struct configfs_item_operations os_desc_ops = { diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index e40d47d47d82ad9afd7124ef3ab0151b6e6ecfa7..aab3fc1dbb94c44f0287990e89aa98fc8bf3da3a 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -266,7 +266,7 @@ static void ffs_ep0_complete(struct usb_ep *ep, 
struct usb_request *req) { struct ffs_data *ffs = req->context; - complete_all(&ffs->ep0req_completion); + complete(&ffs->ep0req_completion); } static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) @@ -949,7 +949,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) goto error_mutex; } if (!io_data->read && - copy_from_iter(data, data_len, &io_data->data) != data_len) { + !copy_from_iter_full(data, data_len, &io_data->data)) { ret = -EFAULT; goto error_mutex; } @@ -3225,11 +3225,11 @@ static bool ffs_func_req_match(struct usb_function *f, switch (creq->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: - return ffs_func_revmap_intf(func, - le16_to_cpu(creq->wIndex) >= 0); + return (ffs_func_revmap_intf(func, + le16_to_cpu(creq->wIndex)) >= 0); case USB_RECIP_ENDPOINT: - return ffs_func_revmap_ep(func, - le16_to_cpu(creq->wIndex) >= 0); + return (ffs_func_revmap_ep(func, + le16_to_cpu(creq->wIndex)) >= 0); default: return (bool) (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP); diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index e2966f87c860b49e46a7148e51e35a76ca15685c..3151d2a0fe594b516841ae8c653878ad45899e2e 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -98,6 +98,60 @@ static struct hid_descriptor hidg_desc = { /*.desc[0].wDescriptorLenght = DYNAMIC */ }; +/* Super-Speed Support */ + +static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + /*.wMaxPacketSize = DYNAMIC */ + .bInterval = 4, /* FIXME: Add this field in the + * HID gadget configuration? + * (struct hidg_func_descriptor) + */ +}; + +static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = { + .bLength = sizeof(hidg_ss_in_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ + /* .wBytesPerInterval = DYNAMIC */ +}; + +static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_INT, + /*.wMaxPacketSize = DYNAMIC */ + .bInterval = 4, /* FIXME: Add this field in the + * HID gadget configuration? 
+ * (struct hidg_func_descriptor) + */ +}; + +static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = { + .bLength = sizeof(hidg_ss_out_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ + /* .wBytesPerInterval = DYNAMIC */ +}; + +static struct usb_descriptor_header *hidg_ss_descriptors[] = { + (struct usb_descriptor_header *)&hidg_interface_desc, + (struct usb_descriptor_header *)&hidg_desc, + (struct usb_descriptor_header *)&hidg_ss_in_ep_desc, + (struct usb_descriptor_header *)&hidg_ss_in_comp_desc, + (struct usb_descriptor_header *)&hidg_ss_out_ep_desc, + (struct usb_descriptor_header *)&hidg_ss_out_comp_desc, + NULL, +}; + /* High-Speed Support */ static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = { @@ -624,8 +678,14 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) /* set descriptor dynamic values */ hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass; hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol; + hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_ss_in_comp_desc.wBytesPerInterval = + cpu_to_le16(hidg->report_length); hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_ss_out_comp_desc.wBytesPerInterval = + cpu_to_le16(hidg->report_length); hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); /* @@ -641,8 +701,13 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_hs_out_ep_desc.bEndpointAddress = hidg_fs_out_ep_desc.bEndpointAddress; + hidg_ss_in_ep_desc.bEndpointAddress = + hidg_fs_in_ep_desc.bEndpointAddress; + hidg_ss_out_ep_desc.bEndpointAddress = + hidg_fs_out_ep_desc.bEndpointAddress; + status = usb_assign_descriptors(f, hidg_fs_descriptors, - hidg_hs_descriptors, NULL, NULL); + hidg_hs_descriptors, hidg_ss_descriptors, NULL); if (status) goto fail; @@ -840,7 +905,7 @@ static void hidg_free_inst(struct usb_function_instance *f) mutex_lock(&hidg_ida_lock); hidg_put_minor(opts->minor); - if (idr_is_empty(&hidg_ida.idr)) + if (ida_is_empty(&hidg_ida)) ghid_cleanup(); mutex_unlock(&hidg_ida_lock); @@ -866,7 +931,7 @@ static struct usb_function_instance *hidg_alloc_inst(void) mutex_lock(&hidg_ida_lock); - if (idr_is_empty(&hidg_ida.idr)) { + if (ida_is_empty(&hidg_ida)) { status = ghid_setup(NULL, HIDG_MINORS); if (status) { ret = ERR_PTR(status); @@ -879,7 +944,7 @@ static struct usb_function_instance *hidg_alloc_inst(void) if (opts->minor < 0) { ret = ERR_PTR(opts->minor); kfree(opts); - if (idr_is_empty(&hidg_ida.idr)) + if (ida_is_empty(&hidg_ida)) ghid_cleanup(); goto unlock; } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 639603722709648148423d01eb4df85544c9ff49..224717e63a5300970867a663cd030d8cd62068f6 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -998,7 +998,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm) /* Merge the skbs */ swap(skb2, ncm->skb_tx_data); if (ncm->skb_tx_data) { - dev_kfree_skb_any(ncm->skb_tx_data); + dev_consume_skb_any(ncm->skb_tx_data); ncm->skb_tx_data = NULL; } @@ -1009,7 +1009,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm) /* Copy NTB across. 
*/ ntb_iter = (void *) skb_put(skb2, ncm->skb_tx_ndp->len); memcpy(ntb_iter, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len); - dev_kfree_skb_any(ncm->skb_tx_ndp); + dev_consume_skb_any(ncm->skb_tx_ndp); ncm->skb_tx_ndp = NULL; /* Insert zero'd datagram. */ @@ -1078,6 +1078,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, if (!ncm->skb_tx_data) goto err; + ncm->skb_tx_data->dev = ncm->netdev; ntb_data = (void *) skb_put(ncm->skb_tx_data, ncb_len); memset(ntb_data, 0, ncb_len); /* dwSignature */ @@ -1096,6 +1097,8 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, GFP_ATOMIC); if (!ncm->skb_tx_ndp) goto err; + + ncm->skb_tx_ndp->dev = ncm->netdev; ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp, opts->ndp_size); memset(ntb_ndp, 0, ncb_len); @@ -1110,8 +1113,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, } /* Delay the timer. */ - hrtimer_start(&ncm->task_timer, - ktime_set(0, TX_TIMEOUT_NSECS), + hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS, HRTIMER_MODE_REL); /* Add the datagram position entries */ @@ -1133,7 +1135,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, memset(ntb_data, 0, dgram_pad); ntb_data = (void *) skb_put(ncm->skb_tx_data, skb->len); memcpy(ntb_data, skb->data, skb->len); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); skb = NULL; } else if (ncm->skb_tx_data && ncm->timer_force_tx) { @@ -1329,7 +1331,7 @@ static int ncm_unwrap_ntb(struct gether *port, } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); VDBG(port->func.config->cdev, "Parsed NTB with %d frames\n", dgram_counter); diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index 0473d619d5bf64d23154527ee597cc62706ada7c..b4058f0000e4878efae4a475f834d06f16679fdd 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -261,19 +261,10 @@ static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static int pn_net_mtu(struct net_device *dev, int new_mtu) -{ - if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU)) - return -EINVAL; - dev->mtu = new_mtu; - return 0; -} - static const struct net_device_ops pn_netdev_ops = { .ndo_open = pn_net_open, .ndo_stop = pn_net_close, .ndo_start_xmit = pn_net_xmit, - .ndo_change_mtu = pn_net_mtu, }; static void pn_net_setup(struct net_device *dev) @@ -282,6 +273,8 @@ static void pn_net_setup(struct net_device *dev) dev->type = ARPHRD_PHONET; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = PHONET_DEV_MTU; + dev->min_mtu = PHONET_MIN_MTU; + dev->max_mtu = PHONET_MAX_MTU; dev->hard_header_len = 1; dev->dev_addr[0] = PN_MEDIA_USB; dev->addr_len = 1; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 0de36cda6e4106156821e4d024c800cf6c4af206..8054da9276dd90afc6a38fb9c69e7771c7c25d59 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -1265,7 +1265,7 @@ static void gprinter_free_inst(struct usb_function_instance *f) mutex_lock(&printer_ida_lock); gprinter_put_minor(opts->minor); - if (idr_is_empty(&printer_ida.idr)) + if (ida_is_empty(&printer_ida)) gprinter_cleanup(); mutex_unlock(&printer_ida_lock); @@ -1289,7 +1289,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void) mutex_lock(&printer_ida_lock); - if (idr_is_empty(&printer_ida.idr)) { + if (ida_is_empty(&printer_ida)) { status = gprinter_setup(PRINTER_MINORS); if (status) 
{ ret = ERR_PTR(status); @@ -1302,7 +1302,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void) if (opts->minor < 0) { ret = ERR_PTR(opts->minor); kfree(opts); - if (idr_is_empty(&printer_ida.idr)) + if (ida_is_empty(&printer_ida)) gprinter_cleanup(); goto unlock; } diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 197f73386fac9ab45473de09db462ab5e4640d90..d2351139342f6200209078e769e04f5ea1eb2d1f 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -1073,7 +1073,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu, struct usbg_cmd *cmd; int tag; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); if (tag < 0) return ERR_PTR(-ENOMEM); diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index cd214ec8a601b4913b67f7d67e52fb8c57e003c4..969cfe7413808c5b1bcc421b56d60fb6db65e57d 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -1067,13 +1067,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); if (!agdev->out_ep) { dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - goto err; + return ret; } agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); if (!agdev->in_ep) { dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - goto err; + return ret; } uac2->p_prm.uac2 = uac2; @@ -1091,7 +1091,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL, NULL); if (ret) - goto err; + return ret; prm = &agdev->uac2.c_prm; prm->max_psize = hs_epout_desc.wMaxPacketSize; @@ -1106,19 +1106,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); if (!prm->rbuf) { prm->max_psize = 0; - goto err_free_descs; + goto err; } ret = alsa_uac2_init(agdev); if (ret) - goto err_free_descs; + goto err; return 0; -err_free_descs: - usb_free_all_descriptors(fn); err: kfree(agdev->uac2.p_prm.rbuf); kfree(agdev->uac2.c_prm.rbuf); +err_free_descs: + usb_free_all_descriptors(fn); return -EINVAL; } diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index ab6ac1b74ac0f59e4a09ec340a66a954934836b1..a3b5e468b116e1122edfe6c16f308467ed0c879d 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -80,8 +80,7 @@ static const struct file_operations rndis_proc_fops; #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ /* supported OIDs */ -static const u32 oid_supported_list[] = -{ +static const u32 oid_supported_list[] = { /* the general stuff */ RNDIS_OID_GEN_SUPPORTED_LIST, RNDIS_OID_GEN_HARDWARE_STATUS, @@ -474,8 +473,7 @@ static int gen_ndis_query_resp(struct rndis_params *params, u32 OID, u8 *buf, break; default: - pr_warning("%s: query unknown OID 0x%08X\n", - __func__, OID); + pr_warn("%s: query unknown OID 0x%08X\n", __func__, OID); } if (retval < 0) length = 0; @@ -546,8 +544,8 @@ static int gen_ndis_set_resp(struct rndis_params *params, u32 OID, break; default: - pr_warning("%s: set unknown OID 0x%08X, size %d\n", - __func__, OID, buf_len); + pr_warn("%s: set unknown OID 0x%08X, size %d\n", + __func__, OID, buf_len); } return retval; @@ -854,7 +852,7 @@ int rndis_msg_parser(struct rndis_params *params, u8 *buf) * In one case those messages seemed to relate to the host * 
suspending itself. */ - pr_warning("%s: unknown RNDIS message 0x%08X len %d\n", + pr_warn("%s: unknown RNDIS message 0x%08X len %d\n", __func__, MsgType, MsgLength); print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET, buf, MsgLength); diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index ef92eb66d8adf91fac1cc7dea85490ebc2f0895a..21e0430ffb98608bb6e0ab85ae9d3d51b508428a 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -22,8 +22,7 @@ #define RNDIS_MAXIMUM_FRAME_SIZE 1518 #define RNDIS_MAX_TOTAL_SIZE 1558 -typedef struct rndis_init_msg_type -{ +typedef struct rndis_init_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -32,8 +31,7 @@ typedef struct rndis_init_msg_type __le32 MaxTransferSize; } rndis_init_msg_type; -typedef struct rndis_init_cmplt_type -{ +typedef struct rndis_init_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -49,15 +47,13 @@ typedef struct rndis_init_cmplt_type __le32 AFListSize; } rndis_init_cmplt_type; -typedef struct rndis_halt_msg_type -{ +typedef struct rndis_halt_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; } rndis_halt_msg_type; -typedef struct rndis_query_msg_type -{ +typedef struct rndis_query_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -67,8 +63,7 @@ typedef struct rndis_query_msg_type __le32 DeviceVcHandle; } rndis_query_msg_type; -typedef struct rndis_query_cmplt_type -{ +typedef struct rndis_query_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -77,8 +72,7 @@ typedef struct rndis_query_cmplt_type __le32 InformationBufferOffset; } rndis_query_cmplt_type; -typedef struct rndis_set_msg_type -{ +typedef struct rndis_set_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -88,31 +82,27 @@ typedef struct rndis_set_msg_type __le32 DeviceVcHandle; } rndis_set_msg_type; -typedef struct rndis_set_cmplt_type -{ +typedef struct rndis_set_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; __le32 Status; } rndis_set_cmplt_type; -typedef struct rndis_reset_msg_type -{ +typedef struct rndis_reset_msg_type { __le32 MessageType; __le32 MessageLength; __le32 Reserved; } rndis_reset_msg_type; -typedef struct rndis_reset_cmplt_type -{ +typedef struct rndis_reset_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 Status; __le32 AddressingReset; } rndis_reset_cmplt_type; -typedef struct rndis_indicate_status_msg_type -{ +typedef struct rndis_indicate_status_msg_type { __le32 MessageType; __le32 MessageLength; __le32 Status; @@ -120,23 +110,20 @@ typedef struct rndis_indicate_status_msg_type __le32 StatusBufferOffset; } rndis_indicate_status_msg_type; -typedef struct rndis_keepalive_msg_type -{ +typedef struct rndis_keepalive_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; } rndis_keepalive_msg_type; -typedef struct rndis_keepalive_cmplt_type -{ +typedef struct rndis_keepalive_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; __le32 Status; } rndis_keepalive_cmplt_type; -struct rndis_packet_msg_type -{ +struct rndis_packet_msg_type { __le32 MessageType; __le32 MessageLength; __le32 DataOffset; @@ -150,8 +137,7 @@ struct rndis_packet_msg_type __le32 Reserved; } __attribute__ ((packed)); -struct rndis_config_parameter -{ +struct rndis_config_parameter { __le32 ParameterNameOffset; __le32 ParameterNameLength; __le32 ParameterType; @@ -160,23 +146,20 @@ struct 
rndis_config_parameter }; /* implementation specific */ -enum rndis_state -{ +enum rndis_state { RNDIS_UNINITIALIZED, RNDIS_INITIALIZED, RNDIS_DATA_INITIALIZED, }; -typedef struct rndis_resp_t -{ +typedef struct rndis_resp_t { struct list_head list; u8 *buf; u32 length; int send; } rndis_resp_t; -typedef struct rndis_params -{ +typedef struct rndis_params { int confignr; u8 used; u16 saved_filter; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index fe1811650dbc3d9fb0fc6a983486cbae3b653475..b4e5d6dfd549501b29a95f8da35b3d32f29bfb45 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -142,15 +142,6 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult) /* NETWORK DRIVER HOOKUP (to the layer above this driver) */ -static int ueth_change_mtu(struct net_device *net, int new_mtu) -{ - if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN) - return -ERANGE; - net->mtu = new_mtu; - - return 0; -} - static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) { struct eth_dev *dev = netdev_priv(net); @@ -224,7 +215,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) if (dev->port_usb->is_fixed) size = max_t(size_t, size, dev->port_usb->fixed_out_len); - skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); + skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags); if (skb == NULL) { DBG(dev, "no rx skb\n"); goto enomem; @@ -455,16 +446,17 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req) /* FALLTHROUGH */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ + dev_kfree_skb_any(skb); break; case 0: dev->net->stats.tx_bytes += skb->len; + dev_consume_skb_any(skb); } dev->net->stats.tx_packets++; spin_lock(&dev->req_lock); list_add(&req->list, &dev->tx_reqs); spin_unlock(&dev->req_lock); - dev_kfree_skb_any(skb); atomic_dec(&dev->tx_qlen); if (netif_carrier_ok(dev->net)) @@ -588,14 +580,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, req->length = length; - /* throttle high/super speed IRQ rate back slightly */ - if (gadget_is_dualspeed(dev->gadget)) - req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || - dev->gadget->speed == USB_SPEED_SUPER)) && - !list_empty(&dev->tx_reqs)) - ? 
((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) - : 0; - retval = usb_ep_queue(in, req, GFP_ATOMIC); switch (retval) { default: @@ -737,7 +721,6 @@ static const struct net_device_ops eth_netdev_ops = { .ndo_open = eth_open, .ndo_stop = eth_stop, .ndo_start_xmit = eth_start_xmit, - .ndo_change_mtu = ueth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -800,6 +783,10 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, net->ethtool_ops = &ops; + /* MTU range: 14 - 15412 */ + net->min_mtu = ETH_HLEN; + net->max_mtu = GETHER_MAX_ETH_FRAME_LEN; + dev->gadget = g; SET_NETDEV_DEV(net, &g->dev); SET_NETDEV_DEVTYPE(net, &gadget_type); diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index e0cd1e4c88927cfc29e12f050ec2a4eb8555136b..000677c991b0264ee079f46e79be1ab271001c9b 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -622,8 +622,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) switch (req->status) { default: /* presumably a transient fault */ - pr_warning("%s: unexpected %s status %d\n", - __func__, ep->name, req->status); + pr_warn("%s: unexpected %s status %d\n", + __func__, ep->name, req->status); /* FALL THROUGH */ case 0: /* normal completion */ @@ -1256,7 +1256,8 @@ static void gserial_console_exit(void) struct gscons_info *info = &gscons_info; unregister_console(&gserial_cons); - kthread_stop(info->console_thread); + if (info->console_thread != NULL) + kthread_stop(info->console_thread); gs_buf_free(&info->con_buf); } diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h index 7d3bb6272e063a9bbb3f8e504c1447c5ab829094..11d70dead32b67bb0d2218060303cec352a3415d 100644 --- a/drivers/usb/gadget/function/uvc.h +++ b/drivers/usb/gadget/function/uvc.h @@ -26,14 +26,12 @@ #define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5) #define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5) -struct uvc_request_data -{ +struct uvc_request_data { __s32 length; __u8 data[60]; }; -struct uvc_event -{ +struct uvc_event { union { enum usb_device_speed speed; struct usb_ctrlrequest req; @@ -104,8 +102,7 @@ extern unsigned int uvc_gadget_trace_param; * Structures */ -struct uvc_video -{ +struct uvc_video { struct usb_ep *ep; /* Frame parameters */ @@ -134,15 +131,13 @@ struct uvc_video unsigned int fid; }; -enum uvc_state -{ +enum uvc_state { UVC_STATE_DISCONNECTED, UVC_STATE_CONNECTED, UVC_STATE_STREAMING, }; -struct uvc_device -{ +struct uvc_device { struct video_device vdev; struct v4l2_device v4l2_dev; enum uvc_state state; @@ -175,8 +170,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f) return container_of(f, struct uvc_device, func); } -struct uvc_file_handle -{ +struct uvc_file_handle { struct v4l2_fh vfh; struct uvc_video *device; }; diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 31125a4a2658938cdc67ef4b24ee9bfbfeeadf44..4e037d2a7a60c3eefdd9f6585def29b23c02984b 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -547,7 +547,7 @@ static int uvcg_control_class_allow_link(struct config_item *src, return ret; } -static int uvcg_control_class_drop_link(struct config_item *src, +static void uvcg_control_class_drop_link(struct config_item *src, struct config_item *target) { struct config_item *control, *header; @@ -555,7 +555,6 @@ static int uvcg_control_class_drop_link(struct 
config_item *src, struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; struct uvc_descriptor_header **class_array; struct uvcg_control_header *target_hdr; - int ret = -EINVAL; mutex_lock(su_mutex); /* for navigating configfs hierarchy */ @@ -569,23 +568,17 @@ static int uvcg_control_class_drop_link(struct config_item *src, mutex_lock(&opts->lock); class_array = uvcg_get_ctl_class_arr(src, opts); - if (!class_array) - goto unlock; - if (opts->refcnt) { - ret = -EBUSY; + if (!class_array || opts->refcnt) goto unlock; - } target_hdr = to_uvcg_control_header(target); --target_hdr->linked; class_array[0] = NULL; - ret = 0; unlock: mutex_unlock(&opts->lock); out: mutex_unlock(su_mutex); - return ret; } static struct configfs_item_operations uvcg_control_class_item_ops = { @@ -777,7 +770,7 @@ static int uvcg_streaming_header_allow_link(struct config_item *src, return ret; } -static int uvcg_streaming_header_drop_link(struct config_item *src, +static void uvcg_streaming_header_drop_link(struct config_item *src, struct config_item *target) { struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; @@ -786,7 +779,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src, struct uvcg_streaming_header *src_hdr; struct uvcg_format *target_fmt = NULL; struct uvcg_format_ptr *format_ptr, *tmp; - int ret = -EINVAL; src_hdr = to_uvcg_streaming_header(src); mutex_lock(su_mutex); /* for navigating configfs hierarchy */ @@ -811,8 +803,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src, out: mutex_unlock(&opts->lock); mutex_unlock(su_mutex); - return ret; - } static struct configfs_item_operations uvcg_streaming_header_item_ops = { @@ -2051,7 +2041,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src, return ret; } -static int uvcg_streaming_class_drop_link(struct config_item *src, +static void uvcg_streaming_class_drop_link(struct config_item *src, struct config_item *target) { struct config_item *streaming, *header; @@ -2059,7 +2049,6 @@ static int uvcg_streaming_class_drop_link(struct config_item *src, struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; struct uvc_descriptor_header ***class_array; struct uvcg_streaming_header *target_hdr; - int ret = -EINVAL; mutex_lock(su_mutex); /* for navigating configfs hierarchy */ @@ -2076,23 +2065,19 @@ static int uvcg_streaming_class_drop_link(struct config_item *src, if (!class_array || !*class_array) goto unlock; - if (opts->refcnt) { - ret = -EBUSY; + if (opts->refcnt) goto unlock; - } target_hdr = to_uvcg_streaming_header(target); --target_hdr->linked; kfree(**class_array); kfree(*class_array); *class_array = NULL; - ret = 0; unlock: mutex_unlock(&opts->lock); out: mutex_unlock(su_mutex); - return ret; } static struct configfs_item_operations uvcg_streaming_class_item_ops = { diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c index f4ccbd56f4d254c42112f3ae89949aefe6d5c8d1..3e22b45687d37d8f256f0f1524af0e390434bf14 100644 --- a/drivers/usb/gadget/function/uvc_v4l2.c +++ b/drivers/usb/gadget/function/uvc_v4l2.c @@ -53,8 +53,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) * V4L2 ioctls */ -struct uvc_format -{ +struct uvc_format { u8 bpp; u32 fcc; }; diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index 3d0d5d94a62f2fe6927983ba6979784eb20eceeb..0f01c04d7cbd856581da53d6e69cba5f50b2c922 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ 
b/drivers/usb/gadget/function/uvc_video.c @@ -243,7 +243,7 @@ uvc_video_alloc_requests(struct uvc_video *video) req_size = video->ep->maxpacket * max_t(unsigned int, video->ep->maxburst, 1) - * (video->ep->mult + 1); + * (video->ep->mult); for (i = 0; i < UVC_NUM_REQUESTS; ++i) { video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL); diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index bd82dd12deffd25aa9ae8528d60a898bdfa7551b..e8f4102d19df54fa1983578f8f773067ee317d92 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -667,7 +667,7 @@ ep_write_iter(struct kiocb *iocb, struct iov_iter *from) return -ENOMEM; } - if (unlikely(copy_from_iter(buf, len, from) != len)) { + if (unlikely(!copy_from_iter_full(buf, len, from))) { value = -EFAULT; goto out; } diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h index 0a433e6b346bca3f170d83a497a3bfe68cb863b6..9bbe72764f3112fff8ac3fb8aa39d4c76af6a4b8 100644 --- a/drivers/usb/gadget/udc/at91_udc.h +++ b/drivers/usb/gadget/udc/at91_udc.h @@ -175,7 +175,7 @@ struct at91_request { #endif #define ERR(stuff...) pr_err("udc: " stuff) -#define WARNING(stuff...) pr_warning("udc: " stuff) +#define WARNING(stuff...) pr_warn("udc: " stuff) #define INFO(stuff...) pr_info("udc: " stuff) #define DBG(stuff...) pr_debug("udc: " stuff) diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 45bc997d071131c1e7ef51c3968dd5e295994f44..f3212db9bc37bf1889c482e86c9c679720766b1f 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -529,7 +529,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); - maxpacket = usb_endpoint_maxp(desc) & 0x7ff; + maxpacket = usb_endpoint_maxp(desc); if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index) || ep->index == 0 @@ -573,7 +573,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) * Bits 11:12 specify number of _additional_ * transactions per microframe. 
*/ - nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1; + nr_trans = usb_endpoint_maxp_mult(desc); if (nr_trans > 3) return -EINVAL; @@ -1464,8 +1464,8 @@ static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep) pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); DBG(DBG_HW, "Packet length: %u\n", pkt_len); if (pkt_len != sizeof(crq)) { - pr_warning("udc: Invalid packet length %u " - "(expected %zu)\n", pkt_len, sizeof(crq)); + pr_warn("udc: Invalid packet length %u (expected %zu)\n", + pkt_len, sizeof(crq)); set_protocol_stall(udc, ep); return; } diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c index 4d5e9188beae4522b235cf22b6b2e877402742d5..6e920f1dce021e79ba9fb5eb24884b3106469826 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c +++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c @@ -182,7 +182,7 @@ int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep) usb_endpoint_xfer_int(desc)) { param2 |= si; - mbs = (usb_endpoint_maxp(desc) & 0x1800) >> 11; + mbs = usb_endpoint_maxp_mult(desc); param2 |= mbs << MB_SHIFT; } break; diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index ccaa74ab6c0ed5c8bc2a0c10b921745c8d2977a9..ff1ef24d1777a1fc4a73436cb456d2082d41de7e 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -446,7 +446,7 @@ static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds) bd_xfr->start_bdi = bd_list->eqp_bdi; bd = bdi_to_bd(ep, bd_list->eqp_bdi); req_len = req->usb_req.length; - maxp = usb_endpoint_maxp(ep->desc) & 0x7ff; + maxp = usb_endpoint_maxp(ep->desc); tfs = roundup(req->usb_req.length, maxp); tfs = tfs/maxp; dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n", diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 77d07904f932e7d65bf735f5dc6238465a309650..02b14e91ae6c5a586417dec05d558dbe1cace35a 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -503,7 +503,7 @@ static int dummy_enable(struct usb_ep *_ep, * maximum packet size. * For SS devices the wMaxPacketSize is limited by 1024. */ - max = usb_endpoint_maxp(desc) & 0x7ff; + max = usb_endpoint_maxp(desc); /* drivers must not request bad settings, since lower levels * (hardware or its drivers) may not check. 
some endpoints @@ -1483,8 +1483,7 @@ static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep) int tmp; /* high bandwidth mode */ - tmp = usb_endpoint_maxp(ep->desc); - tmp = (tmp >> 11) & 0x03; + tmp = usb_endpoint_maxp_mult(ep->desc); tmp *= 8 /* applies to entire frame */; limit += limit * tmp; } diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index aab5221d6c2e35050edccaaa170f960f90c40887..71094e479a965f97134167aba0ece8b0385d5224 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -585,8 +585,7 @@ static int fsl_ep_enable(struct usb_ep *_ep, break; case USB_ENDPOINT_XFER_ISOC: /* Calculate transactions needed for high bandwidth iso */ - mult = (unsigned char)(1 + ((max >> 11) & 0x03)); - max = max & 0x7ff; /* bit 0~10 */ + mult = usb_endpoint_maxp_mult(desc); /* 3 transactions at most */ if (mult > 3) goto en_done; diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h index 84715625b2b3b6e56361c10cefb88bde0290513d..e92b8408b6f610c9982c9fc91499e3c93daad8af 100644 --- a/drivers/usb/gadget/udc/fsl_usb2_udc.h +++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h @@ -554,7 +554,7 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length) #endif #define ERR(stuff...) pr_err("udc: " stuff) -#define WARNING(stuff...) pr_warning("udc: " stuff) +#define WARNING(stuff...) pr_warn("udc: " stuff) #define INFO(stuff...) pr_info("udc: " stuff) /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c index 948845c90e47d3cf3333511f74673b2d7cd62daf..42ff308578df5c68751b86f59f10ecb8dec72ba5 100644 --- a/drivers/usb/gadget/udc/fusb300_udc.c +++ b/drivers/usb/gadget/udc/fusb300_udc.c @@ -218,7 +218,7 @@ static int config_ep(struct fusb300_ep *ep, (info.type == USB_ENDPOINT_XFER_ISOC)) { info.interval = desc->bInterval; if (info.type == USB_ENDPOINT_XFER_ISOC) - info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11); + info.bw_num = usb_endpoint_maxp_mult(desc); } ep_fifo_setting(fusb300, info); diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 39b7136d31d9c7b5be3263bb2ed2070dd3b2f537..b16f8af3405045cf5f2af2570c20f104c5143a04 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -1539,7 +1539,7 @@ static int gr_ep_enable(struct usb_ep *_ep, * additional transactions. 
*/ max = 0x7ff & usb_endpoint_maxp(desc); - nt = 0x3 & (usb_endpoint_maxp(desc) >> 11); + nt = usb_endpoint_maxp_mult(desc) - 1; buffer_size = GR_BUFFER_SIZE(epctrl); if (nt && (mode == 0 || mode == 2)) { dev_err(dev->dev, diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c index 6e977dc225709e7ee90e10b09abf2fad006278c1..de3e034836592019ecba74b3c9b900714a247f72 100644 --- a/drivers/usb/gadget/udc/m66592-udc.c +++ b/drivers/usb/gadget/udc/m66592-udc.c @@ -637,7 +637,7 @@ static void init_controller(struct m66592 *m66592) clock = M66592_XTAL48; break; default: - pr_warning("m66592-udc: xtal configuration error\n"); + pr_warn("m66592-udc: xtal configuration error\n"); clock = 0; } @@ -649,7 +649,7 @@ static void init_controller(struct m66592 *m66592) irq_sense = 0; break; default: - pr_warning("m66592-udc: irq trigger config error\n"); + pr_warn("m66592-udc: irq trigger config error\n"); irq_sense = 0; } diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c index b9e19a5913224be4fca62cbc03d93de1cb4c2701..8d726bd767fd44408f7ba2f63f88278d0a772f4b 100644 --- a/drivers/usb/gadget/udc/mv_u3d_core.c +++ b/drivers/usb/gadget/udc/mv_u3d_core.c @@ -462,6 +462,12 @@ static int mv_u3d_req_to_trb(struct mv_u3d_req *req) req->trb_head->trb_hw, trb_num * sizeof(*trb_hw), DMA_BIDIRECTIONAL); + if (dma_mapping_error(u3d->gadget.dev.parent, + req->trb_head->trb_dma)) { + kfree(req->trb_head->trb_hw); + kfree(req->trb_head); + return -EFAULT; + } req->chain = 1; } @@ -487,30 +493,32 @@ mv_u3d_start_queue(struct mv_u3d_ep *ep) ret = usb_gadget_map_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep)); if (ret) - return ret; + goto break_processing; req->req.status = -EINPROGRESS; req->req.actual = 0; req->trb_count = 0; - /* build trbs and push them to device queue */ - if (!mv_u3d_req_to_trb(req)) { - ret = mv_u3d_queue_trb(ep, req); - if (ret) { - ep->processing = 0; - return ret; - } - } else { - ep->processing = 0; + /* build trbs */ + ret = mv_u3d_req_to_trb(req); + if (ret) { dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__); - return -ENOMEM; + goto break_processing; } + /* and push them to device queue */ + ret = mv_u3d_queue_trb(ep, req); + if (ret) + goto break_processing; + /* irq handler advances the queue */ - if (req) - list_add_tail(&req->queue, &ep->queue); + list_add_tail(&req->queue, &ep->queue); return 0; + +break_processing: + ep->processing = 0; + return ret; } static int mv_u3d_ep_enable(struct usb_ep *_ep, diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index ce73b3552269fcd3793102d408685f69701ed22e..d82a91bddbd979ba7690cc6e93c7b1b418b15e43 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -494,8 +494,7 @@ static int mv_ep_enable(struct usb_ep *_ep, break; case USB_ENDPOINT_XFER_ISOC: /* Calculate transactions needed for high bandwidth iso */ - mult = (unsigned char)(1 + ((max >> 11) & 0x03)); - max = max & 0x7ff; /* bit 0~10 */ + mult = usb_endpoint_maxp_mult(desc); /* 3 transactions at most */ if (mult > 3) goto en_done; diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 7c61134320939c4e95b936a07387b680db5c9d38..078c91d546e0fec6b24cd0c149c35476bbd0cd89 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -202,10 +202,10 @@ net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) if (!dev->driver || dev->gadget.speed == 
USB_SPEED_UNKNOWN) return -ESHUTDOWN; - max = usb_endpoint_maxp(desc) & 0x1fff; + max = usb_endpoint_maxp(desc); spin_lock_irqsave(&dev->lock, flags); - _ep->maxpacket = max & 0x7fff; + _ep->maxpacket = max; ep->desc = desc; /* net2272_ep_reset() has already been called */ diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 61c938c36d88fd2ce83f2d149910f4086ddce5e1..85504419ab312e58a83c52d524bfdebadf5e1ed7 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -224,14 +224,14 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) } /* sanity check ep-e/ep-f since their fifos are small */ - max = usb_endpoint_maxp(desc) & 0x1fff; + max = usb_endpoint_maxp(desc); if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) { ret = -ERANGE; goto print_err; } spin_lock_irqsave(&dev->lock, flags); - _ep->maxpacket = max & 0x7ff; + _ep->maxpacket = max; ep->desc = desc; /* ep_reset() has already been called */ @@ -1839,7 +1839,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr, ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK, (t & USB_DIR_IN) ? "in" : "out", type_string(d->bmAttributes), - usb_endpoint_maxp(d) & 0x1fff, + usb_endpoint_maxp(d), ep->dma ? "dma" : "pio", ep->fifo_size ); } else /* ep0 should only have one transfer queued */ diff --git a/drivers/usb/gadget/udc/omap_udc.h b/drivers/usb/gadget/udc/omap_udc.h index cfadeb5fc5deba66ac66e7a2228812f08fcd5261..26974196cf4454701d00e00b33dd9d8725628e7d 100644 --- a/drivers/usb/gadget/udc/omap_udc.h +++ b/drivers/usb/gadget/udc/omap_udc.h @@ -187,7 +187,7 @@ struct omap_udc { #endif #define ERR(stuff...) pr_err("udc: " stuff) -#define WARNING(stuff...) pr_warning("udc: " stuff) +#define WARNING(stuff...) pr_warn("udc: " stuff) #define INFO(stuff...) pr_info("udc: " stuff) #define DBG(stuff...) pr_debug("udc: " stuff) diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h index 4b8b72d7ab37306ad99240ab691dc5d5e0d6eb7e..a458bec2536d4c1832a60589a70f33e30fd0a86e 100644 --- a/drivers/usb/gadget/udc/pxa25x_udc.h +++ b/drivers/usb/gadget/udc/pxa25x_udc.h @@ -248,7 +248,7 @@ dump_state(struct pxa25x_udc *dev) #define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0) #define ERR(stuff...) pr_err("udc: " stuff) -#define WARNING(stuff...) pr_warning("udc: " stuff) +#define WARNING(stuff...) pr_warn("udc: " stuff) #define INFO(stuff...) pr_info("udc: " stuff) diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index eb3571ee59e3c4a454b87e2103a921ed88eea2f7..4643a01262b40db391624cd842669b50252d865a 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -1047,10 +1047,10 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep, if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; - max = usb_endpoint_maxp(desc) & 0x1fff; + max = usb_endpoint_maxp(desc); local_irq_save(flags); - _ep->maxpacket = max & 0x7ff; + _ep->maxpacket = max; ep->ep.desc = desc; ep->halted = 0; ep->bEndpointAddress = desc->bEndpointAddress; diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 0b80cee30da4740e8c28c6ea7a4cc700163a12ee..6361fc73930669750317b4fa4e995d74e8954e3c 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -479,9 +479,10 @@ config USB_OHCI_HCD_OMAP3 OMAP3 and later chips. 
config USB_OHCI_HCD_DAVINCI - bool "OHCI support for TI DaVinci DA8xx" + tristate "OHCI support for TI DaVinci DA8xx" depends on ARCH_DAVINCI_DA8XX - depends on USB_OHCI_HCD=y + depends on USB_OHCI_HCD + select PHY_DA8XX_USB default y help Enables support for the DaVinci DA8xx integrated OHCI diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index 6ef785b0ea8ff0b5f4ee3bc40ba22d17c855478f..2644537b7bcfb5cc8452243838815d3aa5669409 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_USB_OHCI_HCD_AT91) += ohci-at91.o obj-$(CONFIG_USB_OHCI_HCD_S3C2410) += ohci-s3c2410.o obj-$(CONFIG_USB_OHCI_HCD_LPC32XX) += ohci-nxp.o obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o +obj-$(CONFIG_USB_OHCI_HCD_DAVINCI) += ohci-da8xx.o obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o obj-$(CONFIG_USB_FHCI_HCD) += fhci.o diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 9f5ffb62997303dedd1d6c7d929191992a039c81..91701cc680824189a634807d2aed8a33467d712f 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -286,6 +286,9 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci) if (pdata->has_fsl_erratum_a005275 == 1) ehci->has_fsl_hs_errata = 1; + if (pdata->has_fsl_erratum_a005697 == 1) + ehci->has_fsl_susp_errata = 1; + if ((pdata->operating_mode == FSL_USB2_DR_HOST) || (pdata->operating_mode == FSL_USB2_DR_OTG)) if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0)) diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 74f62d68f01362d144e8de21bd188c06e31a7945..df169c8e7225f611cf52411c1d6d171f6fb3546a 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -310,6 +310,14 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) } spin_unlock_irq(&ehci->lock); + if (changed && ehci_has_fsl_susp_errata(ehci)) + /* + * Wait for at least 10 milliseconds to ensure the controller + * enters the suspend state before initiating a port resume + * using the Force Port Resume bit (Not-EHCI compatible). + */ + usleep_range(10000, 20000); + if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) { /* * Wait for HCD to enter low-power mode or for the bus @@ -1200,6 +1208,12 @@ int ehci_hub_control( wIndex, (temp1 & HOSTPC_PHCD) ?
"succeeded" : "failed"); } + if (ehci_has_fsl_susp_errata(ehci)) { + /* 10ms for HCD enter suspend */ + spin_unlock_irqrestore(&ehci->lock, flags); + usleep_range(10000, 20000); + spin_lock_irqsave(&ehci->lock, flags); + } set_bit(wIndex, &ehci->suspended_ports); break; case USB_PORT_FEAT_POWER: diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 3b3649d88c5f00281123975a931f3ecd52a14289..93326974ff4b3ac3ec413ceda9d6cfa54cca0046 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -258,9 +258,8 @@ static int ehci_pci_setup(struct usb_hcd *hcd) /* These workarounds need to be applied after ehci_setup() */ switch (pdev->vendor) { case PCI_VENDOR_ID_NEC: - ehci->need_io_watchdog = 0; - break; case PCI_VENDOR_ID_INTEL: + case PCI_VENDOR_ID_AMD: ehci->need_io_watchdog = 0; break; case PCI_VENDOR_ID_NVIDIA: diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index eca3710d8fc44833506f80f1578eb1578b6838f6..8f3f055c05fa64c4816b87f14b9bbe77e54972cc 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -550,11 +550,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) /*-------------------------------------------------------------------------*/ -// high bandwidth multiplier, as encoded in highspeed endpoint descriptors -#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) -// ... and packet size, for any kind of endpoint descriptor -#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) - /* * reverse of qh_urb_transaction: free a list of TDs. * used for cleanup after errors, before HC sees an URB's TDs. @@ -651,7 +646,7 @@ qh_urb_transaction ( token |= (1 /* "in" */ << 8); /* else it's already initted to "out" pid (0 << 8) */ - maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); + maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input); /* * buffer gets wrapped in one or more qtds; @@ -770,9 +765,11 @@ qh_make ( gfp_t flags ) { struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); + struct usb_host_endpoint *ep; u32 info1 = 0, info2 = 0; int is_input, type; int maxp = 0; + int mult; struct usb_tt *tt = urb->dev->tt; struct ehci_qh_hw *hw; @@ -787,13 +784,15 @@ qh_make ( is_input = usb_pipein (urb->pipe); type = usb_pipetype (urb->pipe); - maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input); + ep = usb_pipe_endpoint (urb->dev, urb->pipe); + maxp = usb_endpoint_maxp (&ep->desc); + mult = usb_endpoint_maxp_mult (&ep->desc); /* 1024 byte maxpacket is a hardware ceiling. High bandwidth * acts like up to 3KB, but is built from smaller packets. */ - if (max_packet(maxp) > 1024) { - ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp)); + if (maxp > 1024) { + ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp); goto done; } @@ -809,8 +808,7 @@ qh_make ( unsigned tmp; qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, - is_input, 0, - hb_mult(maxp) * max_packet(maxp))); + is_input, 0, mult * maxp)); qh->ps.phase = NO_FRAME; if (urb->dev->speed == USB_SPEED_HIGH) { @@ -854,7 +852,7 @@ qh_make ( think_time = tt ? tt->think_time : 0; qh->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time (urb->dev->speed, - is_input, 0, max_packet (maxp))); + is_input, 0, maxp)); if (urb->interval > ehci->periodic_size) urb->interval = ehci->periodic_size; qh->ps.period = urb->interval; @@ -925,11 +923,11 @@ qh_make ( * to help them do so. So now people expect to use * such nonconformant devices with Linux too; sigh. 
*/ - info1 |= max_packet(maxp) << 16; + info1 |= maxp << 16; info2 |= (EHCI_TUNE_MULT_HS << 30); } else { /* PIPE_INTERRUPT */ - info1 |= max_packet (maxp) << 16; - info2 |= hb_mult (maxp) << 30; + info1 |= maxp << 16; + info2 |= mult << 30; } break; default: @@ -1221,7 +1219,7 @@ static int submit_single_step_set_feature( token |= (1 /* "in" */ << 8); /*This is IN stage*/ - maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0)); + maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0); qtd_fill(ehci, qtd, buf, len, token, maxpacket); diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 1dfe54f147370711da9884066164cc8913a4c5ee..980a6b3b2da20587ff8577f074a74f0db5901e05 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1064,11 +1064,10 @@ iso_stream_init( /* knows about ITD vs SITD */ if (dev->speed == USB_SPEED_HIGH) { - unsigned multi = hb_mult(maxp); + unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc); stream->highspeed = 1; - maxp = max_packet(maxp); buf1 |= maxp; maxp *= multi; diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 69f50e6533a6ec68fa3cefeeb43842964dc227f7..3893b5bafd8725a00ec4b32335a83c53fcef33a9 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c @@ -88,8 +88,7 @@ static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event, ktime_t *timeout = &ehci->hr_timeouts[event]; if (resched) - *timeout = ktime_add(ktime_get(), - ktime_set(0, event_delays_ns[event])); + *timeout = ktime_add(ktime_get(), event_delays_ns[event]); ehci->enabled_hrtimer_events |= (1 << event); /* Track only the lowest-numbered pending event */ @@ -425,7 +424,7 @@ static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t) */ now = ktime_get(); for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) { - if (now.tv64 >= ehci->hr_timeouts[e].tv64) + if (now >= ehci->hr_timeouts[e]) event_handlers[e](ehci); else ehci_enable_event(ehci, e, false); diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c index e42a29e8e229bd79280f5e721717bf15aff5dd50..63b9d0c67963094f15ba9950c9f85c1126f3a0db 100644 --- a/drivers/usb/host/ehci-w90x900.c +++ b/drivers/usb/host/ehci-w90x900.c @@ -33,8 +33,7 @@ static const char hcd_name[] = "ehci-w90x900 "; static struct hc_driver __read_mostly ehci_w90x900_hc_driver; -static int usb_w90x900_probe(const struct hc_driver *driver, - struct platform_device *pdev) +static int ehci_w90x900_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct ehci_hcd *ehci; @@ -42,7 +41,8 @@ static int usb_w90x900_probe(const struct hc_driver *driver, int retval = 0, irq; unsigned long val; - hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI"); + hcd = usb_create_hcd(&ehci_w90x900_hc_driver, + &pdev->dev, "w90x900 EHCI"); if (!hcd) { retval = -ENOMEM; goto err1; @@ -63,9 +63,9 @@ static int usb_w90x900_probe(const struct hc_driver *driver, HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); /* enable PHY 0,1,the regs only apply to w90p910 - * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of - * w90p910 IC relative to ehci->regs. - */ + * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of + * w90p910 IC relative to ehci->regs. 
+ */ val = __raw_readl(ehci->regs+PHY0_CTR); val |= ENPHY; __raw_writel(val, ehci->regs+PHY0_CTR); @@ -92,26 +92,12 @@ static int usb_w90x900_probe(const struct hc_driver *driver, return retval; } -static void usb_w90x900_remove(struct usb_hcd *hcd, - struct platform_device *pdev) -{ - usb_remove_hcd(hcd); - usb_put_hcd(hcd); -} - -static int ehci_w90x900_probe(struct platform_device *pdev) -{ - if (usb_disabled()) - return -ENODEV; - - return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev); -} - static int ehci_w90x900_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); - usb_w90x900_remove(hcd, pdev); + usb_remove_hcd(hcd); + usb_put_hcd(hcd); return 0; } diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 3f3b74aeca9740c67f45831762beea1d3c2d9097..a8e36170d8b8a623e4726265b4d194156f231d4f 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -219,6 +219,7 @@ struct ehci_hcd { /* one per controller */ unsigned no_selective_suspend:1; unsigned has_fsl_port_bug:1; /* FreeScale */ unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */ + unsigned has_fsl_susp_errata:1; /* NXP SUSP quirk */ unsigned big_endian_mmio:1; unsigned big_endian_desc:1; unsigned big_endian_capbase:1; @@ -709,6 +710,13 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) #define ehci_has_fsl_hs_errata(e) (0) #endif +/* + * Some Freescale/NXP processors have an erratum (USB A-005697) + * in which we need to wait 10ms for the bus to enter suspend mode + * after setting the SUSP bit. + */ +#define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata) + /* * While most USB host controllers implement their registers in * little-endian format, a minority (celleb companion chip) implement diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 66efa9a6768776f91407dd3e91b7499560301ac2..9d0b0518290a1d596f7018216bc694d5bbc037b1 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -1080,8 +1080,7 @@ static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event, ktime_t *timeout = &fotg210->hr_timeouts[event]; if (resched) - *timeout = ktime_add(ktime_get(), - ktime_set(0, event_delays_ns[event])); + *timeout = ktime_add(ktime_get(), event_delays_ns[event]); fotg210->enabled_hrtimer_events |= (1 << event); /* Track only the lowest-numbered pending event */ @@ -1381,7 +1380,7 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t) */ now = ktime_get(); for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) { - if (now.tv64 >= fotg210->hr_timeouts[e].tv64) + if (now >= fotg210->hr_timeouts[e]) event_handlers[e](fotg210); else fotg210_enable_event(fotg210, e, false); diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c index f07ccb25bc244f4f98c33a80d12a9c874e3696c1..e90ddb5307653f7a61b3054def1e5e6c82217efb 100644 --- a/drivers/usb/host/fsl-mph-dr-of.c +++ b/drivers/usb/host/fsl-mph-dr-of.c @@ -226,6 +226,8 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev) of_property_read_bool(np, "fsl,usb-erratum-a007792"); pdata->has_fsl_erratum_a005275 = of_property_read_bool(np, "fsl,usb-erratum-a005275"); + pdata->has_fsl_erratum_a005697 = + of_property_read_bool(np, "fsl,usb_erratum-a005697"); /* * Determine whether phy_clk_valid needs to be checked diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 6cf82ee460a68aec25e47b6e0086e1b3552c433f..0f2b4b358e1a74384c05b5bff3ffe0acdbf512f5 100644 --- 
a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -147,7 +147,7 @@ static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362 if (epq) DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name); else - pr_warning("%s: invalid PTD $%04x\n", __func__, offset); + pr_warn("%s: invalid PTD $%04x\n", __func__, offset); return epq; } @@ -157,8 +157,9 @@ static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index) int offset; if (index * epq->blk_size > epq->buf_size) { - pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index, - epq->buf_size / epq->blk_size); + pr_warn("%s: Bad %s index %d(%d)\n", + __func__, epq->name, index, + epq->buf_size / epq->blk_size); return -EINVAL; } offset = epq->buf_start + index * epq->blk_size; @@ -902,8 +903,8 @@ static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd) ptd_offset = next_ptd(epq, ep); if (ptd_offset < 0) { - pr_warning("%s: req %d No more %s PTD buffers available\n", __func__, - ep->num_req, epq->name); + pr_warn("%s: req %d No more %s PTD buffers available\n", + __func__, ep->num_req, epq->name); break; } } @@ -973,8 +974,8 @@ static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done break; } if (done_map) - pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map, - epq->skip_map); + pr_warn("%s: done_map not clear: %08lx:%08lx\n", + __func__, done_map, epq->skip_map); atomic_dec(&epq->finishing); } @@ -1433,7 +1434,7 @@ static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) } else DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb); } else { - pr_warning("%s: No EP in URB %p\n", __func__, urb); + pr_warn("%s: No EP in URB %p\n", __func__, urb); retval = -EINVAL; } done: @@ -1748,10 +1749,10 @@ static int isp1362_bus_suspend(struct usb_hcd *hcd) /* FALL THROUGH */ case OHCI_USB_RESET: status = -EBUSY; - pr_warning("%s: needs reinit!\n", __func__); + pr_warn("%s: needs reinit!\n", __func__); goto done; case OHCI_USB_SUSPEND: - pr_warning("%s: already suspended?\n", __func__); + pr_warn("%s: already suspended?\n", __func__); goto done; } DBG(0, "%s: suspend root hub\n", __func__); @@ -1839,7 +1840,7 @@ static int isp1362_bus_resume(struct usb_hcd *hcd) isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control); if (hcd->state == HC_STATE_RESUMING) { - pr_warning("%s: duplicate resume\n", __func__); + pr_warn("%s: duplicate resume\n", __func__); status = 0; } else switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { @@ -2474,8 +2475,8 @@ static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd) __func__, offset); break; } - pr_warning("%s: memory check with offset %02x ok after second read\n", - __func__, offset); + pr_warn("%s: memory check with offset %02x ok after second read\n", + __func__, offset); } } kfree(ref); diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index b38a228134df108d148037232126e815d9557a7b..be9e6383688111fe8d2c53d3e679758f36fd9873 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -14,8 +14,8 @@ #include #include +#include #include -#include #include #include #include @@ -39,8 +39,8 @@ #define AT91_MAX_USBH_PORTS 3 struct at91_usbh_data { - int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */ - int overcurrent_pin[AT91_MAX_USBH_PORTS]; + struct gpio_desc *vbus_pin[AT91_MAX_USBH_PORTS]; + struct gpio_desc 
*overcurrent_pin[AT91_MAX_USBH_PORTS]; u8 ports; /* number of ports on root hub */ u8 overcurrent_supported; u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS]; @@ -68,8 +68,6 @@ static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst = .extra_priv_size = sizeof(struct ohci_at91_priv), }; -extern int usb_disabled(void); - /*-------------------------------------------------------------------------*/ static void at91_start_clock(struct ohci_at91_priv *ohci_at91) @@ -268,11 +266,8 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int if (!valid_port(port)) return; - if (!gpio_is_valid(pdata->vbus_pin[port])) - return; - - gpio_set_value(pdata->vbus_pin[port], - pdata->vbus_pin_active_low[port] ^ enable); + gpiod_set_value(pdata->vbus_pin[port], + pdata->vbus_pin_active_low[port] ^ enable); } static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port) @@ -280,11 +275,8 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port) if (!valid_port(port)) return -EINVAL; - if (!gpio_is_valid(pdata->vbus_pin[port])) - return -EINVAL; - - return gpio_get_value(pdata->vbus_pin[port]) ^ - pdata->vbus_pin_active_low[port]; + return gpiod_get_value(pdata->vbus_pin[port]) ^ + pdata->vbus_pin_active_low[port]; } /* @@ -474,16 +466,13 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data) { struct platform_device *pdev = data; struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev); - int val, gpio, port; + int val, port; /* From the GPIO notifying the over-current situation, find * out the corresponding port */ at91_for_each_port(port) { - if (gpio_is_valid(pdata->overcurrent_pin[port]) && - gpio_to_irq(pdata->overcurrent_pin[port]) == irq) { - gpio = pdata->overcurrent_pin[port]; + if (gpiod_to_irq(pdata->overcurrent_pin[port]) == irq) break; - } } if (port == AT91_MAX_USBH_PORTS) { @@ -491,7 +480,7 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data) return IRQ_HANDLED; } - val = gpio_get_value(gpio); + val = gpiod_get_value(pdata->overcurrent_pin[port]); /* When notified of an over-current situation, disable power on the corresponding port, and mark this port in @@ -522,9 +511,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct at91_usbh_data *pdata; int i; - int gpio; int ret; - enum of_gpio_flags flags; + int err; u32 ports; /* Right now device-tree probed devices don't get dma_mask set. 
@@ -545,38 +533,16 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) pdata->ports = ports; at91_for_each_port(i) { - /* - * do not configure PIO if not in relation with - * real USB port on board - */ - if (i >= pdata->ports) { - pdata->vbus_pin[i] = -EINVAL; - pdata->overcurrent_pin[i] = -EINVAL; + pdata->vbus_pin[i] = devm_gpiod_get_optional(&pdev->dev, + "atmel,vbus-gpio", + GPIOD_IN); + if (IS_ERR(pdata->vbus_pin[i])) { + err = PTR_ERR(pdata->vbus_pin[i]); + dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err); continue; } - gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i, - &flags); - pdata->vbus_pin[i] = gpio; - if (!gpio_is_valid(gpio)) - continue; - pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW; - - ret = gpio_request(gpio, "ohci_vbus"); - if (ret) { - dev_err(&pdev->dev, - "can't request vbus gpio %d\n", gpio); - continue; - } - ret = gpio_direction_output(gpio, - !pdata->vbus_pin_active_low[i]); - if (ret) { - dev_err(&pdev->dev, - "can't put vbus gpio %d as output %d\n", - gpio, !pdata->vbus_pin_active_low[i]); - gpio_free(gpio); - continue; - } + pdata->vbus_pin_active_low[i] = gpiod_get_value(pdata->vbus_pin[i]); ohci_at91_usb_set_power(pdata, i, 1); } @@ -586,37 +552,21 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) break; pdata->overcurrent_pin[i] = - of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags); - - if (!gpio_is_valid(pdata->overcurrent_pin[i])) - continue; - gpio = pdata->overcurrent_pin[i]; - - ret = gpio_request(gpio, "ohci_overcurrent"); - if (ret) { - dev_err(&pdev->dev, - "can't request overcurrent gpio %d\n", - gpio); + devm_gpiod_get_optional(&pdev->dev, + "atmel,oc-gpio", GPIOD_IN); + if (IS_ERR(pdata->overcurrent_pin[i])) { + err = PTR_ERR(pdata->overcurrent_pin[i]); + dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err); continue; } - ret = gpio_direction_input(gpio); - if (ret) { - dev_err(&pdev->dev, - "can't configure overcurrent gpio %d as input\n", - gpio); - gpio_free(gpio); - continue; - } - - ret = request_irq(gpio_to_irq(gpio), - ohci_hcd_at91_overcurrent_irq, - IRQF_SHARED, "ohci_overcurrent", pdev); - if (ret) { - gpio_free(gpio); - dev_err(&pdev->dev, - "can't get gpio IRQ for overcurrent\n"); - } + ret = devm_request_irq(&pdev->dev, + gpiod_to_irq(pdata->overcurrent_pin[i]), + ohci_hcd_at91_overcurrent_irq, + IRQF_SHARED, + "ohci_overcurrent", pdev); + if (ret) + dev_info(&pdev->dev, "failed to request gpio \"overcurrent\" IRQ\n"); } device_init_wakeup(&pdev->dev, 1); @@ -629,19 +579,8 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) int i; if (pdata) { - at91_for_each_port(i) { - if (!gpio_is_valid(pdata->vbus_pin[i])) - continue; + at91_for_each_port(i) ohci_at91_usb_set_power(pdata, i, 0); - gpio_free(pdata->vbus_pin[i]); - } - - at91_for_each_port(i) { - if (!gpio_is_valid(pdata->overcurrent_pin[i])) - continue; - free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev); - gpio_free(pdata->overcurrent_pin[i]); - } } device_init_wakeup(&pdev->dev, 0); diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index e5c33bc98ea499df5f3ab6a61c488b717734588d..05da2cb596126683eec0c2ffbeb59aebdd343025 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -11,62 +11,192 @@ * kind, whether express or implied. 
*/ +#include +#include #include #include +#include +#include #include -#include - -#include +#include #include +#include +#include +#include +#include -#ifndef CONFIG_ARCH_DAVINCI_DA8XX -#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." -#endif +#include "ohci.h" -#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG) +#define DRIVER_DESC "DA8XX" +#define DRV_NAME "ohci-da8xx" -static struct clk *usb11_clk; -static struct clk *usb20_clk; +static struct hc_driver __read_mostly ohci_da8xx_hc_driver; + +static int (*orig_ohci_hub_control)(struct usb_hcd *hcd, u16 typeReq, + u16 wValue, u16 wIndex, char *buf, u16 wLength); +static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf); + +struct da8xx_ohci_hcd { + struct usb_hcd *hcd; + struct clk *usb11_clk; + struct phy *usb11_phy; + struct regulator *vbus_reg; + struct notifier_block nb; + unsigned int reg_enabled; +}; + +#define to_da8xx_ohci(hcd) (struct da8xx_ohci_hcd *)(hcd_to_ohci(hcd)->priv) /* Over-current indicator change bitmask */ static volatile u16 ocic_mask; -static void ohci_da8xx_clock(int on) +static int ohci_da8xx_enable(struct usb_hcd *hcd) { - u32 cfgchip2; - - cfgchip2 = __raw_readl(CFGCHIP2); - if (on) { - clk_enable(usb11_clk); - - /* - * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we - * need to enable the USB 2.0 module clocking, start its PHY, - * and not allow it to stop the clock during USB 2.0 suspend. - */ - if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) { - clk_enable(usb20_clk); - - cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN); - cfgchip2 |= CFGCHIP2_PHY_PLLON; - __raw_writel(cfgchip2, CFGCHIP2); - - pr_info("Waiting for USB PHY clock good...\n"); - while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD)) - cpu_relax(); - } + struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd); + int ret; + + ret = clk_prepare_enable(da8xx_ohci->usb11_clk); + if (ret) + return ret; + + ret = phy_init(da8xx_ohci->usb11_phy); + if (ret) + goto err_phy_init; + + ret = phy_power_on(da8xx_ohci->usb11_phy); + if (ret) + goto err_phy_power_on; + + return 0; + +err_phy_power_on: + phy_exit(da8xx_ohci->usb11_phy); +err_phy_init: + clk_disable_unprepare(da8xx_ohci->usb11_clk); + + return ret; +} + +static void ohci_da8xx_disable(struct usb_hcd *hcd) +{ + struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd); + + phy_power_off(da8xx_ohci->usb11_phy); + phy_exit(da8xx_ohci->usb11_phy); + clk_disable_unprepare(da8xx_ohci->usb11_clk); +} - /* Enable USB 1.1 PHY */ - cfgchip2 |= CFGCHIP2_USB1SUSPENDM; - } else { - clk_disable(usb11_clk); - if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) - clk_disable(usb20_clk); +static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on) +{ + struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd); + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); + int ret; + + if (hub && hub->set_power) + return hub->set_power(1, on); + + if (!da8xx_ohci->vbus_reg) + return 0; - /* Disable USB 1.1 PHY */ - cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM; + if (on && !da8xx_ohci->reg_enabled) { + ret = regulator_enable(da8xx_ohci->vbus_reg); + if (ret) { + dev_err(dev, "Failed to enable regulator: %d\n", ret); + return ret; + } + da8xx_ohci->reg_enabled = 1; + + } else if (!on && da8xx_ohci->reg_enabled) { + ret = regulator_disable(da8xx_ohci->vbus_reg); + if (ret) { + dev_err(dev, "Failed to disable regulator: %d\n", ret); + return ret; + } + da8xx_ohci->reg_enabled = 0; } - __raw_writel(cfgchip2, CFGCHIP2); + + return 0; +} + +static int 
ohci_da8xx_get_power(struct usb_hcd *hcd) +{ + struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd); + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); + + if (hub && hub->get_power) + return hub->get_power(1); + + if (da8xx_ohci->vbus_reg) + return regulator_is_enabled(da8xx_ohci->vbus_reg); + + return 1; +} + +static int ohci_da8xx_get_oci(struct usb_hcd *hcd) +{ + struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd); + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); + unsigned int flags; + int ret; + + if (hub && hub->get_oci) + return hub->get_oci(1); + + if (!da8xx_ohci->vbus_reg) + return 0; + + ret = regulator_get_error_flags(da8xx_ohci->vbus_reg, &flags); + if (ret) + return ret; + + if (flags & REGULATOR_ERROR_OVER_CURRENT) + return 1; + + return 0; +} + +static int ohci_da8xx_has_set_power(struct usb_hcd *hcd) +{ + struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd); + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); + + if (hub && hub->set_power) + return 1; + + if (da8xx_ohci->vbus_reg) + return 1; + + return 0; +} + +static int ohci_da8xx_has_oci(struct usb_hcd *hcd) +{ + struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd); + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); + + if (hub && hub->get_oci) + return 1; + + if (da8xx_ohci->vbus_reg) + return 1; + + return 0; +} + +static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd) +{ + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); + + if (hub && hub->potpgt) + return 1; + + return 0; } /* @@ -82,7 +212,51 @@ static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub, hub->set_power(port, 0); } -static int ohci_da8xx_init(struct usb_hcd *hcd) +static int ohci_da8xx_regulator_event(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct da8xx_ohci_hcd *da8xx_ohci = + container_of(nb, struct da8xx_ohci_hcd, nb); + + if (event & REGULATOR_EVENT_OVER_CURRENT) { + ocic_mask |= 1 << 1; + ohci_da8xx_set_power(da8xx_ohci->hcd, 0); + } + + return 0; +} + +static int ohci_da8xx_register_notify(struct usb_hcd *hcd) +{ + struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd); + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); + int ret = 0; + + if (hub && hub->ocic_notify) { + ret = hub->ocic_notify(ohci_da8xx_ocic_handler); + } else if (da8xx_ohci->vbus_reg) { + da8xx_ohci->nb.notifier_call = ohci_da8xx_regulator_event; + ret = devm_regulator_register_notifier(da8xx_ohci->vbus_reg, + &da8xx_ohci->nb); + } + + if (ret) + dev_err(dev, "Failed to register notifier: %d\n", ret); + + return ret; +} + +static void ohci_da8xx_unregister_notify(struct usb_hcd *hcd) +{ + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); + + if (hub && hub->ocic_notify) + hub->ocic_notify(NULL); +} + +static int ohci_da8xx_reset(struct usb_hcd *hcd) { struct device *dev = hcd->self.controller; struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); @@ -92,7 +266,9 @@ static int ohci_da8xx_init(struct usb_hcd *hcd) dev_dbg(dev, "starting USB controller\n"); - ohci_da8xx_clock(1); + result = ohci_da8xx_enable(hcd); + if (result < 0) + return result; /* * DA8xx only have 1 port connected to the pins but the HC root hub @@ -100,9 +276,11 @@ static int 
ohci_da8xx_init(struct usb_hcd *hcd) */ ohci->num_ports = 1; - result = ohci_init(ohci); - if (result < 0) + result = ohci_setup(hcd); + if (result < 0) { + ohci_da8xx_disable(hcd); return result; + } /* * Since we're providing a board-specific root hub port power control @@ -111,45 +289,29 @@ static int ohci_da8xx_init(struct usb_hcd *hcd) * the correct hub descriptor... */ rh_a = ohci_readl(ohci, &ohci->regs->roothub.a); - if (hub->set_power) { + if (ohci_da8xx_has_set_power(hcd)) { rh_a &= ~RH_A_NPS; rh_a |= RH_A_PSM; } - if (hub->get_oci) { + if (ohci_da8xx_has_oci(hcd)) { rh_a &= ~RH_A_NOCP; rh_a |= RH_A_OCPM; } - rh_a &= ~RH_A_POTPGT; - rh_a |= hub->potpgt << 24; + if (ohci_da8xx_has_potpgt(hcd)) { + rh_a &= ~RH_A_POTPGT; + rh_a |= hub->potpgt << 24; + } ohci_writel(ohci, rh_a, &ohci->regs->roothub.a); return result; } -static void ohci_da8xx_stop(struct usb_hcd *hcd) -{ - ohci_stop(hcd); - ohci_da8xx_clock(0); -} - -static int ohci_da8xx_start(struct usb_hcd *hcd) -{ - struct ohci_hcd *ohci = hcd_to_ohci(hcd); - int result; - - result = ohci_run(ohci); - if (result < 0) - ohci_da8xx_stop(hcd); - - return result; -} - /* * Update the status data from the hub with the over-current indicator change. */ static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf) { - int length = ohci_hub_status_data(hcd, buf); + int length = orig_ohci_hub_status_data(hcd, buf); /* See if we have OCIC bit set on port 1 */ if (ocic_mask & (1 << 1)) { @@ -171,7 +333,6 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { struct device *dev = hcd->self.controller; - struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev); int temp; switch (typeReq) { @@ -185,11 +346,11 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1); /* The port power status (PPS) bit defaults to 1 */ - if (hub->get_power && hub->get_power(wIndex) == 0) + if (!ohci_da8xx_get_power(hcd)) temp &= ~RH_PS_PPS; /* The port over-current indicator (POCI) bit is always 0 */ - if (hub->get_oci && hub->get_oci(wIndex) > 0) + if (ohci_da8xx_get_oci(hcd) > 0) temp |= RH_PS_POCI; /* The over-current indicator change (OCIC) bit is 0 too */ @@ -214,10 +375,7 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, dev_dbg(dev, "%sPortFeature(%u): %s\n", temp ? "Set" : "Clear", wIndex, "POWER"); - if (!hub->set_power) - return -EPIPE; - - return hub->set_power(wIndex, temp) ? -EPIPE : 0; + return ohci_da8xx_set_power(hcd, temp) ? -EPIPE : 0; case USB_PORT_FEAT_C_OVER_CURRENT: dev_dbg(dev, "%sPortFeature(%u): %s\n", temp ? 
"Set" : "Clear", wIndex, @@ -231,86 +389,61 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } } - return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); + return orig_ohci_hub_control(hcd, typeReq, wValue, + wIndex, buf, wLength); } -static const struct hc_driver ohci_da8xx_hc_driver = { - .description = hcd_name, - .product_desc = "DA8xx OHCI", - .hcd_priv_size = sizeof(struct ohci_hcd), - - /* - * generic hardware linkage - */ - .irq = ohci_irq, - .flags = HCD_USB11 | HCD_MEMORY, - - /* - * basic lifecycle operations - */ - .reset = ohci_da8xx_init, - .start = ohci_da8xx_start, - .stop = ohci_da8xx_stop, - .shutdown = ohci_shutdown, - - /* - * managing i/o requests and associated device resources - */ - .urb_enqueue = ohci_urb_enqueue, - .urb_dequeue = ohci_urb_dequeue, - .endpoint_disable = ohci_endpoint_disable, - - /* - * scheduling support - */ - .get_frame_number = ohci_get_frame, - - /* - * root hub support - */ - .hub_status_data = ohci_da8xx_hub_status_data, - .hub_control = ohci_da8xx_hub_control, - -#ifdef CONFIG_PM - .bus_suspend = ohci_bus_suspend, - .bus_resume = ohci_bus_resume, -#endif - .start_port_reset = ohci_start_port_reset, -}; - /*-------------------------------------------------------------------------*/ +#ifdef CONFIG_OF +static const struct of_device_id da8xx_ohci_ids[] = { + { .compatible = "ti,da830-ohci" }, + { } +}; +MODULE_DEVICE_TABLE(of, da8xx_ohci_ids); +#endif - -/** - * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs - * Context: !in_interrupt() - * - * Allocates basic resources for this USB host controller, and - * then invokes the start() method for the HCD associated with it - * through the hotplug entry's driver_data. - */ -static int usb_hcd_da8xx_probe(const struct hc_driver *driver, - struct platform_device *pdev) +static int ohci_da8xx_probe(struct platform_device *pdev) { - struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev); + struct da8xx_ohci_hcd *da8xx_ohci; struct usb_hcd *hcd; struct resource *mem; int error, irq; + hcd = usb_create_hcd(&ohci_da8xx_hc_driver, &pdev->dev, + dev_name(&pdev->dev)); + if (!hcd) + return -ENOMEM; - if (hub == NULL) - return -ENODEV; + da8xx_ohci = to_da8xx_ohci(hcd); + da8xx_ohci->hcd = hcd; - usb11_clk = devm_clk_get(&pdev->dev, "usb11"); - if (IS_ERR(usb11_clk)) - return PTR_ERR(usb11_clk); + da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, "usb11"); + if (IS_ERR(da8xx_ohci->usb11_clk)) { + error = PTR_ERR(da8xx_ohci->usb11_clk); + if (error != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to get clock.\n"); + goto err; + } - usb20_clk = devm_clk_get(&pdev->dev, "usb20"); - if (IS_ERR(usb20_clk)) - return PTR_ERR(usb20_clk); + da8xx_ohci->usb11_phy = devm_phy_get(&pdev->dev, "usb-phy"); + if (IS_ERR(da8xx_ohci->usb11_phy)) { + error = PTR_ERR(da8xx_ohci->usb11_phy); + if (error != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to get phy.\n"); + goto err; + } - hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); - if (!hcd) - return -ENOMEM; + da8xx_ohci->vbus_reg = devm_regulator_get_optional(&pdev->dev, "vbus"); + if (IS_ERR(da8xx_ohci->vbus_reg)) { + error = PTR_ERR(da8xx_ohci->vbus_reg); + if (error == -ENODEV) { + da8xx_ohci->vbus_reg = NULL; + } else if (error == -EPROBE_DEFER) { + goto err; + } else { + dev_err(&pdev->dev, "Failed to get regulator\n"); + goto err; + } + } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); hcd->regs = devm_ioremap_resource(&pdev->dev, mem); @@ -321,60 +454,38 @@ static int 
usb_hcd_da8xx_probe(const struct hc_driver *driver, hcd->rsrc_start = mem->start; hcd->rsrc_len = resource_size(mem); - ohci_hcd_init(hcd_to_ohci(hcd)); - irq = platform_get_irq(pdev, 0); if (irq < 0) { error = -ENODEV; goto err; } + error = usb_add_hcd(hcd, irq, 0); if (error) goto err; device_wakeup_enable(hcd->self.controller); - if (hub->ocic_notify) { - error = hub->ocic_notify(ohci_da8xx_ocic_handler); - if (!error) - return 0; - } + error = ohci_da8xx_register_notify(hcd); + if (error) + goto err_remove_hcd; + + return 0; +err_remove_hcd: usb_remove_hcd(hcd); err: usb_put_hcd(hcd); return error; } -/** - * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs - * @dev: USB Host Controller being removed - * Context: !in_interrupt() - * - * Reverses the effect of usb_hcd_da8xx_probe(), first invoking - * the HCD's stop() method. It is always called from a thread - * context, normally "rmmod", "apmd", or something similar. - */ -static inline void -usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev) +static int ohci_da8xx_remove(struct platform_device *pdev) { - struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev); + struct usb_hcd *hcd = platform_get_drvdata(pdev); - hub->ocic_notify(NULL); + ohci_da8xx_unregister_notify(hcd); usb_remove_hcd(hcd); usb_put_hcd(hcd); -} - -static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev) -{ - return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev); -} - -static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev) -{ - struct usb_hcd *hcd = platform_get_drvdata(dev); - - usb_hcd_da8xx_remove(hcd, dev); return 0; } @@ -397,7 +508,7 @@ static int ohci_da8xx_suspend(struct platform_device *pdev, if (ret) return ret; - ohci_da8xx_clock(0); + ohci_da8xx_disable(hcd); hcd->state = HC_STATE_SUSPENDED; return ret; @@ -407,32 +518,77 @@ static int ohci_da8xx_resume(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int ret; if (time_before(jiffies, ohci->next_statechange)) msleep(5); ohci->next_statechange = jiffies; - ohci_da8xx_clock(1); - dev->dev.power.power_state = PMSG_ON; - usb_hcd_resume_root_hub(hcd); + ret = ohci_da8xx_enable(hcd); + if (ret) + return ret; + + ohci_resume(hcd, false); + return 0; } #endif +static const struct ohci_driver_overrides da8xx_overrides __initconst = { + .reset = ohci_da8xx_reset, + .extra_priv_size = sizeof(struct da8xx_ohci_hcd), +}; + /* * Driver definition to register with platform structure. */ static struct platform_driver ohci_hcd_da8xx_driver = { - .probe = ohci_hcd_da8xx_drv_probe, - .remove = ohci_hcd_da8xx_drv_remove, + .probe = ohci_da8xx_probe, + .remove = ohci_da8xx_remove, .shutdown = usb_hcd_platform_shutdown, #ifdef CONFIG_PM .suspend = ohci_da8xx_suspend, .resume = ohci_da8xx_resume, #endif .driver = { - .name = "ohci", + .name = DRV_NAME, + .of_match_table = of_match_ptr(da8xx_ohci_ids), }, }; -MODULE_ALIAS("platform:ohci"); +static int __init ohci_da8xx_init(void) +{ + + if (usb_disabled()) + return -ENODEV; + + pr_info("%s: " DRIVER_DESC "\n", DRV_NAME); + ohci_init_driver(&ohci_da8xx_hc_driver, &da8xx_overrides); + + /* + * The Davinci da8xx HW has some unusual quirks, which require + * da8xx-specific workarounds. We override certain hc_driver + * functions here to achieve that. 
We explicitly do not enhance + * ohci_driver_overrides to allow this more easily, since this + * is an unusual case, and we don't want to encourage others to + * override these functions by making it too easy. + */ + + orig_ohci_hub_control = ohci_da8xx_hc_driver.hub_control; + orig_ohci_hub_status_data = ohci_da8xx_hc_driver.hub_status_data; + + ohci_da8xx_hc_driver.hub_status_data = ohci_da8xx_hub_status_data; + ohci_da8xx_hc_driver.hub_control = ohci_da8xx_hub_control; + + return platform_driver_register(&ohci_hcd_da8xx_driver); +} +module_init(ohci_da8xx_init); + +static void __exit ohci_da8xx_exit(void) +{ + platform_driver_unregister(&ohci_hcd_da8xx_driver); +} +module_exit(ohci_da8xx_exit); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 86612ac3fda220e3d1c9b1b5c959f45f723d543d..8685cf3e629271d9165d6dd7f21bb868a90d4de9 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -1219,11 +1219,6 @@ MODULE_LICENSE ("GPL"); #define SA1111_DRIVER ohci_hcd_sa1111_driver #endif -#ifdef CONFIG_USB_OHCI_HCD_DAVINCI -#include "ohci-da8xx.c" -#define DAVINCI_PLATFORM_DRIVER ohci_hcd_da8xx_driver -#endif - #ifdef CONFIG_USB_OHCI_HCD_PPC_OF #include "ohci-ppc-of.c" #define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver @@ -1303,19 +1298,9 @@ static int __init ohci_hcd_mod_init(void) goto error_tmio; #endif -#ifdef DAVINCI_PLATFORM_DRIVER - retval = platform_driver_register(&DAVINCI_PLATFORM_DRIVER); - if (retval < 0) - goto error_davinci; -#endif - return retval; /* Error path */ -#ifdef DAVINCI_PLATFORM_DRIVER - platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER); - error_davinci: -#endif #ifdef TMIO_OHCI_DRIVER platform_driver_unregister(&TMIO_OHCI_DRIVER); error_tmio: @@ -1351,9 +1336,6 @@ module_init(ohci_hcd_mod_init); static void __exit ohci_hcd_mod_exit(void) { -#ifdef DAVINCI_PLATFORM_DRIVER - platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER); -#endif #ifdef TMIO_OHCI_DRIVER platform_driver_unregister(&TMIO_OHCI_DRIVER); #endif diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c index c9e315c6808aa77d1e491507d58e7612e14465cc..ed8a762b8670395e9b2eababd1408d11e68d46b0 100644 --- a/drivers/usb/host/ohci-mem.c +++ b/drivers/usb/host/ohci-mem.c @@ -88,10 +88,9 @@ td_alloc (struct ohci_hcd *hc, gfp_t mem_flags) dma_addr_t dma; struct td *td; - td = dma_pool_alloc (hc->td_cache, mem_flags, &dma); + td = dma_pool_zalloc (hc->td_cache, mem_flags, &dma); if (td) { /* in case hc fetches it, make it look dead */ - memset (td, 0, sizeof *td); td->hwNextTD = cpu_to_hc32 (hc, dma); td->td_dma = dma; /* hashed in td_fill */ @@ -122,9 +121,8 @@ ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags) dma_addr_t dma; struct ed *ed; - ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma); + ed = dma_pool_zalloc (hc->ed_cache, mem_flags, &dma); if (ed) { - memset (ed, 0, sizeof (*ed)); INIT_LIST_HEAD (&ed->td_list); ed->dma = dma; } diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c index b7d4756232ae305ed2502fb531cab50fa51addc5..6df8e2ed40fd9966229bf6487d34b2eb331b8d28 100644 --- a/drivers/usb/host/ohci-nxp.c +++ b/drivers/usb/host/ohci-nxp.c @@ -56,8 +56,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver; static struct i2c_client *isp1301_i2c_client; -extern int usb_disabled(void); - static struct clk *usb_host_clk; static void isp1301_configure_lpc32xx(void) @@ -127,6 +125,7 @@ static inline void isp1301_vbus_off(void) static 
void ohci_nxp_start_hc(void) { unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN; + __raw_writel(tmp, USB_OTG_STAT_CONTROL); isp1301_vbus_on(); } @@ -134,6 +133,7 @@ static void ohci_nxp_start_hc(void) static void ohci_nxp_stop_hc(void) { unsigned long tmp; + isp1301_vbus_off(); tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN; __raw_writel(tmp, USB_OTG_STAT_CONTROL); @@ -155,9 +155,8 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) } isp1301_i2c_client = isp1301_get_client(isp1301_node); - if (!isp1301_i2c_client) { + if (!isp1301_i2c_client) return -EPROBE_DEFER; - } ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 495c1454b9e81e229efc86086320375a1c3dee6f..b08e385399b9e4144d85da2d469ccc089d956a5d 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -68,9 +68,6 @@ static inline int tps65010_set_gpio_out_value(unsigned gpio, unsigned value) #endif -extern int usb_disabled(void); -extern int ocpi_enable(void); - static struct clk *usb_host_ck; static struct clk *usb_dc_ck; @@ -296,15 +293,14 @@ static int ohci_omap_reset(struct usb_hcd *hcd) /*-------------------------------------------------------------------------*/ /** - * usb_hcd_omap_probe - initialize OMAP-based HCDs + * ohci_hcd_omap_probe - initialize OMAP-based HCDs * Context: !in_interrupt() * * Allocates basic resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. */ -static int usb_hcd_omap_probe (const struct hc_driver *driver, - struct platform_device *pdev) +static int ohci_hcd_omap_probe(struct platform_device *pdev) { int retval, irq; struct usb_hcd *hcd = 0; @@ -336,7 +332,8 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver, } - hcd = usb_create_hcd (driver, &pdev->dev, dev_name(&pdev->dev)); + hcd = usb_create_hcd(&ohci_omap_hc_driver, &pdev->dev, + dev_name(&pdev->dev)); if (!hcd) { retval = -ENOMEM; goto err0; @@ -384,17 +381,18 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver, /* may be called with controller, bus, and devices active */ /** - * usb_hcd_omap_remove - shutdown processing for OMAP-based HCDs + * ohci_hcd_omap_remove - shutdown processing for OMAP-based HCDs * @dev: USB Host Controller being removed * Context: !in_interrupt() * - * Reverses the effect of usb_hcd_omap_probe(), first invoking + * Reverses the effect of ohci_hcd_omap_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. 
*/ -static inline void -usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev) +static int ohci_hcd_omap_remove(struct platform_device *pdev) { + struct usb_hcd *hcd = platform_get_drvdata(pdev); + dev_dbg(hcd->self.controller, "stopping USB Controller\n"); usb_remove_hcd(hcd); omap_ohci_clock_power(0); @@ -409,21 +407,6 @@ usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev) usb_put_hcd(hcd); clk_put(usb_dc_ck); clk_put(usb_host_ck); -} - -/*-------------------------------------------------------------------------*/ - -static int ohci_hcd_omap_drv_probe(struct platform_device *dev) -{ - return usb_hcd_omap_probe(&ohci_omap_hc_driver, dev); -} - -static int ohci_hcd_omap_drv_remove(struct platform_device *dev) -{ - struct usb_hcd *hcd = platform_get_drvdata(dev); - - usb_hcd_omap_remove(hcd, dev); - return 0; } @@ -472,8 +455,8 @@ static int ohci_omap_resume(struct platform_device *dev) * Driver definition to register with the OMAP bus */ static struct platform_driver ohci_hcd_omap_driver = { - .probe = ohci_hcd_omap_drv_probe, - .remove = ohci_hcd_omap_drv_remove, + .probe = ohci_hcd_omap_probe, + .remove = ohci_hcd_omap_remove, .shutdown = usb_hcd_platform_shutdown, #ifdef CONFIG_PM .suspend = ohci_omap_suspend, diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index a667cf2d57883689538e2a0d9a471b494e831b0c..79efde8f21e080571df68825981080f1663dd520 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -404,7 +404,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev) /** - * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs + * ohci_hcd_pxa27x_probe - initialize pxa27x-based HCDs * Context: !in_interrupt() * * Allocates basic resources for this USB host controller, and @@ -412,7 +412,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev) * through the hotplug entry's driver_data. * */ -int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device *pdev) +static int ohci_hcd_pxa27x_probe(struct platform_device *pdev) { int retval, irq; struct usb_hcd *hcd; @@ -442,7 +442,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device if (IS_ERR(usb_clk)) return PTR_ERR(usb_clk); - hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x"); + hcd = usb_create_hcd(&ohci_pxa27x_hc_driver, &pdev->dev, "pxa27x"); if (!hcd) return -ENOMEM; @@ -503,17 +503,18 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device /* may be called with controller, bus, and devices active */ /** - * usb_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs + * ohci_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs * @dev: USB Host Controller being removed * Context: !in_interrupt() * - * Reverses the effect of usb_hcd_pxa27x_probe(), first invoking + * Reverses the effect of ohci_hcd_pxa27x_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. 
* */ -void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev) +static int ohci_hcd_pxa27x_remove(struct platform_device *pdev) { + struct usb_hcd *hcd = platform_get_drvdata(pdev); struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd); unsigned int i; @@ -524,28 +525,11 @@ void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev) pxa27x_ohci_set_vbus_power(pxa_ohci, i, false); usb_put_hcd(hcd); + return 0; } /*-------------------------------------------------------------------------*/ -static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev) -{ - pr_debug ("In ohci_hcd_pxa27x_drv_probe"); - - if (usb_disabled()) - return -ENODEV; - - return usb_hcd_pxa27x_probe(&ohci_pxa27x_hc_driver, pdev); -} - -static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev) -{ - struct usb_hcd *hcd = platform_get_drvdata(pdev); - - usb_hcd_pxa27x_remove(hcd, pdev); - return 0; -} - #ifdef CONFIG_PM static int ohci_hcd_pxa27x_drv_suspend(struct device *dev) { @@ -598,8 +582,8 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { #endif static struct platform_driver ohci_hcd_pxa27x_driver = { - .probe = ohci_hcd_pxa27x_drv_probe, - .remove = ohci_hcd_pxa27x_drv_remove, + .probe = ohci_hcd_pxa27x_probe, + .remove = ohci_hcd_pxa27x_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "pxa27x-ohci", diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c index 7a1919ca543a00022f0b515da075ddff9060db88..b006b93126f7847e925908c3d6d7321f15bfec51 100644 --- a/drivers/usb/host/ohci-s3c2410.c +++ b/drivers/usb/host/ohci-s3c2410.c @@ -43,6 +43,8 @@ static const char hcd_name[] = "ohci-s3c2410"; static struct clk *clk; static struct clk *usb_clk; +static struct hc_driver __read_mostly ohci_s3c2410_hc_driver; + /* forward definitions */ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc); @@ -321,26 +323,29 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc) /* may be called with controller, bus, and devices active */ /* - * usb_hcd_s3c2410_remove - shutdown processing for HCD + * ohci_hcd_s3c2410_remove - shutdown processing for HCD * @dev: USB Host Controller being removed * Context: !in_interrupt() * - * Reverses the effect of usb_hcd_3c2410_probe(), first invoking + * Reverses the effect of ohci_hcd_3c2410_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. * */ -static void -usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev) +static int +ohci_hcd_s3c2410_remove(struct platform_device *dev) { + struct usb_hcd *hcd = platform_get_drvdata(dev); + usb_remove_hcd(hcd); s3c2410_stop_hc(dev); usb_put_hcd(hcd); + return 0; } /** - * usb_hcd_s3c2410_probe - initialize S3C2410-based HCDs + * ohci_hcd_s3c2410_probe - initialize S3C2410-based HCDs * Context: !in_interrupt() * * Allocates basic resources for this USB host controller, and @@ -348,8 +353,7 @@ usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev) * through the hotplug entry's driver_data. 
* */ -static int usb_hcd_s3c2410_probe(const struct hc_driver *driver, - struct platform_device *dev) +static int ohci_hcd_s3c2410_probe(struct platform_device *dev) { struct usb_hcd *hcd = NULL; struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev); @@ -358,7 +362,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver, s3c2410_usb_set_power(info, 1, 1); s3c2410_usb_set_power(info, 2, 1); - hcd = usb_create_hcd(driver, &dev->dev, "s3c24xx"); + hcd = usb_create_hcd(&ohci_s3c2410_hc_driver, &dev->dev, "s3c24xx"); if (hcd == NULL) return -ENOMEM; @@ -404,21 +408,6 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver, /*-------------------------------------------------------------------------*/ -static struct hc_driver __read_mostly ohci_s3c2410_hc_driver; - -static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev) -{ - return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev); -} - -static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev) -{ - struct usb_hcd *hcd = platform_get_drvdata(pdev); - - usb_hcd_s3c2410_remove(hcd, pdev); - return 0; -} - #ifdef CONFIG_PM static int ohci_hcd_s3c2410_drv_suspend(struct device *dev) { @@ -457,13 +446,21 @@ static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = { .resume = ohci_hcd_s3c2410_drv_resume, }; +static const struct of_device_id ohci_hcd_s3c2410_dt_ids[] = { + { .compatible = "samsung,s3c2410-ohci" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids); + static struct platform_driver ohci_hcd_s3c2410_driver = { - .probe = ohci_hcd_s3c2410_drv_probe, - .remove = ohci_hcd_s3c2410_drv_remove, + .probe = ohci_hcd_s3c2410_probe, + .remove = ohci_hcd_s3c2410_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "s3c2410-ohci", .pm = &ohci_hcd_s3c2410_pm_ops, + .of_match_table = ohci_hcd_s3c2410_dt_ids, }, }; diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index d793f548dfe26aef387d13b703454abe67beb5be..a9a1e4c40480cf2c5c7c7995aa5d337d3ef3ee87 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -995,6 +995,14 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) } val = readl(base + ext_cap_offset); + /* Auto handoff never worked for these devices. 
Force it and continue */ + if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) || + (pdev->vendor == PCI_VENDOR_ID_RENESAS + && pdev->device == 0x0014)) { + val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; + writel(val, base + ext_cap_offset); + } + /* If the BIOS owns the HC, signal that the OS wants it, and wait */ if (val & XHCI_HC_BIOS_OWNED) { writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 5d3d914ab4fb441f78ab3e5fae799453fb2ec014..683098afa93ea150ad9888875b6e11ad2d5d33be 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -42,7 +42,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index 940304c33224574439cea18d83f24b6bc763ba10..02260cfdedb135c92d219a2e5531775355973188 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd) if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP) uhci->wait_for_hp = 1; + /* Intel controllers use non-PME wakeup signalling */ + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL) + device_set_run_wake(uhci_dev(uhci), 1); + /* Set up pointers to PCI-specific functions */ uhci->reset_hc = uhci_pci_reset_hc; uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 6afe32381209d76cd0cf2f46d686a9f07a43f2c9..321de2e0161b4a3ef825489ab59a96e6250b1cd9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1032,7 +1032,6 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, goto fail; dev->num_rings_cached = 0; - init_completion(&dev->cmd_completion); dev->udev = udev; /* Point to output device context in dcbaa. */ @@ -1370,7 +1369,7 @@ static u32 xhci_get_endpoint_max_burst(struct usb_device *udev, if (udev->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(&ep->desc) || usb_endpoint_xfer_int(&ep->desc))) - return (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; + return usb_endpoint_maxp_mult(&ep->desc) - 1; return 0; } @@ -1415,10 +1414,10 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev, else if (udev->speed >= USB_SPEED_SUPER) return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); - max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; + max_packet = usb_endpoint_maxp(&ep->desc); + max_burst = usb_endpoint_maxp_mult(&ep->desc); /* A 0 in max burst means 1 transfer per ESIT */ - return max_packet * (max_burst + 1); + return max_packet * max_burst; } /* Set up an endpoint with one ring segment. Do not allocate stream rings. @@ -1461,7 +1460,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, max_esit_payload = xhci_get_max_esit_payload(udev, ep); interval = xhci_get_endpoint_interval(udev, ep); mult = xhci_get_endpoint_mult(udev, ep); - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); + max_packet = usb_endpoint_maxp(&ep->desc); max_burst = xhci_get_endpoint_max_burst(udev, ep); avg_trb_len = max_esit_payload; @@ -2384,7 +2383,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * "physically contiguous and 64-byte (cache line) aligned". 
*/ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, - GFP_KERNEL); + flags); if (!xhci->dcbaa) goto fail; memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); @@ -2480,7 +2479,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->erst.entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, - GFP_KERNEL); + flags); if (!xhci->erst.entries) goto fail; xhci_dbg_trace(xhci, trace_xhci_dbg_init, @@ -2536,7 +2535,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * something other than the default (~1ms minimum between interrupts). * See section 5.5.1.2. */ - init_completion(&xhci->addr_dev); for (i = 0; i < MAX_HC_SLOTS; ++i) xhci->devs[i] = NULL; for (i = 0; i < USB_MAXCHILDREN; ++i) { diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 73f763c4f5f591a2c19053083dedf7b6aa252f31..6e7ddf6cafae14a63601f63f4c10c44e1c81c372 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -337,7 +337,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", __func__, usb_endpoint_type(&ep->desc), udev->speed, - GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)), + usb_endpoint_maxp(&ep->desc), usb_endpoint_dir_in(&ep->desc), ep); if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) { @@ -403,7 +403,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg(xhci, "%s() type:%d, speed:%d, mpks:%d, dir:%d, ep:%p\n", __func__, usb_endpoint_type(&ep->desc), udev->speed, - GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)), + usb_endpoint_maxp(&ep->desc), usb_endpoint_dir_in(&ep->desc), ep); if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 79959f17c38cfe3cd0791b87cf9f0dd90f93ad9d..1094ebd2838ff95a5834f7a680e4a2c0aa13301c 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -94,6 +94,9 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk) int ret; int i; + if (!mtk->has_ippc) + return 0; + /* power on host ip */ value = readl(&ippc->ip_pw_ctr1); value &= ~CTRL1_IP_HOST_PDN; @@ -139,6 +142,9 @@ static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk) int ret; int i; + if (!mtk->has_ippc) + return 0; + /* power down all u3 ports */ for (i = 0; i < mtk->num_u3_ports; i++) { value = readl(&ippc->u3_ctrl_p[i]); @@ -173,6 +179,9 @@ static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk) struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs; u32 value; + if (!mtk->has_ippc) + return 0; + /* reset whole ip */ value = readl(&ippc->ip_pw_ctr0); value |= CTRL0_IP_SW_RST; @@ -475,6 +484,7 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci) /* called during probe() after chip reset completes */ static int xhci_mtk_setup(struct usb_hcd *hcd) { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); int ret; @@ -482,12 +492,21 @@ static int xhci_mtk_setup(struct usb_hcd *hcd) ret = xhci_mtk_ssusb_config(mtk); if (ret) return ret; + } + + ret = xhci_gen_setup(hcd, xhci_mtk_quirks); + if (ret) + return ret; + + if (usb_hcd_is_primary_hcd(hcd)) { + mtk->num_u3_ports = xhci->num_usb3_ports; + mtk->num_u2_ports = xhci->num_usb2_ports; ret = xhci_mtk_sch_init(mtk); if (ret) return ret; } - return xhci_gen_setup(hcd, xhci_mtk_quirks); + return ret; } static int xhci_mtk_probe(struct platform_device *pdev) @@ -586,7 +605,7 @@ 
static int xhci_mtk_probe(struct platform_device *pdev) mtk->hcd = platform_get_drvdata(pdev); platform_set_drvdata(pdev, mtk); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac"); hcd->regs = devm_ioremap_resource(dev, res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); @@ -595,11 +614,16 @@ static int xhci_mtk_probe(struct platform_device *pdev) hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - mtk->ippc_regs = devm_ioremap_resource(dev, res); - if (IS_ERR(mtk->ippc_regs)) { - ret = PTR_ERR(mtk->ippc_regs); - goto put_usb2_hcd; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc"); + if (res) { /* ippc register is optional */ + mtk->ippc_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(mtk->ippc_regs)) { + ret = PTR_ERR(mtk->ippc_regs); + goto put_usb2_hcd; + } + mtk->has_ippc = true; + } else { + mtk->has_ippc = false; } for (phy_num = 0; phy_num < mtk->num_phys; phy_num++) { diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 7da677c79ea85d4dbad8200ad70a247b3462304b..2845c49efe1bf90c23eac3e6a26eba12b504daa0 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -118,6 +118,7 @@ struct xhci_hcd_mtk { struct usb_hcd *hcd; struct mu3h_sch_bw_info *sch_array; struct mu3c_ippc_regs __iomem *ippc_regs; + bool has_ippc; int num_u2_ports; int num_u3_ports; struct regulator *vusb33; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index ed56bf9ed885fbb3766b6a30c9ac786023d351ec..ddfab301e36658adccde76b9a47141b39a3508c7 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -100,6 +100,12 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { .plat_start = xhci_rcar_start, }; +static const struct xhci_plat_priv xhci_plat_renesas_rcar_r8a7796 = { + .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3, + .init_quirk = xhci_rcar_init_quirk, + .plat_start = xhci_rcar_start, +}; + static const struct of_device_id usb_xhci_of_match[] = { { .compatible = "generic-xhci", @@ -123,6 +129,9 @@ static const struct of_device_id usb_xhci_of_match[] = { }, { .compatible = "renesas,xhci-r8a7795", .data = &xhci_plat_renesas_rcar_gen3, + }, { + .compatible = "renesas,xhci-r8a7796", + .data = &xhci_plat_renesas_rcar_r8a7796, }, { .compatible = "renesas,rcar-gen2-xhci", .data = &xhci_plat_renesas_rcar_gen2, diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index 0e4535e632ec9e147b075438bb2ba79c79b44e37..d28df386e780bd04608ad199535ee326119b932c 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -19,6 +19,8 @@ #include "xhci-rcar.h" /* +* - The V3 firmware is for r8a7796 (with good performance). +* - The V2 firmware can be used on both r8a7795 (es1.x) and r8a7796. * - The V2 firmware is possible to use on R-Car Gen2. However, the V2 causes * performance degradation. So, this driver continues to use the V1 if R-Car * Gen2. 
@@ -26,6 +28,7 @@ */ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V1); MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V2); +MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3); /*** Register Offset ***/ #define RCAR_USB3_INT_ENA 0x224 /* Interrupt Enable */ @@ -92,6 +95,7 @@ static int xhci_rcar_is_gen3(struct device *dev) struct device_node *node = dev->of_node; return of_device_is_compatible(node, "renesas,xhci-r8a7795") || + of_device_is_compatible(node, "renesas,xhci-r8a7796") || of_device_is_compatible(node, "renesas,rcar-gen3-xhci"); } diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h index 2941a25cfe9895f60d167a161ac2d983c7c7213d..d2ffe20401cf9f372bbcc0be4a7443e0e3b92b60 100644 --- a/drivers/usb/host/xhci-rcar.h +++ b/drivers/usb/host/xhci-rcar.h @@ -13,6 +13,7 @@ #define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem" #define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem" +#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem" #if IS_ENABLED(CONFIG_USB_XHCI_RCAR) void xhci_rcar_start(struct usb_hcd *hcd); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 797137e26549b566474d99e1d3e6efe277aa4648..bdf6b13d9b6755a0504a15ba10fcfe1d06b5bc73 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -89,6 +89,11 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, return seg->dma + (segment_offset * sizeof(*trb)); } +static bool trb_is_noop(union xhci_trb *trb) +{ + return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); +} + static bool trb_is_link(union xhci_trb *trb) { return TRB_TYPE_LINK_LE32(trb->link.control); @@ -110,6 +115,20 @@ static bool link_trb_toggles_cycle(union xhci_trb *trb) return le32_to_cpu(trb->link.control) & LINK_TOGGLE; } +static bool last_td_in_urb(struct xhci_td *td) +{ + struct urb_priv *urb_priv = td->urb->hcpriv; + + return urb_priv->td_cnt == urb_priv->length; +} + +static void inc_td_cnt(struct urb *urb) +{ + struct urb_priv *urb_priv = urb->hcpriv; + + urb_priv->td_cnt++; +} + /* Updates trb to point to the next TRB in the ring, and updates seg if the next * TRB is in a new segment. This does not skip over link TRBs, and it does not * effect the ring dequeue or enqueue pointers. @@ -303,7 +322,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) "maybe the host is dead\n"); del_timer(&xhci->cmd_timer); xhci->xhc_state |= XHCI_STATE_DYING; - xhci_quiesce(xhci); xhci_halt(xhci); return -ESHUTDOWN; } @@ -473,9 +491,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, if (new_deq == cur_td->last_trb) td_last_trb_found = true; - if (cycle_found && - TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) && - new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE)) + if (cycle_found && trb_is_link(new_deq) && + link_trb_toggles_cycle(new_deq)) state->new_cycle_state ^= 0x1; next_trb(xhci, ep_ring, &new_seg, &new_deq); @@ -511,54 +528,32 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. */ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, - struct xhci_td *cur_td, bool flip_cycle) + struct xhci_td *td, bool flip_cycle) { - struct xhci_segment *cur_seg; - union xhci_trb *cur_trb; - - for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; - true; - next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) { - /* Unchain any chained Link TRBs, but - * leave the pointers intact. 
- */ - cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); - /* Flip the cycle bit (link TRBs can't be the first - * or last TRB). - */ - if (flip_cycle) - cur_trb->generic.field[3] ^= - cpu_to_le32(TRB_CYCLE); - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Cancel (unchain) link TRB"); - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Address = %p (0x%llx dma); " - "in seg %p (0x%llx dma)", - cur_trb, - (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), - cur_seg, - (unsigned long long)cur_seg->dma); + struct xhci_segment *seg = td->start_seg; + union xhci_trb *trb = td->first_trb; + + while (1) { + if (trb_is_link(trb)) { + /* unchain chained link TRBs */ + trb->link.control &= cpu_to_le32(~TRB_CHAIN); } else { - cur_trb->generic.field[0] = 0; - cur_trb->generic.field[1] = 0; - cur_trb->generic.field[2] = 0; + trb->generic.field[0] = 0; + trb->generic.field[1] = 0; + trb->generic.field[2] = 0; /* Preserve only the cycle bit of this TRB */ - cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); - /* Flip the cycle bit except on the first or last TRB */ - if (flip_cycle && cur_trb != cur_td->first_trb && - cur_trb != cur_td->last_trb) - cur_trb->generic.field[3] ^= - cpu_to_le32(TRB_CYCLE); - cur_trb->generic.field[3] |= cpu_to_le32( + trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); + trb->generic.field[3] |= cpu_to_le32( TRB_TYPE(TRB_TR_NOOP)); - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "TRB to noop at offset 0x%llx", - (unsigned long long) - xhci_trb_virt_to_dma(cur_seg, cur_trb)); } - if (cur_trb == cur_td->last_trb) + /* flip cycle if asked to */ + if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); + + if (trb == td->last_trb) break; + + next_trb(xhci, ep_ring, &seg, &trb); } } @@ -574,39 +569,33 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, ep->stop_cmds_pending--; } -/* Must be called with xhci->lock held in interrupt context */ +/* + * Must be called with xhci->lock held in interrupt context, + * releases and re-acquires xhci->lock + */ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, - struct xhci_td *cur_td, int status) + struct xhci_td *cur_td, int status) { - struct usb_hcd *hcd; - struct urb *urb; - struct urb_priv *urb_priv; + struct urb *urb = cur_td->urb; + struct urb_priv *urb_priv = urb->hcpriv; + struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); - urb = cur_td->urb; - urb_priv = urb->hcpriv; - urb_priv->td_cnt++; - hcd = bus_to_hcd(urb->dev->bus); - - /* Only giveback urb when this is the last td in urb */ - if (urb_priv->td_cnt == urb_priv->length) { - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { - xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; - if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { - if (xhci->quirks & XHCI_AMD_PLL_FIX) - usb_amd_quirk_pll_enable(); - } + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_enable(); } - usb_hcd_unlink_urb_from_ep(hcd, urb); - - spin_unlock(&xhci->lock); - usb_hcd_giveback_urb(hcd, urb, status); - xhci_urb_free_priv(urb_priv); - spin_lock(&xhci->lock); } + xhci_urb_free_priv(urb_priv); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock(&xhci->lock); + usb_hcd_giveback_urb(hcd, urb, status); + spin_lock(&xhci->lock); } -void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring, - struct xhci_td *td) 
+static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, + struct xhci_ring *ring, struct xhci_td *td) { struct device *dev = xhci_to_hcd(xhci)->self.controller; struct xhci_segment *seg = td->bounce_seg; @@ -752,7 +741,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); if (ep_ring && cur_td->bounce_seg) xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td); - xhci_giveback_urb_in_irq(xhci, cur_td, 0); + inc_td_cnt(cur_td->urb); + if (last_td_in_urb(cur_td)) + xhci_giveback_urb_in_irq(xhci, cur_td, 0); /* Stop processing the cancelled list if the watchdog timer is * running. @@ -777,7 +768,10 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) if (cur_td->bounce_seg) xhci_unmap_td_bounce_buffer(xhci, ring, cur_td); - xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); + + inc_td_cnt(cur_td->urb); + if (last_td_in_urb(cur_td)) + xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); } } @@ -814,7 +808,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, cur_td = list_first_entry(&ep->cancelled_td_list, struct xhci_td, cancelled_td_list); list_del_init(&cur_td->cancelled_td_list); - xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); + + inc_td_cnt(cur_td->urb); + if (last_td_in_urb(cur_td)) + xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); } } @@ -1003,8 +1000,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, break; case COMP_CTX_STATE: xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); - ep_state = le32_to_cpu(ep_ctx->ep_info); - ep_state &= EP_STATE_MASK; + ep_state = GET_EP_CTX_STATE(ep_ctx); slot_state = le32_to_cpu(slot_ctx->dev_state); slot_state = GET_SLOT_STATE(slot_state); xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, @@ -1096,12 +1092,12 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, } static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, - u32 cmd_comp_code) + struct xhci_command *command, u32 cmd_comp_code) { if (cmd_comp_code == COMP_SUCCESS) - xhci->slot_id = slot_id; + command->slot_id = slot_id; else - xhci->slot_id = 0; + command->slot_id = 0; } static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) @@ -1183,7 +1179,7 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, struct xhci_event_cmd *event) { if (!(xhci->quirks & XHCI_NEC_HOST)) { - xhci->error_bitmask |= 1 << 6; + xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n"); return; } xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, @@ -1325,14 +1321,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd_trb = xhci->cmd_ring->dequeue; cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, cmd_trb); - /* Is the command ring deq ptr out of sync with the deq seg ptr? */ - if (cmd_dequeue_dma == 0) { - xhci->error_bitmask |= 1 << 4; - return; - } - /* Does the DMA address match our internal dequeue pointer address? */ - if (cmd_dma != (u64) cmd_dequeue_dma) { - xhci->error_bitmask |= 1 << 5; + /* + * Check whether the completion event is for our internal kept + * command. 
+ */ + if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) { + xhci_warn(xhci, + "ERROR mismatched command completion event\n"); return; } @@ -1371,7 +1366,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); switch (cmd_type) { case TRB_ENABLE_SLOT: - xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code); + xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); break; case TRB_DISABLE_SLOT: xhci_handle_cmd_disable_slot(xhci, slot_id); @@ -1418,7 +1413,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, break; default: /* Skip over unknown commands on the event ring */ - xhci->error_bitmask |= 1 << 6; + xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); break; } @@ -1519,10 +1514,10 @@ static void handle_port_status(struct xhci_hcd *xhci, bool bogus_port_status = false; /* Port status change events always have a successful completion code */ - if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) { - xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); - xhci->error_bitmask |= 1 << 8; - } + if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) + xhci_warn(xhci, + "WARN: xHC returned failed port status event\n"); + port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); @@ -1759,7 +1754,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id, - struct xhci_td *td, union xhci_trb *event_trb) + struct xhci_td *td, union xhci_trb *ep_trb) { struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; struct xhci_command *command; @@ -1798,8 +1793,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, * endpoint anyway. Check if a babble halted the * endpoint. */ - if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == - cpu_to_le32(EP_STATE_HALTED)) + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) return 1; return 0; @@ -1824,7 +1818,7 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) * Return 1 if the urb can be given back. */ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *event_trb, struct xhci_transfer_event *event, + union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status, bool skip) { struct xhci_virt_device *xdev; @@ -1833,7 +1827,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, int ep_index; struct urb *urb = NULL; struct xhci_ep_ctx *ep_ctx; - int ret = 0; struct urb_priv *urb_priv; u32 trb_comp_code; @@ -1866,7 +1859,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, * The class driver clears the device side halt later. */ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, - ep_ring->stream_id, td, event_trb); + ep_ring->stream_id, td, ep_trb); } else { /* Update ring dequeue pointer */ while (ep_ring->dequeue != td->last_trb) @@ -1889,41 +1882,54 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, * unsigned). Play it safe and say we didn't transfer anything. */ if (urb->actual_length > urb->transfer_buffer_length) { - xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. 
len = %u\n", - urb->transfer_buffer_length, - urb->actual_length); + xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", + urb->transfer_buffer_length, urb->actual_length); urb->actual_length = 0; - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; + *status = 0; } list_del_init(&td->td_list); /* Was this TD slated to be cancelled but completed anyway? */ if (!list_empty(&td->cancelled_td_list)) list_del_init(&td->cancelled_td_list); - urb_priv->td_cnt++; + inc_td_cnt(urb); /* Giveback the urb when all the tds are completed */ - if (urb_priv->td_cnt == urb_priv->length) { - ret = 1; - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { - xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; - if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { - if (xhci->quirks & XHCI_AMD_PLL_FIX) - usb_amd_quirk_pll_enable(); - } - } + if (last_td_in_urb(td)) { + if ((urb->actual_length != urb->transfer_buffer_length && + (urb->transfer_flags & URB_SHORT_NOT_OK)) || + (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) + xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", + urb, urb->actual_length, + urb->transfer_buffer_length, *status); + + /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + *status = 0; + xhci_giveback_urb_in_irq(xhci, td, *status); } + return 0; +} - return ret; +/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ +static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, + union xhci_trb *stop_trb) +{ + u32 sum; + union xhci_trb *trb = ring->dequeue; + struct xhci_segment *seg = ring->deq_seg; + + for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { + if (!trb_is_noop(trb) && !trb_is_link(trb)) + sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); + } + return sum; } /* * Process control tds, update urb status and actual_length. */ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *event_trb, struct xhci_transfer_event *event, + union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) { struct xhci_virt_device *xdev; @@ -1932,6 +1938,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, int ep_index; struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; + u32 remaining, requested; + bool on_data_stage; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); xdev = xhci->devs[slot_id]; @@ -1939,195 +1947,161 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + requested = td->urb->transfer_buffer_length; + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + /* not setup (dequeue), or status stage means we are at data stage */ + on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb); switch (trb_comp_code) { case COMP_SUCCESS: - if (event_trb == ep_ring->dequeue) { - xhci_warn(xhci, "WARN: Success on ctrl setup TRB " - "without IOC set??\n"); - *status = -ESHUTDOWN; - } else if (event_trb != td->last_trb) { - xhci_warn(xhci, "WARN: Success on ctrl data TRB " - "without IOC set??\n"); + if (ep_trb != td->last_trb) { + xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", + on_data_stage ? 
"data" : "setup"); *status = -ESHUTDOWN; - } else { - *status = 0; + break; } + *status = 0; break; case COMP_SHORT_TX: - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; + *status = 0; break; case COMP_STOP_SHORT: - if (event_trb == ep_ring->dequeue || event_trb == td->last_trb) - xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); + if (on_data_stage) + td->urb->actual_length = remaining; else - td->urb->actual_length = - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - - return finish_td(xhci, td, event_trb, event, ep, status, false); + xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); + goto finish_td; case COMP_STOP: - /* Did we stop at data stage? */ - if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - /* fall through */ + if (on_data_stage) + td->urb->actual_length = requested - remaining; + goto finish_td; case COMP_STOP_INVAL: - return finish_td(xhci, td, event_trb, event, ep, status, false); + goto finish_td; default: if (!xhci_requires_manual_halt_cleanup(xhci, - ep_ctx, trb_comp_code)) + ep_ctx, trb_comp_code)) break; - xhci_dbg(xhci, "TRB error code %u, " - "halted endpoint index = %u\n", - trb_comp_code, ep_index); + xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", + trb_comp_code, ep_index); /* else fall through */ case COMP_STALL: /* Did we transfer part of the data (middle) phase? */ - if (event_trb != ep_ring->dequeue && - event_trb != td->last_trb) - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + if (on_data_stage) + td->urb->actual_length = requested - remaining; else if (!td->urb_length_set) td->urb->actual_length = 0; - - return finish_td(xhci, td, event_trb, event, ep, status, false); + goto finish_td; } + + /* stopped at setup stage, no data transferred */ + if (ep_trb == ep_ring->dequeue) + goto finish_td; + /* - * Did we transfer any data, despite the errors that might have - * happened? I.e. did we get past the setup stage? + * if on data stage then update the actual_length of the URB and flag it + * as set, so it won't be overwritten in the event for the last TRB. */ - if (event_trb != ep_ring->dequeue) { - /* The event was for the status stage */ - if (event_trb == td->last_trb) { - if (td->urb_length_set) { - /* Don't overwrite a previously set error code - */ - if ((*status == -EINPROGRESS || *status == 0) && - (td->urb->transfer_flags - & URB_SHORT_NOT_OK)) - /* Did we already see a short data - * stage? */ - *status = -EREMOTEIO; - } else { - td->urb->actual_length = - td->urb->transfer_buffer_length; - } - } else { - /* - * Maybe the event was for the data stage? If so, update - * already the actual_length of the URB and flag it as - * set, so that it is not overwritten in the event for - * the last TRB. 
- */ - td->urb_length_set = true; - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - xhci_dbg(xhci, "Waiting for status " - "stage event\n"); - return 0; - } + if (on_data_stage) { + td->urb_length_set = true; + td->urb->actual_length = requested - remaining; + xhci_dbg(xhci, "Waiting for status stage event\n"); + return 0; } - return finish_td(xhci, td, event_trb, event, ep, status, false); + /* at status stage */ + if (!td->urb_length_set) + td->urb->actual_length = requested; + +finish_td: + return finish_td(xhci, td, ep_trb, event, ep, status, false); } /* * Process isochronous tds, update urb packet status and actual_length. */ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *event_trb, struct xhci_transfer_event *event, + union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) { struct xhci_ring *ep_ring; struct urb_priv *urb_priv; int idx; - int len = 0; - union xhci_trb *cur_trb; - struct xhci_segment *cur_seg; struct usb_iso_packet_descriptor *frame; u32 trb_comp_code; - bool skip_td = false; + bool sum_trbs_for_length = false; + u32 remaining, requested, ep_trb_len; + int short_framestatus; ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); urb_priv = td->urb->hcpriv; idx = urb_priv->td_cnt; frame = &td->urb->iso_frame_desc[idx]; + requested = frame->length; + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? + -EREMOTEIO : 0; /* handle completion code */ switch (trb_comp_code) { case COMP_SUCCESS: - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { - frame->status = 0; + if (remaining) { + frame->status = short_framestatus; + if (xhci->quirks & XHCI_TRUST_TX_LENGTH) + sum_trbs_for_length = true; break; } - if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) - trb_comp_code = COMP_SHORT_TX; - /* fallthrough */ - case COMP_STOP_SHORT: + frame->status = 0; + break; case COMP_SHORT_TX: - frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
- -EREMOTEIO : 0; + frame->status = short_framestatus; + sum_trbs_for_length = true; break; case COMP_BW_OVER: frame->status = -ECOMM; - skip_td = true; break; case COMP_BUFF_OVER: case COMP_BABBLE: frame->status = -EOVERFLOW; - skip_td = true; break; case COMP_DEV_ERR: case COMP_STALL: frame->status = -EPROTO; - skip_td = true; break; case COMP_TX_ERR: frame->status = -EPROTO; - if (event_trb != td->last_trb) + if (ep_trb != td->last_trb) return 0; - skip_td = true; break; case COMP_STOP: + sum_trbs_for_length = true; + break; + case COMP_STOP_SHORT: + /* field normally containing residue now contains transferred */ + frame->status = short_framestatus; + requested = remaining; + break; case COMP_STOP_INVAL: + requested = 0; + remaining = 0; break; default: + sum_trbs_for_length = true; frame->status = -1; break; } - if (trb_comp_code == COMP_SUCCESS || skip_td) { - frame->actual_length = frame->length; - td->urb->actual_length += frame->length; - } else if (trb_comp_code == COMP_STOP_SHORT) { - frame->actual_length = - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - td->urb->actual_length += frame->actual_length; - } else { - for (cur_trb = ep_ring->dequeue, - cur_seg = ep_ring->deq_seg; cur_trb != event_trb; - next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && - !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) - len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); - } - len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + if (sum_trbs_for_length) + frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) + + ep_trb_len - remaining; + else + frame->actual_length = requested; - if (trb_comp_code != COMP_STOP_INVAL) { - frame->actual_length = len; - td->urb->actual_length += len; - } - } + td->urb->actual_length += frame->actual_length; - return finish_td(xhci, td, event_trb, event, ep, status, false); + return finish_td(xhci, td, ep_trb, event, ep, status, false); } static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, @@ -2162,119 +2136,62 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, * Process bulk and interrupt tds, update urb status and actual_length. */ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *event_trb, struct xhci_transfer_event *event, + union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) { struct xhci_ring *ep_ring; - union xhci_trb *cur_trb; - struct xhci_segment *cur_seg; u32 trb_comp_code; + u32 remaining, requested, ep_trb_len; ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + requested = td->urb->transfer_buffer_length; switch (trb_comp_code) { case COMP_SUCCESS: - /* Double check that the HW transferred everything.
*/ - if (event_trb != td->last_trb || - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { - xhci_warn(xhci, "WARN Successful completion " - "on short TX\n"); - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; - if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) - trb_comp_code = COMP_SHORT_TX; - } else { - *status = 0; + /* handle success with untransferred data as short packet */ + if (ep_trb != td->last_trb || remaining) { + xhci_warn(xhci, "WARN Successful completion on short TX\n"); + xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", + td->urb->ep->desc.bEndpointAddress, + requested, remaining); } + *status = 0; break; - case COMP_STOP_SHORT: case COMP_SHORT_TX: - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; + xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", + td->urb->ep->desc.bEndpointAddress, + requested, remaining); + *status = 0; + break; + case COMP_STOP_SHORT: + td->urb->actual_length = remaining; + goto finish_td; + case COMP_STOP_INVAL: + /* stopped on ep trb with invalid length, exclude it */ + ep_trb_len = 0; + remaining = 0; break; default: - /* Others already handled above */ + /* do nothing */ break; } - if (trb_comp_code == COMP_SHORT_TX) - xhci_dbg(xhci, "ep %#x - asked for %d bytes, " - "%d bytes untransferred\n", - td->urb->ep->desc.bEndpointAddress, - td->urb->transfer_buffer_length, - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); - /* Stopped - short packet completion */ - if (trb_comp_code == COMP_STOP_SHORT) { - td->urb->actual_length = - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - if (td->urb->transfer_buffer_length < - td->urb->actual_length) { - xhci_warn(xhci, "HC gave bad length of %d bytes txed\n", - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); - td->urb->actual_length = 0; - /* status will be set by usb core for canceled urbs */ - } - /* Fast path - was this the last TRB in the TD for this URB? */ - } else if (event_trb == td->last_trb) { - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - if (td->urb->transfer_buffer_length < - td->urb->actual_length) { - xhci_warn(xhci, "HC gave bad length " - "of %d bytes left\n", - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); - td->urb->actual_length = 0; - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; - } - /* Don't overwrite a previously set error code */ - if (*status == -EINPROGRESS) { - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; - } - } else { - td->urb->actual_length = - td->urb->transfer_buffer_length; - /* Ignore a short packet completion if the - * untransferred length was zero. - */ - if (*status == -EREMOTEIO) - *status = 0; - } - } else { - /* Slow path - walk the list, starting from the dequeue - * pointer, to get the actual length transferred. 
- */ + if (ep_trb == td->last_trb) + td->urb->actual_length = requested - remaining; + else + td->urb->actual_length = + sum_trb_lengths(xhci, ep_ring, ep_trb) + + ep_trb_len - remaining; +finish_td: + if (remaining > requested) { + xhci_warn(xhci, "bad transfer trb length %d in event trb\n", + remaining); td->urb->actual_length = 0; - for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; - cur_trb != event_trb; - next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && - !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) - td->urb->actual_length += - TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); - } - /* If the ring didn't stop on a Link or No-op TRB, add - * in the actual bytes transferred from the Normal TRB - */ - if (trb_comp_code != COMP_STOP_INVAL) - td->urb->actual_length += - TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); } - - return finish_td(xhci, td, event_trb, event, ep, status, false); + return finish_td(xhci, td, ep_trb, event, ep, status, false); } /* @@ -2293,16 +2210,13 @@ static int handle_tx_event(struct xhci_hcd *xhci, unsigned int slot_id; int ep_index; struct xhci_td *td = NULL; - dma_addr_t event_dma; - struct xhci_segment *event_seg; - union xhci_trb *event_trb; - struct urb *urb = NULL; + dma_addr_t ep_trb_dma; + struct xhci_segment *ep_seg; + union xhci_trb *ep_trb; int status = -EINPROGRESS; - struct urb_priv *urb_priv; struct xhci_ep_ctx *ep_ctx; struct list_head *tmp; u32 trb_comp_code; - int ret = 0; int td_num = 0; bool handling_skipped_tds = false; @@ -2328,9 +2242,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep = &xdev->eps[ep_index]; ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); - if (!ep_ring || - (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == - EP_STATE_DISABLED) { + if (!ep_ring || GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { xhci_err(xhci, "ERROR Transfer event for disabled endpoint " "or incorrect stream ring\n"); xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", @@ -2352,7 +2264,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, td_num++; } - event_dma = le64_to_cpu(event->buffer); + ep_trb_dma = le64_to_cpu(event->buffer); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); /* Look for common error cases */ switch (trb_comp_code) { @@ -2480,7 +2392,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_dbg(xhci, "td_list is empty while skip " "flag set. Clear skip flag.\n"); } - ret = 0; goto cleanup; } @@ -2489,7 +2400,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->skip = false; xhci_dbg(xhci, "All tds on the ep_ring skipped. " "Clear skip flag.\n"); - ret = 0; goto cleanup; } @@ -2498,8 +2408,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, td_num--; /* Is this a TRB in the currently executing TD? */ - event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, - td->last_trb, event_dma, false); + ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, + td->last_trb, ep_trb_dma, false); /* * Skip the Force Stopped Event. The event_trb(event_dma) of FSE @@ -2509,13 +2419,12 @@ static int handle_tx_event(struct xhci_hcd *xhci, * last TRB of the previous TD. The command completion handle * will take care the rest. 
*/ - if (!event_seg && (trb_comp_code == COMP_STOP || + if (!ep_seg && (trb_comp_code == COMP_STOP || trb_comp_code == COMP_STOP_INVAL)) { - ret = 0; goto cleanup; } - if (!event_seg) { + if (!ep_seg) { if (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { /* Some host controllers give a spurious @@ -2525,7 +2434,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && ep_ring->last_td_was_short) { ep_ring->last_td_was_short = false; - ret = 0; goto cleanup; } /* HC is busted, give up! */ @@ -2536,11 +2444,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, trb_comp_code); trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, td->last_trb, - event_dma, true); + ep_trb_dma, true); return -ESHUTDOWN; } - ret = skip_isoc_td(xhci, td, event, ep, &status); + skip_isoc_td(xhci, td, event, ep, &status); goto cleanup; } if (trb_comp_code == COMP_SHORT_TX) @@ -2553,36 +2461,28 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->skip = false; } - event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / - sizeof(*event_trb)]; + ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / + sizeof(*ep_trb)]; /* * No-op TRB should not trigger interrupts. - * If event_trb is a no-op TRB, it means the + * If ep_trb is a no-op TRB, it means the * corresponding TD has been cancelled. Just ignore * the TD. */ - if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) { - xhci_dbg(xhci, - "event_trb is a no-op TRB. Skip it\n"); + if (trb_is_noop(ep_trb)) { + xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n"); goto cleanup; } - /* Now update the urb's actual_length and give back to - * the core - */ + /* update the urb's actual_length and give back to the core */ if (usb_endpoint_xfer_control(&td->urb->ep->desc)) - ret = process_ctrl_td(xhci, td, event_trb, event, ep, - &status); + process_ctrl_td(xhci, td, ep_trb, event, ep, &status); else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) - ret = process_isoc_td(xhci, td, event_trb, event, ep, - &status); + process_isoc_td(xhci, td, ep_trb, event, ep, &status); else - ret = process_bulk_intr_td(xhci, td, event_trb, event, - ep, &status); - + process_bulk_intr_td(xhci, td, ep_trb, event, ep, + &status); cleanup: - - handling_skipped_tds = ep->skip && trb_comp_code != COMP_MISSED_INT && trb_comp_code != COMP_PING_ERR; @@ -2594,33 +2494,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (!handling_skipped_tds) inc_deq(xhci, xhci->event_ring); - if (ret) { - urb = td->urb; - urb_priv = urb->hcpriv; - - xhci_urb_free_priv(urb_priv); - - usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); - if ((urb->actual_length != urb->transfer_buffer_length && - (urb->transfer_flags & - URB_SHORT_NOT_OK)) || - (status != 0 && - !usb_endpoint_xfer_isoc(&urb->ep->desc))) - xhci_dbg(xhci, "Giveback URB %p, len = %d, " - "expected = %d, status = %d\n", - urb, urb->actual_length, - urb->transfer_buffer_length, - status); - spin_unlock(&xhci->lock); - /* EHCI, UHCI, and OHCI always unconditionally set the - * urb->status of an isochronous endpoint to 0. - */ - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) - status = 0; - usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status); - spin_lock(&xhci->lock); - } - /* * If ep->skip is set, it means there are missed tds on the * endpoint ring need to take care of. @@ -2644,18 +2517,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci) int update_ptrs = 1; int ret; + /* Event ring hasn't been allocated yet. 
*/ if (!xhci->event_ring || !xhci->event_ring->dequeue) { - xhci->error_bitmask |= 1 << 1; - return 0; + xhci_err(xhci, "ERROR event ring not ready\n"); + return -ENOMEM; } event = xhci->event_ring->dequeue; /* Does the HC or OS own the TRB? */ if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != - xhci->event_ring->cycle_state) { - xhci->error_bitmask |= 1 << 2; + xhci->event_ring->cycle_state) return 0; - } /* * Barrier between reading the TRB_CYCLE (valid) flag above and any @@ -2663,7 +2535,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci) */ rmb(); /* FIXME: Handle more event types. */ - switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { + switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) { case TRB_TYPE(TRB_COMPLETION): handle_cmd_completion(xhci, &event->event_cmd); break; @@ -2673,9 +2545,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci) break; case TRB_TYPE(TRB_TRANSFER): ret = handle_tx_event(xhci, &event->trans_event); - if (ret < 0) - xhci->error_bitmask |= 1 << 9; - else + if (ret >= 0) update_ptrs = 0; break; case TRB_TYPE(TRB_DEV_NOTE): @@ -2686,7 +2556,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci) TRB_TYPE(48)) handle_vendor_event(xhci, event); else - xhci->error_bitmask |= 1 << 3; + xhci_warn(xhci, "ERROR unknown event type %d\n", + TRB_FIELD_TO_TYPE( + le32_to_cpu(event->event_cmd.flags))); } /* Any of the above functions may drop and re-acquire the lock, so check * to make sure a watchdog timer didn't mark the host as non-responsive. @@ -2931,8 +2803,7 @@ static int prepare_transfer(struct xhci_hcd *xhci, return -EINVAL; } - ret = prepare_ring(xhci, ep_ring, - le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, + ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), num_trbs, mem_flags); if (ret) return ret; @@ -3120,7 +2991,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, if (xhci->quirks & XHCI_MTK_HOST) trb_buff_len = 0; - maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + maxp = usb_endpoint_maxp(&urb->ep->desc); total_packet_count = DIV_ROUND_UP(td_total_len, maxp); /* Queueing functions don't count the current TRB into transferred */ @@ -3136,7 +3007,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, unsigned int max_pkt; u32 new_buff_len; - max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + max_pkt = usb_endpoint_maxp(&urb->ep->desc); unalign = (enqd_len + *trb_buff_len) % max_pkt; /* we got lucky, last normal TRB data on segment is packet aligned */ @@ -3650,7 +3521,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, addr = start_addr + urb->iso_frame_desc[i].offset; td_len = urb->iso_frame_desc[i].length; td_remain_len = td_len; - max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + max_pkt = usb_endpoint_maxp(&urb->ep->desc); total_pkt_count = DIV_ROUND_UP(td_len, max_pkt); /* A zero-length transfer still involves at least one packet. */ @@ -3828,7 +3699,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* Check the ring to guarantee there is enough room for the whole urb. * Do not insert any td of the urb to the ring if the check failed. 
*/ - ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, + ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), num_trbs, mem_flags); if (ret) return ret; @@ -3841,8 +3712,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* Calculate the start frame and put it in urb->start_frame. */ if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { - if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == - EP_STATE_RUNNING) { + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) { urb->start_frame = xep->next_frame_id; goto skip_start_over; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1a4ca02729c274cb83762636e5556ca75e840144..1cd56417cbec455b6db7605d3b3e20aa20a0998d 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -113,12 +113,12 @@ int xhci_halt(struct xhci_hcd *xhci) ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); - if (!ret) { - xhci->xhc_state |= XHCI_STATE_HALTED; - xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; - } else - xhci_warn(xhci, "Host not halted after %u microseconds.\n", - XHCI_MAX_HALT_USEC); + if (ret) { + xhci_warn(xhci, "Host halt failed, %d\n", ret); + return ret; + } + xhci->xhc_state |= XHCI_STATE_HALTED; + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; return ret; } @@ -167,6 +167,12 @@ int xhci_reset(struct xhci_hcd *xhci) int ret, i; state = readl(&xhci->op_regs->status); + + if (state == ~(u32)0) { + xhci_warn(xhci, "Host not accessible, reset failed.\n"); + return -ENODEV; + } + if ((state & STS_HALT) == 0) { xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); return 0; @@ -690,7 +696,6 @@ void xhci_stop(struct usb_hcd *hcd) xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; xhci_halt(xhci); xhci_reset(xhci); - spin_unlock_irq(&xhci->lock); } @@ -1645,8 +1650,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, /* If the HC already knows the endpoint is disabled, * or the HCD has noted it is disabled, ignore this request */ - if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == - cpu_to_le32(EP_STATE_DISABLED)) || + if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || le32_to_cpu(ctrl_ctx->drop_flags) & xhci_get_endpoint_flag(&ep->desc)) { /* Do not warn when called after a usb_device_reset */ @@ -3209,7 +3213,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, for (i = 0; i < num_eps; i++) { ep_index = xhci_get_endpoint_index(&eps[i]->desc); - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc)); + max_packet = usb_endpoint_maxp(&eps[i]->desc); vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, num_stream_ctxs, num_streams, @@ -3683,27 +3687,26 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) int ret, slot_id; struct xhci_command *command; - command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); if (!command) return 0; /* xhci->slot_id and xhci->addr_dev are not thread-safe */ mutex_lock(&xhci->mutex); spin_lock_irqsave(&xhci->lock, flags); - command->completion = &xhci->addr_dev; ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); mutex_unlock(&xhci->mutex); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); - kfree(command); + xhci_free_command(xhci, command); return 0; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); 
wait_for_completion(command->completion); - slot_id = xhci->slot_id; + slot_id = command->slot_id; mutex_unlock(&xhci->mutex); if (!slot_id || command->status != COMP_SUCCESS) { @@ -3711,7 +3714,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", HCS_MAX_SLOTS( readl(&xhci->cap_regs->hcs_params1))); - kfree(command); + xhci_free_command(xhci, command); return 0; } @@ -3747,7 +3750,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) #endif - kfree(command); + xhci_free_command(xhci, command); /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? */ return 1; @@ -3755,6 +3758,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) disable_slot: /* Disable slot, if we can do it without mem alloc */ spin_lock_irqsave(&xhci->lock, flags); + kfree(command->completion); command->completion = NULL; command->status = 0; if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, @@ -3816,14 +3820,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, } } - command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); if (!command) { ret = -ENOMEM; goto out; } command->in_ctx = virt_dev->in_ctx; - command->completion = &xhci->addr_dev; slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); @@ -3941,7 +3944,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); out: mutex_unlock(&xhci->mutex); - kfree(command); + if (command) { + kfree(command->completion); + kfree(command); + } return ret; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f945380035d07e2f7af2bb73da9d39bf94475991..8ccc11a974b8a62ee0afe18808babb0603071efa 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -709,6 +709,8 @@ struct xhci_ep_ctx { #define EP_STATE_HALTED 2 #define EP_STATE_STOPPED 3 #define EP_STATE_ERROR 4 +#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK) + /* Mult - Max number of burtst within an interval, in EP companion desc. */ #define EP_MULT(p) (((p) & 0x3) << 8) #define CTX_TO_EP_MULT(p) (((p) >> 8) & 0x3) @@ -747,11 +749,6 @@ struct xhci_ep_ctx { #define MAX_PACKET_MASK (0xffff << 16) #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) -/* Get max packet size from ep desc. Bit 10..0 specify the max packet size. - * USB2.0 spec 9.6.6. - */ -#define GET_MAX_PACKET(p) ((p) & 0x7ff) - /* tx_info bitmasks */ #define EP_AVG_TRB_LENGTH(p) ((p) & 0xffff) #define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) & 0xffff) << 16) @@ -789,6 +786,7 @@ struct xhci_command { /* Input context for changing device state */ struct xhci_container_ctx *in_ctx; u32 status; + int slot_id; /* If completion is null, no one is waiting on this command * and the structure can be freed after the command completes. */ @@ -997,7 +995,6 @@ struct xhci_virt_device { int num_rings_cached; #define XHCI_MAX_RINGS_CACHED 31 struct xhci_virt_ep eps[31]; - struct completion cmd_completion; u8 fake_port; u8 real_port; struct xhci_interval_bw_table *bw_table; @@ -1583,8 +1580,6 @@ struct xhci_hcd { /* slot enabling and address device helpers */ /* these are not thread safe so use mutex */ struct mutex mutex; - struct completion addr_dev; - int slot_id; /* For USB 3.0 LPM enable/disable. 
*/ struct xhci_command *lpm_command; /* Internal mirror of the HW's dcbaa */ @@ -1618,8 +1613,6 @@ struct xhci_hcd { #define XHCI_STATE_DYING (1 << 0) #define XHCI_STATE_HALTED (1 << 1) #define XHCI_STATE_REMOVING (1 << 2) - /* Statistics */ - int error_bitmask; unsigned int quirks; #define XHCI_LINK_TRB_QUIRK (1 << 0) #define XHCI_RESET_EP_QUIRK (1 << 1) diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c index 9535b2872183468dcd4b063c3dec930e92a5551f..79205b31e4a9d5caf01d5c2e06f0349805cb9846 100644 --- a/drivers/usb/isp1760/isp1760-if.c +++ b/drivers/usb/isp1760/isp1760-if.c @@ -197,7 +197,7 @@ static int isp1760_plat_probe(struct platform_device *pdev) irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq_res) { - pr_warning("isp1760: IRQ resource not available\n"); + pr_warn("isp1760: IRQ resource not available\n"); return -ENODEV; } irqflags = irq_res->flags & IRQF_TRIGGER_MASK; diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 6ddd08a32777c6f636789e2b15a5ba430a5fb9af..aa350dc9eb25c2db5a785fbd5147229d0cf06963 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -215,19 +215,7 @@ static int chaoskey_probe(struct usb_interface *interface, dev->hwrng.name = dev->name ? dev->name : chaoskey_driver.name; dev->hwrng.read = chaoskey_rng_read; - - /* Set the 'quality' metric. Quality is measured in units of - * 1/1024's of a bit ("mills"). This should be set to 1024, - * but there is a bug in the hwrng core which masks it with - * 1023. - * - * The patch that has been merged to the crypto development - * tree for that bug limits the value to 1024 at most, so by - * setting this to 1024 + 1023, we get 1023 before the fix is - * merged and 1024 afterwards. We'll patch this driver once - * both bits of code are in the same tree. 
- */ - dev->hwrng.quality = 1024 + 1023; + dev->hwrng.quality = 1024; dev->hwrng_registered = (hwrng_register(&dev->hwrng) == 0); if (!dev->hwrng_registered) diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 9a82f8308ad7fa131e30f88079163d06f62f5807..01a9373b7e18a16fa0b8bc35625fffbe0eefc15b 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -48,7 +48,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index 2975e80b7a56f9bce341707012c174da09dc9d49..debc1fd74b0df2cb17817ec60254104e0fc72b2b 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include /* image constants */ diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 9ca595632f171c2ac705c3c465769afe80c1e6f7..3bc5356832db777424111b74cefbcfe2e77ce4e4 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index c8fbe7b739a0bb110cdfbca9e82bc7238adf5e08..b10e26c74a9088b20f44b5ad376bebfe869d196b 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -83,7 +83,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index 13731d5126248b280a60471cd5348a36e5802c05..fc329c98a6e8ea2bbbf3b9c8c551f774b3d62922 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -421,7 +421,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) } else if (result != -EREMOTEIO) { mutex_unlock(&(rio->lock)); dev_err(&rio->rio_dev->dev, - "Read Whoops - result:%u partial:%u this_read:%u\n", + "Read Whoops - result:%d partial:%u this_read:%u\n", result, partial, this_read); return -EIO; } else { diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index 460cebf322e39109bb86ca4cb5eac81014ae101e..4b5777ec1501fe37a82c3bd3b23b35d8ab96ec79 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c @@ -686,8 +686,6 @@ static void sisusbcon_scrolldelta(struct vc_data *c, int lines) { struct sisusb_usb_data *sisusb; - int margin = c->vc_size_row * 4; - int ul, we, p, st; sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); if (!sisusb) @@ -700,39 +698,8 @@ sisusbcon_scrolldelta(struct vc_data *c, int lines) return; } - if (!lines) /* Turn scrollback off */ - c->vc_visible_origin = c->vc_origin; - else { - - if (sisusb->con_rolled_over > - (c->vc_scr_end - sisusb->scrbuf) + margin) { - - ul = c->vc_scr_end - sisusb->scrbuf; - we = sisusb->con_rolled_over + c->vc_size_row; - - } else { - - ul = 0; - we = sisusb->scrbuf_size; - - } - - p = (c->vc_visible_origin - sisusb->scrbuf - ul + we) % we + - lines * c->vc_size_row; - - st = (c->vc_origin - sisusb->scrbuf - ul + we) % we; - - if (st < 2 * margin) - margin = 0; - - if (p < margin) - p = 0; - - if (p > st - margin) - p = st; - - c->vc_visible_origin = sisusb->scrbuf + (p + ul) % we; - } + vc_scrolldelta_helper(c, lines, sisusb->con_rolled_over, + (void *)sisusb->scrbuf, sisusb->scrbuf_size); sisusbcon_set_start_address(sisusb, c); @@ -808,9 +775,10 @@ sisusbcon_cursor(struct vc_data *c, int mode) mutex_unlock(&sisusb->lock); } -static int +static bool 
sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb, - int t, int b, int dir, int lines) + unsigned int t, unsigned int b, enum con_scroll dir, + unsigned int lines) { int cols = sisusb->sisusb_num_columns; int length = ((b - t) * cols) * 2; @@ -852,8 +820,9 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb, } /* Interface routine */ -static int -sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines) +static bool +sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b, + enum con_scroll dir, unsigned int lines) { struct sisusb_usb_data *sisusb; u16 eattr = c->vc_video_erase_char; @@ -870,17 +839,17 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines) */ if (!lines) - return 1; + return true; sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); if (!sisusb) - return 0; + return false; /* sisusb->lock is down */ if (sisusb_is_inactive(c, sisusb)) { mutex_unlock(&sisusb->lock); - return 0; + return false; } /* Special case */ @@ -971,7 +940,7 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines) mutex_unlock(&sisusb->lock); - return 1; + return true; } /* Interface routine */ diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 5c8210dc6fd9cf9c0b39003d92b59a1440ddb0c2..3525626bf086a3f9fd7ea2185079df780840c29d 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -1915,7 +1915,7 @@ static struct urb *iso_alloc_urb( if (bytes < 0 || !desc) return NULL; maxp = 0x7ff & usb_endpoint_maxp(desc); - maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11)); + maxp *= usb_endpoint_maxp_mult(desc); packets = DIV_ROUND_UP(bytes, maxp); urb = usb_alloc_urb(packets, GFP_KERNEL); @@ -2001,8 +2001,8 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param, "iso period %d %sframes, wMaxPacket %d, transactions: %d\n", 1 << (desc->bInterval - 1), (udev->speed == USB_SPEED_HIGH) ? 
"micro" : "", - usb_endpoint_maxp(desc) & 0x7ff, - 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11))); + usb_endpoint_maxp(desc), + usb_endpoint_maxp_mult(desc)); dev_info(&dev->intf->dev, "total %lu msec (%lu packets)\n", diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 1a874a1f3890eb6d385e767589df6d5ea9d65e5c..91c22276c03b55c2c78e0de55e8a2a9b618607ea 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include "usb_mon.h" diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c index 5388a339cfb81ba76852fc2948a7ea14060c0089..5bdf73a574981506f3776b5f6a6ec966d846d937 100644 --- a/drivers/usb/mon/mon_stat.c +++ b/drivers/usb/mon/mon_stat.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "usb_mon.h" diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index e59334b09c4126ac75077c8b5333222f5e66f4b8..db1a4abf2806132c31a5c47a02c43794805eb9b1 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "usb_mon.h" diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..25cd61947beea51eb185c0cd64d68289bc8f6994 --- /dev/null +++ b/drivers/usb/mtu3/Kconfig @@ -0,0 +1,54 @@ +# For MTK USB3.0 IP + +config USB_MTU3 + tristate "MediaTek USB3 Dual Role controller" + depends on EXTCON && (USB || USB_GADGET) && HAS_DMA + depends on ARCH_MEDIATEK || COMPILE_TEST + select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD + help + Say Y or M here if your system runs on MediaTek SoCs with + Dual Role SuperSpeed USB controller. You can select usb + mode as peripheral role or host role, or both. + + If you don't know what this is, please say N. + + Choose M here to compile this driver as a module, and it + will be called mtu3.ko. + + +if USB_MTU3 +choice + bool "MTU3 Mode Selection" + default USB_MTU3_DUAL_ROLE if (USB && USB_GADGET) + default USB_MTU3_HOST if (USB && !USB_GADGET) + default USB_MTU3_GADGET if (!USB && USB_GADGET) + +config USB_MTU3_HOST + bool "Host only mode" + depends on USB=y || USB=USB_MTU3 + help + Select this when you want to use MTU3 in host mode only, + thereby the gadget feature will be regressed. + +config USB_MTU3_GADGET + bool "Gadget only mode" + depends on USB_GADGET=y || USB_GADGET=USB_MTU3 + help + Select this when you want to use MTU3 in gadget mode only, + thereby the host feature will be regressed. + +config USB_MTU3_DUAL_ROLE + bool "Dual Role mode" + depends on ((USB=y || USB=USB_MTU3) && (USB_GADGET=y || USB_GADGET=USB_MTU3)) + help + This is the default mode of working of MTU3 controller where + both host and gadget features are enabled. + +endchoice + +config USB_MTU3_DEBUG + bool "Enable Debugging Messages" + help + Say Y here to enable debugging messages in the MTU3 Driver. 
+ +endif diff --git a/drivers/usb/mtu3/Makefile b/drivers/usb/mtu3/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..60e0fff7a847d966b07f13c85318896721718228 --- /dev/null +++ b/drivers/usb/mtu3/Makefile @@ -0,0 +1,18 @@ + +ccflags-$(CONFIG_USB_MTU3_DEBUG) += -DDEBUG + +obj-$(CONFIG_USB_MTU3) += mtu3.o + +mtu3-y := mtu3_plat.o + +ifneq ($(filter y,$(CONFIG_USB_MTU3_HOST) $(CONFIG_USB_MTU3_DUAL_ROLE)),) + mtu3-y += mtu3_host.o +endif + +ifneq ($(filter y,$(CONFIG_USB_MTU3_GADGET) $(CONFIG_USB_MTU3_DUAL_ROLE)),) + mtu3-y += mtu3_core.o mtu3_gadget_ep0.o mtu3_gadget.o mtu3_qmu.o +endif + +ifneq ($(CONFIG_USB_MTU3_DUAL_ROLE),) + mtu3-y += mtu3_dr.o +endif diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h new file mode 100644 index 0000000000000000000000000000000000000000..ba9df719f3632cd55e2083969feffd75d4aea7cb --- /dev/null +++ b/drivers/usb/mtu3/mtu3.h @@ -0,0 +1,417 @@ +/* + * mtu3.h - MediaTek USB3 DRD header + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __MTU3_H__ +#define __MTU3_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct mtu3; +struct mtu3_ep; +struct mtu3_request; + +#include "mtu3_hw_regs.h" +#include "mtu3_qmu.h" + +#define MU3D_EP_TXCR0(epnum) (U3D_TX1CSR0 + (((epnum) - 1) * 0x10)) +#define MU3D_EP_TXCR1(epnum) (U3D_TX1CSR1 + (((epnum) - 1) * 0x10)) +#define MU3D_EP_TXCR2(epnum) (U3D_TX1CSR2 + (((epnum) - 1) * 0x10)) + +#define MU3D_EP_RXCR0(epnum) (U3D_RX1CSR0 + (((epnum) - 1) * 0x10)) +#define MU3D_EP_RXCR1(epnum) (U3D_RX1CSR1 + (((epnum) - 1) * 0x10)) +#define MU3D_EP_RXCR2(epnum) (U3D_RX1CSR2 + (((epnum) - 1) * 0x10)) + +#define USB_QMU_RQCSR(epnum) (U3D_RXQCSR1 + (((epnum) - 1) * 0x10)) +#define USB_QMU_RQSAR(epnum) (U3D_RXQSAR1 + (((epnum) - 1) * 0x10)) +#define USB_QMU_RQCPR(epnum) (U3D_RXQCPR1 + (((epnum) - 1) * 0x10)) + +#define USB_QMU_TQCSR(epnum) (U3D_TXQCSR1 + (((epnum) - 1) * 0x10)) +#define USB_QMU_TQSAR(epnum) (U3D_TXQSAR1 + (((epnum) - 1) * 0x10)) +#define USB_QMU_TQCPR(epnum) (U3D_TXQCPR1 + (((epnum) - 1) * 0x10)) + +#define SSUSB_U3_CTRL(p) (U3D_SSUSB_U3_CTRL_0P + ((p) * 0x08)) +#define SSUSB_U2_CTRL(p) (U3D_SSUSB_U2_CTRL_0P + ((p) * 0x08)) + +#define MTU3_DRIVER_NAME "mtu3" +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + +#define MTU3_EP_ENABLED BIT(0) +#define MTU3_EP_STALL BIT(1) +#define MTU3_EP_WEDGE BIT(2) +#define MTU3_EP_BUSY BIT(3) + +#define MTU3_U3_IP_SLOT_DEFAULT 2 +#define MTU3_U2_IP_SLOT_DEFAULT 1 + +/** + * Normally the device works on HS or SS, to simplify fifo management, + * divide fifo into some 512B parts, use bitmap to manage it; And + * 128 bits size of bitmap is large enough, that means it can manage + * up to 64KB fifo size.
+ * NOTE: MTU3_EP_FIFO_UNIT should be power of two + */ +#define MTU3_EP_FIFO_UNIT (1 << 9) +#define MTU3_FIFO_BIT_SIZE 128 +#define MTU3_U2_IP_EP0_FIFO_SIZE 64 + +/** + * Maximum size of ep0 response buffer for ch9 requests, + * the SET_SEL request uses 6 so far, and GET_STATUS is 2 + */ +#define EP0_RESPONSE_BUF 6 + +/* device operated link and speed got from DEVICE_CONF register */ +enum mtu3_speed { + MTU3_SPEED_INACTIVE = 0, + MTU3_SPEED_FULL = 1, + MTU3_SPEED_HIGH = 3, + MTU3_SPEED_SUPER = 4, +}; + +/** + * @MU3D_EP0_STATE_SETUP: waits for SETUP or received a SETUP + * without data stage. + * @MU3D_EP0_STATE_TX: IN data stage + * @MU3D_EP0_STATE_RX: OUT data stage + * @MU3D_EP0_STATE_TX_END: the last IN data is transferred, and + * waits for its completion interrupt + * @MU3D_EP0_STATE_STALL: ep0 is in stall status, will be auto-cleared + * after receives a SETUP. + */ +enum mtu3_g_ep0_state { + MU3D_EP0_STATE_SETUP = 1, + MU3D_EP0_STATE_TX, + MU3D_EP0_STATE_RX, + MU3D_EP0_STATE_TX_END, + MU3D_EP0_STATE_STALL, +}; + +/** + * @base: the base address of fifo + * @limit: the bitmap size in bits + * @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT + */ +struct mtu3_fifo_info { + u32 base; + u32 limit; + DECLARE_BITMAP(bitmap, MTU3_FIFO_BIT_SIZE); +}; + +/** + * General Purpose Descriptor (GPD): + * The format of TX GPD is a little different from RX one. + * And the size of GPD is 16 bytes. + * + * @flag: + * bit0: Hardware Own (HWO) + * bit1: Buffer Descriptor Present (BDP), always 0, BD is not supported + * bit2: Bypass (BPS), 1: HW skips this GPD if HWO = 1 + * bit7: Interrupt On Completion (IOC) + * @chksum: This is used to validate the contents of this GPD; + * If TXQ_CS_EN / RXQ_CS_EN bit is set, an interrupt is issued + * when checksum validation fails; + * Checksum value is calculated over the 16 bytes of the GPD by default; + * @data_buf_len (RX ONLY): This value indicates the length of + * the assigned data buffer + * @next_gpd: Physical address of the next GPD + * @buffer: Physical address of the data buffer + * @buf_len: + * (TX): This value indicates the length of the assigned data buffer + * (RX): The total length of data received + * @ext_len: reserved + * @ext_flag: + * bit5 (TX ONLY): Zero Length Packet (ZLP), + */ +struct qmu_gpd { + __u8 flag; + __u8 chksum; + __le16 data_buf_len; + __le32 next_gpd; + __le32 buffer; + __le16 buf_len; + __u8 ext_len; + __u8 ext_flag; +} __packed; + +/** +* dma: physical base address of GPD segment +* start: virtual base address of GPD segment +* end: the last GPD element +* enqueue: the first empty GPD to use +* dequeue: the first completed GPD serviced by ISR +* NOTE: the size of GPD ring should be >= 2 +*/ +struct mtu3_gpd_ring { + dma_addr_t dma; + struct qmu_gpd *start; + struct qmu_gpd *end; + struct qmu_gpd *enqueue; + struct qmu_gpd *dequeue; +}; + +/** +* @vbus: vbus 5V used by host mode +* @edev: external connector used to detect vbus and iddig changes +* @vbus_nb: notifier for vbus detection +* @id_nb: notifier for iddig(idpin) detection +* @extcon_reg_dwork: delay work for extcon notifier register, waiting for +* xHCI driver initialization, it's necessary for system bootup +* as device. +* @is_u3_drd: whether port0 supports usb3.0 dual-role device or not +* @id_*: used to manually switch between host and device modes by idpin +* @manual_drd_enabled: it's true when supports dual-role device by debugfs +* to switch host/device modes depending on user input.
+*/ +struct otg_switch_mtk { + struct regulator *vbus; + struct extcon_dev *edev; + struct notifier_block vbus_nb; + struct notifier_block id_nb; + struct delayed_work extcon_reg_dwork; + bool is_u3_drd; + /* dual-role switch by debugfs */ + struct pinctrl *id_pinctrl; + struct pinctrl_state *id_float; + struct pinctrl_state *id_ground; + bool manual_drd_enabled; +}; + +/** + * @mac_base: register base address of device MAC, exclude xHCI's + * @ippc_base: register base address of IP Power and Clock interface (IPPC) + * @vusb33: usb3.3V shared by device/host IP + * @sys_clk: system clock of mtu3, shared by device/host IP + * @dr_mode: works in which mode: + * host only, device only or dual-role mode + * @u2_ports: number of usb2.0 host ports + * @u3_ports: number of usb3.0 host ports + * @dbgfs_root: only used when supports manual dual-role switch via debugfs + * @wakeup_en: it's true when supports remote wakeup in host mode + * @wk_deb_p0: port0's wakeup debounce clock + * @wk_deb_p1: it's optional, and depends on port1 is supported or not + */ +struct ssusb_mtk { + struct device *dev; + struct mtu3 *u3d; + void __iomem *mac_base; + void __iomem *ippc_base; + struct phy **phys; + int num_phys; + /* common power & clock */ + struct regulator *vusb33; + struct clk *sys_clk; + /* otg */ + struct otg_switch_mtk otg_switch; + enum usb_dr_mode dr_mode; + bool is_host; + int u2_ports; + int u3_ports; + struct dentry *dbgfs_root; + /* usb wakeup for host mode */ + bool wakeup_en; + struct clk *wk_deb_p0; + struct clk *wk_deb_p1; + struct regmap *pericfg; +}; + +/** + * @fifo_size: it is (@slot + 1) * @fifo_seg_size + * @fifo_seg_size: it is roundup_pow_of_two(@maxp) + */ +struct mtu3_ep { + struct usb_ep ep; + char name[12]; + struct mtu3 *mtu; + u8 epnum; + u8 type; + u8 is_in; + u16 maxp; + int slot; + u32 fifo_size; + u32 fifo_addr; + u32 fifo_seg_size; + struct mtu3_fifo_info *fifo; + + struct list_head req_list; + struct mtu3_gpd_ring gpd_ring; + const struct usb_ss_ep_comp_descriptor *comp_desc; + const struct usb_endpoint_descriptor *desc; + + int flags; + u8 wedged; + u8 busy; +}; + +struct mtu3_request { + struct usb_request request; + struct list_head list; + struct mtu3_ep *mep; + struct mtu3 *mtu; + struct qmu_gpd *gpd; + int epnum; +}; + +static inline struct ssusb_mtk *dev_to_ssusb(struct device *dev) +{ + return dev_get_drvdata(dev); +} + +/** + * struct mtu3 - device driver instance data. 
+ * @slot: MTU3_U2_IP_SLOT_DEFAULT for U2 IP only, + * MTU3_U3_IP_SLOT_DEFAULT for U3 IP + * @may_wakeup: means device's remote wakeup is enabled + * @is_self_powered: is reported in device status and the config descriptor + * @ep0_req: dummy request used while handling standard USB requests + * for GET_STATUS and SET_SEL + * @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests + */ +struct mtu3 { + spinlock_t lock; + struct ssusb_mtk *ssusb; + struct device *dev; + void __iomem *mac_base; + void __iomem *ippc_base; + int irq; + + struct mtu3_fifo_info tx_fifo; + struct mtu3_fifo_info rx_fifo; + + struct mtu3_ep *ep_array; + struct mtu3_ep *in_eps; + struct mtu3_ep *out_eps; + struct mtu3_ep *ep0; + int num_eps; + int slot; + int active_ep; + + struct dma_pool *qmu_gpd_pool; + enum mtu3_g_ep0_state ep0_state; + struct usb_gadget g; /* the gadget */ + struct usb_gadget_driver *gadget_driver; + struct mtu3_request ep0_req; + u8 setup_buf[EP0_RESPONSE_BUF]; + u32 max_speed; + + unsigned is_active:1; + unsigned may_wakeup:1; + unsigned is_self_powered:1; + unsigned test_mode:1; + unsigned softconnect:1; + unsigned u1_enable:1; + unsigned u2_enable:1; + unsigned is_u3_ip:1; + + u8 address; + u8 test_mode_nr; + u32 hw_version; +}; + +static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g) +{ + return container_of(g, struct mtu3, g); +} + +static inline int is_first_entry(const struct list_head *list, + const struct list_head *head) +{ + return list_is_last(head, list); +} + +static inline struct mtu3_request *to_mtu3_request(struct usb_request *req) +{ + return req ? container_of(req, struct mtu3_request, request) : NULL; +} + +static inline struct mtu3_ep *to_mtu3_ep(struct usb_ep *ep) +{ + return ep ? container_of(ep, struct mtu3_ep, ep) : NULL; +} + +static inline struct mtu3_request *next_request(struct mtu3_ep *mep) +{ + struct list_head *queue = &mep->req_list; + + if (list_empty(queue)) + return NULL; + + return list_first_entry(queue, struct mtu3_request, list); +} + +static inline void mtu3_writel(void __iomem *base, u32 offset, u32 data) +{ + writel(data, base + offset); +} + +static inline u32 mtu3_readl(void __iomem *base, u32 offset) +{ + return readl(base + offset); +} + +static inline void mtu3_setbits(void __iomem *base, u32 offset, u32 bits) +{ + void __iomem *addr = base + offset; + u32 tmp = readl(addr); + + writel((tmp | (bits)), addr); +} + +static inline void mtu3_clrbits(void __iomem *base, u32 offset, u32 bits) +{ + void __iomem *addr = base + offset; + u32 tmp = readl(addr); + + writel((tmp & ~(bits)), addr); +} + +int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks); +struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); +void mtu3_free_request(struct usb_ep *ep, struct usb_request *req); +void mtu3_req_complete(struct mtu3_ep *mep, + struct usb_request *req, int status); + +int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep, + int interval, int burst, int mult); +void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep); +void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set); +void mtu3_ep0_setup(struct mtu3 *mtu); +void mtu3_start(struct mtu3 *mtu); +void mtu3_stop(struct mtu3 *mtu); +void mtu3_dev_on_off(struct mtu3 *mtu, int is_on); + +int mtu3_gadget_setup(struct mtu3 *mtu); +void mtu3_gadget_cleanup(struct mtu3 *mtu); +void mtu3_gadget_reset(struct mtu3 *mtu); +void mtu3_gadget_suspend(struct mtu3 *mtu); +void mtu3_gadget_resume(struct mtu3 *mtu); +void mtu3_gadget_disconnect(struct mtu3 *mtu); + 
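+/*
+ * EP0 interrupt handler; dispatched from mtu3_irq() with mtu->lock held
+ * when the BMU_INTR bit of U3D_LV1ISR is set.
+ */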
+irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu); +extern const struct usb_ep_ops mtu3_ep0_ops; + +#endif diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c new file mode 100644 index 0000000000000000000000000000000000000000..99c65b0788ff935add8c2d860ab6be0d177b6e84 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_core.c @@ -0,0 +1,863 @@ +/* + * mtu3_core.c - hardware access layer and gadget init/exit of + * MediaTek usb3 Dual-Role Controller Driver + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include "mtu3.h" + +static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size) +{ + struct mtu3_fifo_info *fifo = mep->fifo; + u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT); + u32 start_bit; + + /* ensure that @mep->fifo_seg_size is power of two */ + num_bits = roundup_pow_of_two(num_bits); + if (num_bits > fifo->limit) + return -EINVAL; + + mep->fifo_seg_size = num_bits * MTU3_EP_FIFO_UNIT; + num_bits = num_bits * (mep->slot + 1); + start_bit = bitmap_find_next_zero_area(fifo->bitmap, + fifo->limit, 0, num_bits, 0); + if (start_bit >= fifo->limit) + return -EOVERFLOW; + + bitmap_set(fifo->bitmap, start_bit, num_bits); + mep->fifo_size = num_bits * MTU3_EP_FIFO_UNIT; + mep->fifo_addr = fifo->base + MTU3_EP_FIFO_UNIT * start_bit; + + dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n", + __func__, mep->fifo_seg_size, mep->fifo_size, start_bit); + + return mep->fifo_addr; +} + +static void ep_fifo_free(struct mtu3_ep *mep) +{ + struct mtu3_fifo_info *fifo = mep->fifo; + u32 addr = mep->fifo_addr; + u32 bits = mep->fifo_size / MTU3_EP_FIFO_UNIT; + u32 start_bit; + + if (unlikely(addr < fifo->base || bits > fifo->limit)) + return; + + start_bit = (addr - fifo->base) / MTU3_EP_FIFO_UNIT; + bitmap_clear(fifo->bitmap, start_bit, bits); + mep->fifo_size = 0; + mep->fifo_seg_size = 0; + + dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n", + __func__, mep->fifo_seg_size, mep->fifo_size, start_bit); +} + +/* enable/disable U3D SS function */ +static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable) +{ + /* If usb3_en==0, LTSSM will go to SS.Disable state */ + if (enable) + mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN); + else + mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN); + + dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable); +} + +/* set/clear U3D HS device soft connect */ +static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable) +{ + if (enable) { + mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, + SOFT_CONN | SUSPENDM_ENABLE); + } else { + mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, + SOFT_CONN | SUSPENDM_ENABLE); + } + dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable); +} + +/* only port0 of U2/U3 supports device mode */ +static int mtu3_device_enable(struct mtu3 *mtu) +{ + void __iomem *ibase = mtu->ippc_base; + u32 check_clk = 0; + + mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); + + if (mtu->is_u3_ip) { + check_clk = SSUSB_U3_MAC_RST_B_STS; + mtu3_clrbits(ibase, 
SSUSB_U3_CTRL(0), + (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN | + SSUSB_U3_PORT_HOST_SEL)); + } + mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), + (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | + SSUSB_U2_PORT_HOST_SEL)); + mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); + + return ssusb_check_clocks(mtu->ssusb, check_clk); +} + +static void mtu3_device_disable(struct mtu3 *mtu) +{ + void __iomem *ibase = mtu->ippc_base; + + if (mtu->is_u3_ip) + mtu3_setbits(ibase, SSUSB_U3_CTRL(0), + (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN)); + + mtu3_setbits(ibase, SSUSB_U2_CTRL(0), + SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN); + mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); + mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); +} + +/* reset U3D's device module. */ +static void mtu3_device_reset(struct mtu3 *mtu) +{ + void __iomem *ibase = mtu->ippc_base; + + mtu3_setbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST); + udelay(1); + mtu3_clrbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST); +} + +/* disable all interrupts */ +static void mtu3_intr_disable(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + + /* Disable level 1 interrupts */ + mtu3_writel(mbase, U3D_LV1IECR, ~0x0); + /* Disable endpoint interrupts */ + mtu3_writel(mbase, U3D_EPIECR, ~0x0); +} + +static void mtu3_intr_status_clear(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + + /* Clear EP0 and Tx/Rx EPn interrupts status */ + mtu3_writel(mbase, U3D_EPISR, ~0x0); + /* Clear U2 USB common interrupts status */ + mtu3_writel(mbase, U3D_COMMON_USB_INTR, ~0x0); + /* Clear U3 LTSSM interrupts status */ + mtu3_writel(mbase, U3D_LTSSM_INTR, ~0x0); + /* Clear speed change interrupt status */ + mtu3_writel(mbase, U3D_DEV_LINK_INTR, ~0x0); +} + +/* enable system global interrupt */ +static void mtu3_intr_enable(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + u32 value; + + /*Enable level 1 interrupts (BMU, QMU, MAC3, DMA, MAC2, EPCTL) */ + value = BMU_INTR | QMU_INTR | MAC3_INTR | MAC2_INTR | EP_CTRL_INTR; + mtu3_writel(mbase, U3D_LV1IESR, value); + + /* Enable U2 common USB interrupts */ + value = SUSPEND_INTR | RESUME_INTR | RESET_INTR; + mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value); + + if (mtu->is_u3_ip) { + /* Enable U3 LTSSM interrupts */ + value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR | + VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR; + mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value); + } + + /* Enable QMU interrupts. 
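+	 * Only the QMU error interrupts (checksum, length and ZLP errors)
+	 * are enabled here; the per-endpoint done interrupts are enabled
+	 * later in mtu3_config_ep() via U3D_QIESR0.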
*/ + value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT | + RXQ_LENERR_INT | RXQ_ZLPERR_INT; + mtu3_writel(mbase, U3D_QIESR1, value); + + /* Enable speed change interrupt */ + mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR); +} + +/* set/clear the stall and toggle bits for non-ep0 */ +void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set) +{ + struct mtu3 *mtu = mep->mtu; + void __iomem *mbase = mtu->mac_base; + u8 epnum = mep->epnum; + u32 csr; + + if (mep->is_in) { /* TX */ + csr = mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)) & TX_W1C_BITS; + if (set) + csr |= TX_SENDSTALL; + else + csr = (csr & (~TX_SENDSTALL)) | TX_SENTSTALL; + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr); + } else { /* RX */ + csr = mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)) & RX_W1C_BITS; + if (set) + csr |= RX_SENDSTALL; + else + csr = (csr & (~RX_SENDSTALL)) | RX_SENTSTALL; + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr); + } + + if (!set) { + mtu3_setbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum)); + mtu3_clrbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum)); + mep->flags &= ~MTU3_EP_STALL; + } else { + mep->flags |= MTU3_EP_STALL; + } + + dev_dbg(mtu->dev, "%s: %s\n", mep->name, + set ? "SEND STALL" : "CLEAR STALL, with EP RESET"); +} + +void mtu3_dev_on_off(struct mtu3 *mtu, int is_on) +{ + if (mtu->is_u3_ip && (mtu->max_speed == USB_SPEED_SUPER)) + mtu3_ss_func_set(mtu, is_on); + else + mtu3_hs_softconn_set(mtu, is_on); + + dev_info(mtu->dev, "gadget (%s) pullup D%s\n", + usb_speed_string(mtu->max_speed), is_on ? "+" : "-"); +} + +void mtu3_start(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + + dev_dbg(mtu->dev, "%s devctl 0x%x\n", __func__, + mtu3_readl(mbase, U3D_DEVICE_CONTROL)); + + mtu3_clrbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); + + /* + * When disable U2 port, USB2_CSR's register will be reset to + * default value after re-enable it again(HS is enabled by default). + * So if force mac to work as FS, disable HS function. 
+ */ + if (mtu->max_speed == USB_SPEED_FULL) + mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE); + + /* Initialize the default interrupts */ + mtu3_intr_enable(mtu); + mtu->is_active = 1; + + if (mtu->softconnect) + mtu3_dev_on_off(mtu, 1); +} + +void mtu3_stop(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "%s\n", __func__); + + mtu3_intr_disable(mtu); + mtu3_intr_status_clear(mtu); + + if (mtu->softconnect) + mtu3_dev_on_off(mtu, 0); + + mtu->is_active = 0; + mtu3_setbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); +} + +/* for non-ep0 */ +int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep, + int interval, int burst, int mult) +{ + void __iomem *mbase = mtu->mac_base; + int epnum = mep->epnum; + u32 csr0, csr1, csr2; + int fifo_sgsz, fifo_addr; + int num_pkts; + + fifo_addr = ep_fifo_alloc(mep, mep->maxp); + if (fifo_addr < 0) { + dev_err(mtu->dev, "alloc ep fifo failed(%d)\n", mep->maxp); + return -ENOMEM; + } + fifo_sgsz = ilog2(mep->fifo_seg_size); + dev_dbg(mtu->dev, "%s fifosz: %x(%x/%x)\n", __func__, fifo_sgsz, + mep->fifo_seg_size, mep->fifo_size); + + if (mep->is_in) { + csr0 = TX_TXMAXPKTSZ(mep->maxp); + csr0 |= TX_DMAREQEN; + + num_pkts = (burst + 1) * (mult + 1) - 1; + csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot); + csr1 |= TX_MAX_PKT(num_pkts) | TX_MULT(mult); + + csr2 = TX_FIFOADDR(fifo_addr >> 4); + csr2 |= TX_FIFOSEGSIZE(fifo_sgsz); + + switch (mep->type) { + case USB_ENDPOINT_XFER_BULK: + csr1 |= TX_TYPE(TYPE_BULK); + break; + case USB_ENDPOINT_XFER_ISOC: + csr1 |= TX_TYPE(TYPE_ISO); + csr2 |= TX_BINTERVAL(interval); + break; + case USB_ENDPOINT_XFER_INT: + csr1 |= TX_TYPE(TYPE_INT); + csr2 |= TX_BINTERVAL(interval); + break; + } + + /* Enable QMU Done interrupt */ + mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum)); + + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr0); + mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), csr1); + mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), csr2); + + dev_dbg(mtu->dev, "U3D_TX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n", + epnum, mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)), + mtu3_readl(mbase, MU3D_EP_TXCR1(epnum)), + mtu3_readl(mbase, MU3D_EP_TXCR2(epnum))); + } else { + csr0 = RX_RXMAXPKTSZ(mep->maxp); + csr0 |= RX_DMAREQEN; + + num_pkts = (burst + 1) * (mult + 1) - 1; + csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot); + csr1 |= RX_MAX_PKT(num_pkts) | RX_MULT(mult); + + csr2 = RX_FIFOADDR(fifo_addr >> 4); + csr2 |= RX_FIFOSEGSIZE(fifo_sgsz); + + switch (mep->type) { + case USB_ENDPOINT_XFER_BULK: + csr1 |= RX_TYPE(TYPE_BULK); + break; + case USB_ENDPOINT_XFER_ISOC: + csr1 |= RX_TYPE(TYPE_ISO); + csr2 |= RX_BINTERVAL(interval); + break; + case USB_ENDPOINT_XFER_INT: + csr1 |= RX_TYPE(TYPE_INT); + csr2 |= RX_BINTERVAL(interval); + break; + } + + /*Enable QMU Done interrupt */ + mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum)); + + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr0); + mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), csr1); + mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), csr2); + + dev_dbg(mtu->dev, "U3D_RX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n", + epnum, mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)), + mtu3_readl(mbase, MU3D_EP_RXCR1(epnum)), + mtu3_readl(mbase, MU3D_EP_RXCR2(epnum))); + } + + dev_dbg(mtu->dev, "csr0:%#x, csr1:%#x, csr2:%#x\n", csr0, csr1, csr2); + dev_dbg(mtu->dev, "%s: %s, fifo-addr:%#x, fifo-size:%#x(%#x/%#x)\n", + __func__, mep->name, mep->fifo_addr, mep->fifo_size, + fifo_sgsz, mep->fifo_seg_size); + + return 0; +} + +/* for non-ep0 */ +void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep) +{ + void __iomem *mbase = 
mtu->mac_base; + int epnum = mep->epnum; + + if (mep->is_in) { + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), 0); + mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), 0); + mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), 0); + mtu3_setbits(mbase, U3D_QIECR0, QMU_TX_DONE_INT(epnum)); + } else { + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), 0); + mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), 0); + mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), 0); + mtu3_setbits(mbase, U3D_QIECR0, QMU_RX_DONE_INT(epnum)); + } + + ep_fifo_free(mep); + + dev_dbg(mtu->dev, "%s: %s\n", __func__, mep->name); +} + +/* + * Two scenarios: + * 1. when device IP supports SS, the fifo of EP0, TX EPs, RX EPs + * are separated; + * 2. when supports only HS, the fifo is shared for all EPs, and + * the capability registers of @EPNTXFFSZ or @EPNRXFFSZ indicate + * the total fifo size of non-ep0, and ep0's is fixed to 64B, + * so the total fifo size is 64B + @EPNTXFFSZ; + * Due to the first 64B should be reserved for EP0, non-ep0's fifo + * starts from offset 64 and are divided into two equal parts for + * TX or RX EPs for simplification. + */ +static void get_ep_fifo_config(struct mtu3 *mtu) +{ + struct mtu3_fifo_info *tx_fifo; + struct mtu3_fifo_info *rx_fifo; + u32 fifosize; + + if (mtu->is_u3_ip) { + fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ); + tx_fifo = &mtu->tx_fifo; + tx_fifo->base = 0; + tx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT; + bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); + + fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNRXFFSZ); + rx_fifo = &mtu->rx_fifo; + rx_fifo->base = 0; + rx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT; + bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); + mtu->slot = MTU3_U3_IP_SLOT_DEFAULT; + } else { + fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ); + tx_fifo = &mtu->tx_fifo; + tx_fifo->base = MTU3_U2_IP_EP0_FIFO_SIZE; + tx_fifo->limit = (fifosize / MTU3_EP_FIFO_UNIT) >> 1; + bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); + + rx_fifo = &mtu->rx_fifo; + rx_fifo->base = + tx_fifo->base + tx_fifo->limit * MTU3_EP_FIFO_UNIT; + rx_fifo->limit = tx_fifo->limit; + bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); + mtu->slot = MTU3_U2_IP_SLOT_DEFAULT; + } + + dev_dbg(mtu->dev, "%s, TX: base-%d, limit-%d; RX: base-%d, limit-%d\n", + __func__, tx_fifo->base, tx_fifo->limit, + rx_fifo->base, rx_fifo->limit); +} + +void mtu3_ep0_setup(struct mtu3 *mtu) +{ + u32 maxpacket = mtu->g.ep0->maxpacket; + u32 csr; + + dev_dbg(mtu->dev, "%s maxpacket: %d\n", __func__, maxpacket); + + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR); + csr &= ~EP0_MAXPKTSZ_MSK; + csr |= EP0_MAXPKTSZ(maxpacket); + csr &= EP0_W1C_BITS; + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr); + + /* Enable EP0 interrupt */ + mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR); +} + +static int mtu3_mem_alloc(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + struct mtu3_ep *ep_array; + int in_ep_num, out_ep_num; + u32 cap_epinfo; + int ret; + int i; + + cap_epinfo = mtu3_readl(mbase, U3D_CAP_EPINFO); + in_ep_num = CAP_TX_EP_NUM(cap_epinfo); + out_ep_num = CAP_RX_EP_NUM(cap_epinfo); + + dev_info(mtu->dev, "fifosz/epnum: Tx=%#x/%d, Rx=%#x/%d\n", + mtu3_readl(mbase, U3D_CAP_EPNTXFFSZ), in_ep_num, + mtu3_readl(mbase, U3D_CAP_EPNRXFFSZ), out_ep_num); + + /* one for ep0, another is reserved */ + mtu->num_eps = min(in_ep_num, out_ep_num) + 1; + ep_array = kcalloc(mtu->num_eps * 2, sizeof(*ep_array), GFP_KERNEL); + if (ep_array == NULL) + return -ENOMEM; + + mtu->ep_array = ep_array; + mtu->in_eps = ep_array; + mtu->out_eps = 
&ep_array[mtu->num_eps]; + /* ep0 uses in_eps[0], out_eps[0] is reserved */ + mtu->ep0 = mtu->in_eps; + mtu->ep0->mtu = mtu; + mtu->ep0->epnum = 0; + + for (i = 1; i < mtu->num_eps; i++) { + struct mtu3_ep *mep = mtu->in_eps + i; + + mep->fifo = &mtu->tx_fifo; + mep = mtu->out_eps + i; + mep->fifo = &mtu->rx_fifo; + } + + get_ep_fifo_config(mtu); + + ret = mtu3_qmu_init(mtu); + if (ret) + kfree(mtu->ep_array); + + return ret; +} + +static void mtu3_mem_free(struct mtu3 *mtu) +{ + mtu3_qmu_exit(mtu); + kfree(mtu->ep_array); +} + +static void mtu3_set_speed(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + + if (!mtu->is_u3_ip && (mtu->max_speed > USB_SPEED_HIGH)) + mtu->max_speed = USB_SPEED_HIGH; + + if (mtu->max_speed == USB_SPEED_FULL) { + /* disable U3 SS function */ + mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN); + /* disable HS function */ + mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE); + } else if (mtu->max_speed == USB_SPEED_HIGH) { + mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN); + /* HS/FS detected by HW */ + mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE); + } + + dev_info(mtu->dev, "max_speed: %s\n", + usb_speed_string(mtu->max_speed)); +} + +static void mtu3_regs_init(struct mtu3 *mtu) +{ + + void __iomem *mbase = mtu->mac_base; + + /* be sure interrupts are disabled before registration of ISR */ + mtu3_intr_disable(mtu); + mtu3_intr_status_clear(mtu); + + if (mtu->is_u3_ip) { + /* disable LGO_U1/U2 by default */ + mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL, + SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE | + SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE); + /* device responses to u3_exit from host automatically */ + mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN); + /* automatically build U2 link when U3 detect fail */ + mtu3_setbits(mbase, U3D_USB2_TEST_MODE, U2U3_AUTO_SWITCH); + } + + mtu3_set_speed(mtu); + + /* delay about 0.1us from detecting reset to send chirp-K */ + mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK); + /* U2/U3 detected by HW */ + mtu3_writel(mbase, U3D_DEVICE_CONF, 0); + /* enable QMU 16B checksum */ + mtu3_setbits(mbase, U3D_QCR0, QMU_CS16B_EN); + /* vbus detected by HW */ + mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON); +} + +static irqreturn_t mtu3_link_isr(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + enum usb_device_speed udev_speed; + u32 maxpkt = 64; + u32 link; + u32 speed; + + link = mtu3_readl(mbase, U3D_DEV_LINK_INTR); + link &= mtu3_readl(mbase, U3D_DEV_LINK_INTR_ENABLE); + mtu3_writel(mbase, U3D_DEV_LINK_INTR, link); /* W1C */ + dev_dbg(mtu->dev, "=== LINK[%x] ===\n", link); + + if (!(link & SSUSB_DEV_SPEED_CHG_INTR)) + return IRQ_NONE; + + speed = SSUSB_DEV_SPEED(mtu3_readl(mbase, U3D_DEVICE_CONF)); + + switch (speed) { + case MTU3_SPEED_FULL: + udev_speed = USB_SPEED_FULL; + /*BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */ + mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf) + | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa)); + mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, + LPM_BESL_STALL | LPM_BESLD_STALL); + break; + case MTU3_SPEED_HIGH: + udev_speed = USB_SPEED_HIGH; + /*BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */ + mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf) + | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa)); + mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, + LPM_BESL_STALL | LPM_BESLD_STALL); + break; + case MTU3_SPEED_SUPER: + udev_speed = USB_SPEED_SUPER; + maxpkt = 512; + break; + default: + udev_speed = USB_SPEED_UNKNOWN; + break; + } + dev_dbg(mtu->dev, "%s: %s\n", __func__, 
usb_speed_string(udev_speed)); + + mtu->g.speed = udev_speed; + mtu->g.ep0->maxpacket = maxpkt; + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + + if (udev_speed == USB_SPEED_UNKNOWN) + mtu3_gadget_disconnect(mtu); + else + mtu3_ep0_setup(mtu); + + return IRQ_HANDLED; +} + +static irqreturn_t mtu3_u3_ltssm_isr(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + u32 ltssm; + + ltssm = mtu3_readl(mbase, U3D_LTSSM_INTR); + ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE); + mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */ + dev_dbg(mtu->dev, "=== LTSSM[%x] ===\n", ltssm); + + if (ltssm & (HOT_RST_INTR | WARM_RST_INTR)) + mtu3_gadget_reset(mtu); + + if (ltssm & VBUS_FALL_INTR) + mtu3_ss_func_set(mtu, false); + + if (ltssm & VBUS_RISE_INTR) + mtu3_ss_func_set(mtu, true); + + if (ltssm & EXIT_U3_INTR) + mtu3_gadget_resume(mtu); + + if (ltssm & ENTER_U3_INTR) + mtu3_gadget_suspend(mtu); + + return IRQ_HANDLED; +} + +static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + u32 u2comm; + + u2comm = mtu3_readl(mbase, U3D_COMMON_USB_INTR); + u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE); + mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */ + dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm); + + if (u2comm & SUSPEND_INTR) + mtu3_gadget_suspend(mtu); + + if (u2comm & RESUME_INTR) + mtu3_gadget_resume(mtu); + + if (u2comm & RESET_INTR) + mtu3_gadget_reset(mtu); + + return IRQ_HANDLED; +} + +static irqreturn_t mtu3_irq(int irq, void *data) +{ + struct mtu3 *mtu = (struct mtu3 *)data; + unsigned long flags; + u32 level1; + + spin_lock_irqsave(&mtu->lock, flags); + + /* U3D_LV1ISR is RU */ + level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR); + level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER); + + if (level1 & EP_CTRL_INTR) + mtu3_link_isr(mtu); + + if (level1 & MAC2_INTR) + mtu3_u2_common_isr(mtu); + + if (level1 & MAC3_INTR) + mtu3_u3_ltssm_isr(mtu); + + if (level1 & BMU_INTR) + mtu3_ep0_isr(mtu); + + if (level1 & QMU_INTR) + mtu3_qmu_isr(mtu); + + spin_unlock_irqrestore(&mtu->lock, flags); + + return IRQ_HANDLED; +} + +static int mtu3_hw_init(struct mtu3 *mtu) +{ + u32 cap_dev; + int ret; + + mtu->hw_version = mtu3_readl(mtu->ippc_base, U3D_SSUSB_HW_ID); + + cap_dev = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP); + mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(cap_dev); + + dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version, + mtu->is_u3_ip ? 
"U3" : "U2"); + + mtu3_device_reset(mtu); + + ret = mtu3_device_enable(mtu); + if (ret) { + dev_err(mtu->dev, "device enable failed %d\n", ret); + return ret; + } + + ret = mtu3_mem_alloc(mtu); + if (ret) + return -ENOMEM; + + mtu3_regs_init(mtu); + + return 0; +} + +static void mtu3_hw_exit(struct mtu3 *mtu) +{ + mtu3_device_disable(mtu); + mtu3_mem_free(mtu); +} + +/*-------------------------------------------------------------------------*/ + +int ssusb_gadget_init(struct ssusb_mtk *ssusb) +{ + struct device *dev = ssusb->dev; + struct platform_device *pdev = to_platform_device(dev); + struct mtu3 *mtu = NULL; + struct resource *res; + int ret = -ENOMEM; + + mtu = devm_kzalloc(dev, sizeof(struct mtu3), GFP_KERNEL); + if (mtu == NULL) + return -ENOMEM; + + mtu->irq = platform_get_irq(pdev, 0); + if (mtu->irq <= 0) { + dev_err(dev, "fail to get irq number\n"); + return -ENODEV; + } + dev_info(dev, "irq %d\n", mtu->irq); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac"); + mtu->mac_base = devm_ioremap_resource(dev, res); + if (IS_ERR(mtu->mac_base)) { + dev_err(dev, "error mapping memory for dev mac\n"); + return PTR_ERR(mtu->mac_base); + } + + spin_lock_init(&mtu->lock); + mtu->dev = dev; + mtu->ippc_base = ssusb->ippc_base; + ssusb->mac_base = mtu->mac_base; + ssusb->u3d = mtu; + mtu->ssusb = ssusb; + mtu->max_speed = usb_get_maximum_speed(dev); + + /* check the max_speed parameter */ + switch (mtu->max_speed) { + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + break; + default: + dev_err(dev, "invalid max_speed: %s\n", + usb_speed_string(mtu->max_speed)); + /* fall through */ + case USB_SPEED_UNKNOWN: + /* default as SS */ + mtu->max_speed = USB_SPEED_SUPER; + break; + } + + dev_dbg(dev, "mac_base=0x%p, ippc_base=0x%p\n", + mtu->mac_base, mtu->ippc_base); + + ret = mtu3_hw_init(mtu); + if (ret) { + dev_err(dev, "mtu3 hw init failed:%d\n", ret); + return ret; + } + + ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu); + if (ret) { + dev_err(dev, "request irq %d failed!\n", mtu->irq); + goto irq_err; + } + + device_init_wakeup(dev, true); + + ret = mtu3_gadget_setup(mtu); + if (ret) { + dev_err(dev, "mtu3 gadget init failed:%d\n", ret); + goto gadget_err; + } + + /* init as host mode, power down device IP for power saving */ + if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) + mtu3_stop(mtu); + + dev_dbg(dev, " %s() done...\n", __func__); + + return 0; + +gadget_err: + device_init_wakeup(dev, false); + +irq_err: + mtu3_hw_exit(mtu); + ssusb->u3d = NULL; + dev_err(dev, " %s() fail...\n", __func__); + + return ret; +} + +void ssusb_gadget_exit(struct ssusb_mtk *ssusb) +{ + struct mtu3 *mtu = ssusb->u3d; + + mtu3_gadget_cleanup(mtu); + device_init_wakeup(ssusb->dev, false); + mtu3_hw_exit(mtu); +} diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c new file mode 100644 index 0000000000000000000000000000000000000000..1a8987e7c5b0648c031fb5aece274a4939fc2ead --- /dev/null +++ b/drivers/usb/mtu3/mtu3_dr.c @@ -0,0 +1,379 @@ +/* + * mtu3_dr.c - dual role switch and host glue layer + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mtu3.h" +#include "mtu3_dr.h" + +#define USB2_PORT 2 +#define USB3_PORT 3 + +enum mtu3_vbus_id_state { + MTU3_ID_FLOAT = 1, + MTU3_ID_GROUND, + MTU3_VBUS_OFF, + MTU3_VBUS_VALID, +}; + +static void toggle_opstate(struct ssusb_mtk *ssusb) +{ + if (!ssusb->otg_switch.is_u3_drd) { + mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION); + mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN); + } +} + +/* only port0 supports dual-role mode */ +static int ssusb_port0_switch(struct ssusb_mtk *ssusb, + int version, bool tohost) +{ + void __iomem *ibase = ssusb->ippc_base; + u32 value; + + dev_dbg(ssusb->dev, "%s (switch u%d port0 to %s)\n", __func__, + version, tohost ? "host" : "device"); + + if (version == USB2_PORT) { + /* 1. power off and disable u2 port0 */ + value = mtu3_readl(ibase, SSUSB_U2_CTRL(0)); + value |= SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS; + mtu3_writel(ibase, SSUSB_U2_CTRL(0), value); + + /* 2. power on, enable u2 port0 and select its mode */ + value = mtu3_readl(ibase, SSUSB_U2_CTRL(0)); + value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS); + value = tohost ? (value | SSUSB_U2_PORT_HOST_SEL) : + (value & (~SSUSB_U2_PORT_HOST_SEL)); + mtu3_writel(ibase, SSUSB_U2_CTRL(0), value); + } else { + /* 1. power off and disable u3 port0 */ + value = mtu3_readl(ibase, SSUSB_U3_CTRL(0)); + value |= SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS; + mtu3_writel(ibase, SSUSB_U3_CTRL(0), value); + + /* 2. power on, enable u3 port0 and select its mode */ + value = mtu3_readl(ibase, SSUSB_U3_CTRL(0)); + value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS); + value = tohost ? (value | SSUSB_U3_PORT_HOST_SEL) : + (value & (~SSUSB_U3_PORT_HOST_SEL)); + mtu3_writel(ibase, SSUSB_U3_CTRL(0), value); + } + + return 0; +} + +static void switch_port_to_host(struct ssusb_mtk *ssusb) +{ + u32 check_clk = 0; + + dev_dbg(ssusb->dev, "%s\n", __func__); + + ssusb_port0_switch(ssusb, USB2_PORT, true); + + if (ssusb->otg_switch.is_u3_drd) { + ssusb_port0_switch(ssusb, USB3_PORT, true); + check_clk = SSUSB_U3_MAC_RST_B_STS; + } + + ssusb_check_clocks(ssusb, check_clk); + + /* after all clocks are stable */ + toggle_opstate(ssusb); +} + +static void switch_port_to_device(struct ssusb_mtk *ssusb) +{ + u32 check_clk = 0; + + dev_dbg(ssusb->dev, "%s\n", __func__); + + ssusb_port0_switch(ssusb, USB2_PORT, false); + + if (ssusb->otg_switch.is_u3_drd) { + ssusb_port0_switch(ssusb, USB3_PORT, false); + check_clk = SSUSB_U3_MAC_RST_B_STS; + } + + ssusb_check_clocks(ssusb, check_clk); +} + +int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on) +{ + struct ssusb_mtk *ssusb = + container_of(otg_sx, struct ssusb_mtk, otg_switch); + struct regulator *vbus = otg_sx->vbus; + int ret; + + /* vbus is optional */ + if (!vbus) + return 0; + + dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, is_on ? 
"on" : "off"); + + if (is_on) { + ret = regulator_enable(vbus); + if (ret) { + dev_err(ssusb->dev, "vbus regulator enable failed\n"); + return ret; + } + } else { + regulator_disable(vbus); + } + + return 0; +} + +/* + * switch to host: -> MTU3_VBUS_OFF --> MTU3_ID_GROUND + * switch to device: -> MTU3_ID_FLOAT --> MTU3_VBUS_VALID + */ +static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx, + enum mtu3_vbus_id_state status) +{ + struct ssusb_mtk *ssusb = + container_of(otg_sx, struct ssusb_mtk, otg_switch); + struct mtu3 *mtu = ssusb->u3d; + + dev_dbg(ssusb->dev, "mailbox state(%d)\n", status); + + switch (status) { + case MTU3_ID_GROUND: + switch_port_to_host(ssusb); + ssusb_set_vbus(otg_sx, 1); + ssusb->is_host = true; + break; + case MTU3_ID_FLOAT: + ssusb->is_host = false; + ssusb_set_vbus(otg_sx, 0); + switch_port_to_device(ssusb); + break; + case MTU3_VBUS_OFF: + mtu3_stop(mtu); + pm_relax(ssusb->dev); + break; + case MTU3_VBUS_VALID: + /* avoid suspend when works as device */ + pm_stay_awake(ssusb->dev); + mtu3_start(mtu); + break; + default: + dev_err(ssusb->dev, "invalid state\n"); + } +} + +static int ssusb_id_notifier(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct otg_switch_mtk *otg_sx = + container_of(nb, struct otg_switch_mtk, id_nb); + + if (event) + ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND); + else + ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT); + + return NOTIFY_DONE; +} + +static int ssusb_vbus_notifier(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct otg_switch_mtk *otg_sx = + container_of(nb, struct otg_switch_mtk, vbus_nb); + + if (event) + ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID); + else + ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF); + + return NOTIFY_DONE; +} + +static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx) +{ + struct ssusb_mtk *ssusb = + container_of(otg_sx, struct ssusb_mtk, otg_switch); + struct extcon_dev *edev = otg_sx->edev; + int ret; + + /* extcon is optional */ + if (!edev) + return 0; + + otg_sx->vbus_nb.notifier_call = ssusb_vbus_notifier; + ret = extcon_register_notifier(edev, EXTCON_USB, + &otg_sx->vbus_nb); + if (ret < 0) + dev_err(ssusb->dev, "failed to register notifier for USB\n"); + + otg_sx->id_nb.notifier_call = ssusb_id_notifier; + ret = extcon_register_notifier(edev, EXTCON_USB_HOST, + &otg_sx->id_nb); + if (ret < 0) + dev_err(ssusb->dev, "failed to register notifier for USB-HOST\n"); + + dev_dbg(ssusb->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n", + extcon_get_cable_state_(edev, EXTCON_USB), + extcon_get_cable_state_(edev, EXTCON_USB_HOST)); + + /* default as host, switch to device mode if needed */ + if (extcon_get_cable_state_(edev, EXTCON_USB_HOST) == false) + ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT); + if (extcon_get_cable_state_(edev, EXTCON_USB) == true) + ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID); + + return 0; +} + +static void extcon_register_dwork(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct otg_switch_mtk *otg_sx = + container_of(dwork, struct otg_switch_mtk, extcon_reg_dwork); + + ssusb_extcon_register(otg_sx); +} + +/* + * We provide an interface via debugfs to switch between host and device modes + * depending on user input. + * This is useful in special cases, such as uses TYPE-A receptacle but also + * wants to support dual-role mode. + * It generates cable state changes by pulling up/down IDPIN and + * notifies driver to switch mode by "extcon-usb-gpio". 
+ * NOTE: when use MICRO receptacle, should not enable this interface. + */ +static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host) +{ + struct otg_switch_mtk *otg_sx = &ssusb->otg_switch; + + if (to_host) + pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_ground); + else + pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_float); +} + + +static int ssusb_mode_show(struct seq_file *sf, void *unused) +{ + struct ssusb_mtk *ssusb = sf->private; + + seq_printf(sf, "current mode: %s(%s drd)\n(echo device/host)\n", + ssusb->is_host ? "host" : "device", + ssusb->otg_switch.manual_drd_enabled ? "manual" : "auto"); + + return 0; +} + +static int ssusb_mode_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssusb_mode_show, inode->i_private); +} + +static ssize_t ssusb_mode_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + struct seq_file *sf = file->private_data; + struct ssusb_mtk *ssusb = sf->private; + char buf[16]; + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + if (!strncmp(buf, "host", 4) && !ssusb->is_host) { + ssusb_mode_manual_switch(ssusb, 1); + } else if (!strncmp(buf, "device", 6) && ssusb->is_host) { + ssusb_mode_manual_switch(ssusb, 0); + } else { + dev_err(ssusb->dev, "wrong or duplicated setting\n"); + return -EINVAL; + } + + return count; +} + +static const struct file_operations ssusb_mode_fops = { + .open = ssusb_mode_open, + .write = ssusb_mode_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void ssusb_debugfs_init(struct ssusb_mtk *ssusb) +{ + struct dentry *root; + struct dentry *file; + + root = debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root); + if (IS_ERR_OR_NULL(root)) { + if (!root) + dev_err(ssusb->dev, "create debugfs root failed\n"); + return; + } + ssusb->dbgfs_root = root; + + file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root, + ssusb, &ssusb_mode_fops); + if (!file) + dev_dbg(ssusb->dev, "create debugfs mode failed\n"); +} + +static void ssusb_debugfs_exit(struct ssusb_mtk *ssusb) +{ + debugfs_remove_recursive(ssusb->dbgfs_root); +} + +int ssusb_otg_switch_init(struct ssusb_mtk *ssusb) +{ + struct otg_switch_mtk *otg_sx = &ssusb->otg_switch; + + INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork, extcon_register_dwork); + + if (otg_sx->manual_drd_enabled) + ssusb_debugfs_init(ssusb); + + /* It is enough to delay 1s for waiting for host initialization */ + schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ); + + return 0; +} + +void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb) +{ + struct otg_switch_mtk *otg_sx = &ssusb->otg_switch; + + cancel_delayed_work(&otg_sx->extcon_reg_dwork); + + if (otg_sx->edev) { + extcon_unregister_notifier(otg_sx->edev, + EXTCON_USB, &otg_sx->vbus_nb); + extcon_unregister_notifier(otg_sx->edev, + EXTCON_USB_HOST, &otg_sx->id_nb); + } + + if (otg_sx->manual_drd_enabled) + ssusb_debugfs_exit(ssusb); +} diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h new file mode 100644 index 0000000000000000000000000000000000000000..9b228b5811b06636232ff0393f2a90977cfa71b6 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_dr.h @@ -0,0 +1,108 @@ +/* + * mtu3_dr.h - dual role switch and host glue layer header + * + * Copyright (C) 2016 MediaTek Inc. 
+ * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _MTU3_DR_H_ +#define _MTU3_DR_H_ + +#if IS_ENABLED(CONFIG_USB_MTU3_HOST) || IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE) + +int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn); +void ssusb_host_exit(struct ssusb_mtk *ssusb); +int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb, + struct device_node *dn); +int ssusb_host_enable(struct ssusb_mtk *ssusb); +int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend); +int ssusb_wakeup_enable(struct ssusb_mtk *ssusb); +void ssusb_wakeup_disable(struct ssusb_mtk *ssusb); + +#else + +static inline int ssusb_host_init(struct ssusb_mtk *ssusb, + + struct device_node *parent_dn) +{ + return 0; +} + +static inline void ssusb_host_exit(struct ssusb_mtk *ssusb) +{} + +static inline int ssusb_wakeup_of_property_parse( + struct ssusb_mtk *ssusb, struct device_node *dn) +{ + return 0; +} + +static inline int ssusb_host_enable(struct ssusb_mtk *ssusb) +{ + return 0; +} + +static inline int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend) +{ + return 0; +} + +static inline int ssusb_wakeup_enable(struct ssusb_mtk *ssusb) +{ + return 0; +} + +static inline void ssusb_wakeup_disable(struct ssusb_mtk *ssusb) +{} + +#endif + + +#if IS_ENABLED(CONFIG_USB_MTU3_GADGET) || IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE) +int ssusb_gadget_init(struct ssusb_mtk *ssusb); +void ssusb_gadget_exit(struct ssusb_mtk *ssusb); +#else +static inline int ssusb_gadget_init(struct ssusb_mtk *ssusb) +{ + return 0; +} + +static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb) +{} +#endif + + +#if IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE) +int ssusb_otg_switch_init(struct ssusb_mtk *ssusb); +void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb); +int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on); + +#else + +static inline int ssusb_otg_switch_init(struct ssusb_mtk *ssusb) +{ + return 0; +} + +static inline void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb) +{} + +static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on) +{ + return 0; +} + +#endif + +#endif /* _MTU3_DR_H_ */ diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c new file mode 100644 index 0000000000000000000000000000000000000000..9dd2441b4fa1a2f82d605522f4f8415574fe0cab --- /dev/null +++ b/drivers/usb/mtu3/mtu3_gadget.c @@ -0,0 +1,730 @@ +/* + * mtu3_gadget.c - MediaTek usb3 DRD peripheral support + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "mtu3.h" + +void mtu3_req_complete(struct mtu3_ep *mep, + struct usb_request *req, int status) +__releases(mep->mtu->lock) +__acquires(mep->mtu->lock) +{ + struct mtu3_request *mreq; + struct mtu3 *mtu; + int busy = mep->busy; + + mreq = to_mtu3_request(req); + list_del(&mreq->list); + if (mreq->request.status == -EINPROGRESS) + mreq->request.status = status; + + mtu = mreq->mtu; + mep->busy = 1; + spin_unlock(&mtu->lock); + + /* ep0 makes use of PIO, needn't unmap it */ + if (mep->epnum) + usb_gadget_unmap_request(&mtu->g, req, mep->is_in); + + dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", mep->name, + req, req->status, mreq->request.actual, mreq->request.length); + + usb_gadget_giveback_request(&mep->ep, &mreq->request); + + spin_lock(&mtu->lock); + mep->busy = busy; +} + +static void nuke(struct mtu3_ep *mep, const int status) +{ + struct mtu3_request *mreq = NULL; + + mep->busy = 1; + if (list_empty(&mep->req_list)) + return; + + dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status); + + /* exclude EP0 */ + if (mep->epnum) + mtu3_qmu_flush(mep); + + while (!list_empty(&mep->req_list)) { + mreq = list_first_entry(&mep->req_list, + struct mtu3_request, list); + mtu3_req_complete(mep, &mreq->request, status); + } +} + +static int mtu3_ep_enable(struct mtu3_ep *mep) +{ + const struct usb_endpoint_descriptor *desc; + const struct usb_ss_ep_comp_descriptor *comp_desc; + struct mtu3 *mtu = mep->mtu; + u32 interval = 0; + u32 mult = 0; + u32 burst = 0; + int max_packet; + int ret; + + desc = mep->desc; + comp_desc = mep->comp_desc; + mep->type = usb_endpoint_type(desc); + max_packet = usb_endpoint_maxp(desc); + mep->maxp = max_packet & GENMASK(10, 0); + + switch (mtu->g.speed) { + case USB_SPEED_SUPER: + if (usb_endpoint_xfer_int(desc) || + usb_endpoint_xfer_isoc(desc)) { + interval = desc->bInterval; + interval = clamp_val(interval, 1, 16) - 1; + if (usb_endpoint_xfer_isoc(desc) && comp_desc) + mult = comp_desc->bmAttributes; + } + if (comp_desc) + burst = comp_desc->bMaxBurst; + + break; + case USB_SPEED_HIGH: + if (usb_endpoint_xfer_isoc(desc) || + usb_endpoint_xfer_int(desc)) { + interval = desc->bInterval; + interval = clamp_val(interval, 1, 16) - 1; + burst = (max_packet & GENMASK(12, 11)) >> 11; + } + break; + default: + break; /*others are ignored */ + } + + dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n", + __func__, mep->maxp, interval, burst, mult); + + mep->ep.maxpacket = mep->maxp; + mep->ep.desc = desc; + mep->ep.comp_desc = comp_desc; + + /* slot mainly affects bulk/isoc transfer, so ignore int */ + mep->slot = usb_endpoint_xfer_int(desc) ? 
0 : mtu->slot; + + ret = mtu3_config_ep(mtu, mep, interval, burst, mult); + if (ret < 0) + return ret; + + ret = mtu3_gpd_ring_alloc(mep); + if (ret < 0) { + mtu3_deconfig_ep(mtu, mep); + return ret; + } + + mtu3_qmu_start(mep); + + return 0; +} + +static int mtu3_ep_disable(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + + mtu3_qmu_stop(mep); + + /* abort all pending requests */ + nuke(mep, -ESHUTDOWN); + mtu3_deconfig_ep(mtu, mep); + mtu3_gpd_ring_free(mep); + + mep->desc = NULL; + mep->ep.desc = NULL; + mep->comp_desc = NULL; + mep->type = 0; + mep->flags = 0; + + return 0; +} + +static int mtu3_gadget_ep_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + struct mtu3_ep *mep; + struct mtu3 *mtu; + unsigned long flags; + int ret = -EINVAL; + + if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { + pr_debug("%s invalid parameters\n", __func__); + return -EINVAL; + } + + if (!desc->wMaxPacketSize) { + pr_debug("%s missing wMaxPacketSize\n", __func__); + return -EINVAL; + } + mep = to_mtu3_ep(ep); + mtu = mep->mtu; + + /* check ep number and direction against endpoint */ + if (usb_endpoint_num(desc) != mep->epnum) + return -EINVAL; + + if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in) + return -EINVAL; + + dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name); + + if (mep->flags & MTU3_EP_ENABLED) { + dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n", + mep->name); + return 0; + } + + spin_lock_irqsave(&mtu->lock, flags); + mep->desc = desc; + mep->comp_desc = ep->comp_desc; + + ret = mtu3_ep_enable(mep); + if (ret) + goto error; + + mep->busy = 0; + mep->wedged = 0; + mep->flags |= MTU3_EP_ENABLED; + mtu->active_ep++; + +error: + spin_unlock_irqrestore(&mtu->lock, flags); + + dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep); + + return ret; +} + +static int mtu3_gadget_ep_disable(struct usb_ep *ep) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3 *mtu = mep->mtu; + unsigned long flags; + + dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name); + + if (!(mep->flags & MTU3_EP_ENABLED)) { + dev_warn(mtu->dev, "%s is already disabled\n", mep->name); + return 0; + } + + spin_lock_irqsave(&mtu->lock, flags); + mtu3_ep_disable(mep); + mep->flags &= ~MTU3_EP_ENABLED; + mtu->active_ep--; + spin_unlock_irqrestore(&(mtu->lock), flags); + + dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n", + __func__, mtu->active_ep, mtu->is_active); + + return 0; +} + +struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3_request *mreq; + + mreq = kzalloc(sizeof(*mreq), gfp_flags); + if (!mreq) + return NULL; + + mreq->request.dma = DMA_ADDR_INVALID; + mreq->epnum = mep->epnum; + mreq->mep = mep; + + return &mreq->request; +} + +void mtu3_free_request(struct usb_ep *ep, struct usb_request *req) +{ + kfree(to_mtu3_request(req)); +} + +static int mtu3_gadget_queue(struct usb_ep *ep, + struct usb_request *req, gfp_t gfp_flags) +{ + struct mtu3_ep *mep; + struct mtu3_request *mreq; + struct mtu3 *mtu; + unsigned long flags; + int ret = 0; + + if (!ep || !req) + return -EINVAL; + + if (!req->buf) + return -ENODATA; + + mep = to_mtu3_ep(ep); + mtu = mep->mtu; + mreq = to_mtu3_request(req); + mreq->mtu = mtu; + + if (mreq->mep != mep) + return -EINVAL; + + dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n", + __func__, mep->is_in ? 
"TX" : "RX", mreq->epnum, ep->name, + mreq, ep->maxpacket, mreq->request.length); + + if (req->length > GPD_BUF_SIZE) { + dev_warn(mtu->dev, + "req length > supported MAX:%d requested:%d\n", + GPD_BUF_SIZE, req->length); + return -EOPNOTSUPP; + } + + /* don't queue if the ep is down */ + if (!mep->desc) { + dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n", + req, ep->name); + return -ESHUTDOWN; + } + + mreq->request.actual = 0; + mreq->request.status = -EINPROGRESS; + + ret = usb_gadget_map_request(&mtu->g, req, mep->is_in); + if (ret) { + dev_err(mtu->dev, "dma mapping failed\n"); + return ret; + } + + spin_lock_irqsave(&mtu->lock, flags); + + if (mtu3_prepare_transfer(mep)) { + ret = -EAGAIN; + goto error; + } + + list_add_tail(&mreq->list, &mep->req_list); + mtu3_insert_gpd(mep, mreq); + mtu3_qmu_resume(mep); + +error: + spin_unlock_irqrestore(&mtu->lock, flags); + + return ret; +} + +static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3_request *mreq = to_mtu3_request(req); + struct mtu3_request *r; + unsigned long flags; + int ret = 0; + struct mtu3 *mtu = mep->mtu; + + if (!ep || !req || mreq->mep != mep) + return -EINVAL; + + dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req); + + spin_lock_irqsave(&mtu->lock, flags); + + list_for_each_entry(r, &mep->req_list, list) { + if (r == mreq) + break; + } + if (r != mreq) { + dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name); + ret = -EINVAL; + goto done; + } + + mtu3_qmu_flush(mep); /* REVISIT: set BPS ?? */ + mtu3_req_complete(mep, req, -ECONNRESET); + mtu3_qmu_start(mep); + +done: + spin_unlock_irqrestore(&mtu->lock, flags); + + return ret; +} + +/* + * Set or clear the halt bit of an EP. + * A halted EP won't TX/RX any data but will queue requests. + */ +static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3 *mtu = mep->mtu; + struct mtu3_request *mreq; + unsigned long flags; + int ret = 0; + + if (!ep) + return -EINVAL; + + dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name); + + spin_lock_irqsave(&mtu->lock, flags); + + if (mep->type == USB_ENDPOINT_XFER_ISOC) { + ret = -EINVAL; + goto done; + } + + mreq = next_request(mep); + if (value) { + /* + * If there is not request for TX-EP, QMU will not transfer + * data to TX-FIFO, so no need check whether TX-FIFO + * holds bytes or not here + */ + if (mreq) { + dev_dbg(mtu->dev, "req in progress, cannot halt %s\n", + ep->name); + ret = -EAGAIN; + goto done; + } + } else { + mep->wedged = 0; + } + + dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? 
"set" : "clear"); + + mtu3_ep_stall_set(mep, value); + +done: + spin_unlock_irqrestore(&mtu->lock, flags); + + return ret; +} + +/* Sets the halt feature with the clear requests ignored */ +static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + + if (!ep) + return -EINVAL; + + mep->wedged = 1; + + return usb_ep_set_halt(ep); +} + +static const struct usb_ep_ops mtu3_ep_ops = { + .enable = mtu3_gadget_ep_enable, + .disable = mtu3_gadget_ep_disable, + .alloc_request = mtu3_alloc_request, + .free_request = mtu3_free_request, + .queue = mtu3_gadget_queue, + .dequeue = mtu3_gadget_dequeue, + .set_halt = mtu3_gadget_ep_set_halt, + .set_wedge = mtu3_gadget_ep_set_wedge, +}; + +static int mtu3_gadget_get_frame(struct usb_gadget *gadget) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + + return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM); +} + +static int mtu3_gadget_wakeup(struct usb_gadget *gadget) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + unsigned long flags; + + dev_dbg(mtu->dev, "%s\n", __func__); + + /* remote wakeup feature is not enabled by host */ + if (!mtu->may_wakeup) + return -EOPNOTSUPP; + + spin_lock_irqsave(&mtu->lock, flags); + if (mtu->g.speed == USB_SPEED_SUPER) { + mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT); + } else { + mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME); + spin_unlock_irqrestore(&mtu->lock, flags); + usleep_range(10000, 11000); + spin_lock_irqsave(&mtu->lock, flags); + mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME); + } + spin_unlock_irqrestore(&mtu->lock, flags); + return 0; +} + +static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget, + int is_selfpowered) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + + mtu->is_self_powered = !!is_selfpowered; + return 0; +} + +static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + unsigned long flags; + + dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__, + is_on ? "on" : "off", mtu->is_active ? "" : "in"); + + /* we'd rather not pullup unless the device is active. 
*/ + spin_lock_irqsave(&mtu->lock, flags); + + is_on = !!is_on; + if (!mtu->is_active) { + /* save it for mtu3_start() to process the request */ + mtu->softconnect = is_on; + } else if (is_on != mtu->softconnect) { + mtu->softconnect = is_on; + mtu3_dev_on_off(mtu, is_on); + } + + spin_unlock_irqrestore(&mtu->lock, flags); + + return 0; +} + +static int mtu3_gadget_start(struct usb_gadget *gadget, + struct usb_gadget_driver *driver) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + unsigned long flags; + + if (mtu->gadget_driver) { + dev_err(mtu->dev, "%s is already bound to %s\n", + mtu->g.name, mtu->gadget_driver->driver.name); + return -EBUSY; + } + + dev_dbg(mtu->dev, "bind driver %s\n", driver->function); + + spin_lock_irqsave(&mtu->lock, flags); + + mtu->softconnect = 0; + mtu->gadget_driver = driver; + + if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL) + mtu3_start(mtu); + + spin_unlock_irqrestore(&mtu->lock, flags); + + return 0; +} + +static void stop_activity(struct mtu3 *mtu) +{ + struct usb_gadget_driver *driver = mtu->gadget_driver; + int i; + + /* don't disconnect if it's not connected */ + if (mtu->g.speed == USB_SPEED_UNKNOWN) + driver = NULL; + else + mtu->g.speed = USB_SPEED_UNKNOWN; + + /* deactivate the hardware */ + if (mtu->softconnect) { + mtu->softconnect = 0; + mtu3_dev_on_off(mtu, 0); + } + + /* + * killing any outstanding requests will quiesce the driver; + * then report disconnect + */ + nuke(mtu->ep0, -ESHUTDOWN); + for (i = 1; i < mtu->num_eps; i++) { + nuke(mtu->in_eps + i, -ESHUTDOWN); + nuke(mtu->out_eps + i, -ESHUTDOWN); + } + + if (driver) { + spin_unlock(&mtu->lock); + driver->disconnect(&mtu->g); + spin_lock(&mtu->lock); + } +} + +static int mtu3_gadget_stop(struct usb_gadget *g) +{ + struct mtu3 *mtu = gadget_to_mtu3(g); + unsigned long flags; + + dev_dbg(mtu->dev, "%s\n", __func__); + + spin_lock_irqsave(&mtu->lock, flags); + + stop_activity(mtu); + mtu->gadget_driver = NULL; + + if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL) + mtu3_stop(mtu); + + spin_unlock_irqrestore(&mtu->lock, flags); + + return 0; +} + +static const struct usb_gadget_ops mtu3_gadget_ops = { + .get_frame = mtu3_gadget_get_frame, + .wakeup = mtu3_gadget_wakeup, + .set_selfpowered = mtu3_gadget_set_self_powered, + .pullup = mtu3_gadget_pullup, + .udc_start = mtu3_gadget_start, + .udc_stop = mtu3_gadget_stop, +}; + +static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep, + u32 epnum, u32 is_in) +{ + mep->epnum = epnum; + mep->mtu = mtu; + mep->is_in = is_in; + + INIT_LIST_HEAD(&mep->req_list); + + sprintf(mep->name, "ep%d%s", epnum, + !epnum ? "" : (is_in ? 
"in" : "out")); + + mep->ep.name = mep->name; + INIT_LIST_HEAD(&mep->ep.ep_list); + + /* initialize maxpacket as SS */ + if (!epnum) { + usb_ep_set_maxpacket_limit(&mep->ep, 512); + mep->ep.caps.type_control = true; + mep->ep.ops = &mtu3_ep0_ops; + mtu->g.ep0 = &mep->ep; + } else { + usb_ep_set_maxpacket_limit(&mep->ep, 1024); + mep->ep.caps.type_iso = true; + mep->ep.caps.type_bulk = true; + mep->ep.caps.type_int = true; + mep->ep.ops = &mtu3_ep_ops; + list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list); + } + + dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name, + mep->ep.maxpacket); + + if (!epnum) { + mep->ep.caps.dir_in = true; + mep->ep.caps.dir_out = true; + } else if (is_in) { + mep->ep.caps.dir_in = true; + } else { + mep->ep.caps.dir_out = true; + } +} + +static void mtu3_gadget_init_eps(struct mtu3 *mtu) +{ + u8 epnum; + + /* initialize endpoint list just once */ + INIT_LIST_HEAD(&(mtu->g.ep_list)); + + dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n", + __func__, mtu->num_eps); + + init_hw_ep(mtu, mtu->ep0, 0, 0); + for (epnum = 1; epnum < mtu->num_eps; epnum++) { + init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1); + init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0); + } +} + +int mtu3_gadget_setup(struct mtu3 *mtu) +{ + int ret; + + mtu->g.ops = &mtu3_gadget_ops; + mtu->g.max_speed = mtu->max_speed; + mtu->g.speed = USB_SPEED_UNKNOWN; + mtu->g.sg_supported = 0; + mtu->g.name = MTU3_DRIVER_NAME; + mtu->is_active = 0; + + mtu3_gadget_init_eps(mtu); + + ret = usb_add_gadget_udc(mtu->dev, &mtu->g); + if (ret) { + dev_err(mtu->dev, "failed to register udc\n"); + return ret; + } + + usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); + + return 0; +} + +void mtu3_gadget_cleanup(struct mtu3 *mtu) +{ + usb_del_gadget_udc(&mtu->g); +} + +void mtu3_gadget_resume(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "gadget RESUME\n"); + if (mtu->gadget_driver && mtu->gadget_driver->resume) { + spin_unlock(&mtu->lock); + mtu->gadget_driver->resume(&mtu->g); + spin_lock(&mtu->lock); + } +} + +/* called when SOF packets stop for 3+ msec or enters U3 */ +void mtu3_gadget_suspend(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "gadget SUSPEND\n"); + if (mtu->gadget_driver && mtu->gadget_driver->suspend) { + spin_unlock(&mtu->lock); + mtu->gadget_driver->suspend(&mtu->g); + spin_lock(&mtu->lock); + } +} + +/* called when VBUS drops below session threshold, and in other cases */ +void mtu3_gadget_disconnect(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "gadget DISCONNECT\n"); + if (mtu->gadget_driver && mtu->gadget_driver->disconnect) { + spin_unlock(&mtu->lock); + mtu->gadget_driver->disconnect(&mtu->g); + spin_lock(&mtu->lock); + } + + usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); +} + +void mtu3_gadget_reset(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "gadget RESET\n"); + + /* report disconnect, if we didn't flush EP state */ + if (mtu->g.speed != USB_SPEED_UNKNOWN) + mtu3_gadget_disconnect(mtu); + + mtu->address = 0; + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + mtu->may_wakeup = 0; +} diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c new file mode 100644 index 0000000000000000000000000000000000000000..2d7427b48775663a68f2f12226112db10f6c29a4 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c @@ -0,0 +1,881 @@ +/* + * mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling + * + * Copyright (c) 2016 MediaTek Inc. 
+ * + * Author: Chunfeng.Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "mtu3.h" + +/* ep0 is always mtu3->in_eps[0] */ +#define next_ep0_request(mtu) next_request((mtu)->ep0) + +/* for high speed test mode; see USB 2.0 spec 7.1.20 */ +static const u8 mtu3_test_packet[53] = { + /* implicit SYNC then DATA0 to start */ + + /* JKJKJKJK x9 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* JJKKJJKK x8 */ + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + /* JJJJKKKK x8 */ + 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, + /* JJJJJJJKKKKKKK x8 */ + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + /* JJJJJJJK x8 */ + 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, + /* JKKKKKKK x10, JK */ + 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e, + /* implicit CRC16 then EOP to end */ +}; + +static char *decode_ep0_state(struct mtu3 *mtu) +{ + switch (mtu->ep0_state) { + case MU3D_EP0_STATE_SETUP: + return "SETUP"; + case MU3D_EP0_STATE_TX: + return "IN"; + case MU3D_EP0_STATE_RX: + return "OUT"; + case MU3D_EP0_STATE_TX_END: + return "TX-END"; + case MU3D_EP0_STATE_STALL: + return "STALL"; + default: + return "??"; + } +} + +static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req) +{ + mtu3_req_complete(mtu->ep0, req, 0); +} + +static int +forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup) +__releases(mtu->lock) +__acquires(mtu->lock) +{ + int ret; + + if (!mtu->gadget_driver) + return -EOPNOTSUPP; + + spin_unlock(&mtu->lock); + ret = mtu->gadget_driver->setup(&mtu->g, setup); + spin_lock(&mtu->lock); + + dev_dbg(mtu->dev, "%s ret %d\n", __func__, ret); + return ret; +} + +static void ep0_write_fifo(struct mtu3_ep *mep, const u8 *src, u16 len) +{ + void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0; + u16 index = 0; + + dev_dbg(mep->mtu->dev, "%s: ep%din, len=%d, buf=%p\n", + __func__, mep->epnum, len, src); + + if (len >= 4) { + iowrite32_rep(fifo, src, len >> 2); + index = len & ~0x03; + } + if (len & 0x02) { + writew(*(u16 *)&src[index], fifo); + index += 2; + } + if (len & 0x01) + writeb(src[index], fifo); +} + +static void ep0_read_fifo(struct mtu3_ep *mep, u8 *dst, u16 len) +{ + void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0; + u32 value; + u16 index = 0; + + dev_dbg(mep->mtu->dev, "%s: ep%dout len=%d buf=%p\n", + __func__, mep->epnum, len, dst); + + if (len >= 4) { + ioread32_rep(fifo, dst, len >> 2); + index = len & ~0x03; + } + if (len & 0x3) { + value = readl(fifo); + memcpy(&dst[index], &value, len & 0x3); + } + +} + +static void ep0_load_test_packet(struct mtu3 *mtu) +{ + /* + * because the length of test packet is less than max packet of HS ep0, + * write it into fifo directly. + */ + ep0_write_fifo(mtu->ep0, mtu3_test_packet, sizeof(mtu3_test_packet)); +} + +/* + * A. send STALL for setup transfer without data stage: + * set SENDSTALL and SETUPPKTRDY at the same time; + * B. send STALL for other cases: + * set SENDSTALL only. 
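+ *    (ep0_handle_setup() passes EP0_SETUPPKTRDY as @pktrdy for case A, i.e.
+ *    when the SETUP's wLength is zero, and 0 for case B)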
+ */ +static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy) +{ + struct mtu3 *mtu = mep0->mtu; + void __iomem *mbase = mtu->mac_base; + u32 csr; + + /* EP0_SENTSTALL is W1C */ + csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS; + if (set) + csr |= EP0_SENDSTALL | pktrdy; + else + csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL; + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr); + + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + + dev_dbg(mtu->dev, "ep0: %s STALL, ep0_state: %s\n", + set ? "SEND" : "CLEAR", decode_ep0_state(mtu)); +} + +static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq); + +static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req) +{} + +static void ep0_set_sel_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct mtu3_request *mreq; + struct mtu3 *mtu; + struct usb_set_sel_req sel; + + memcpy(&sel, req->buf, sizeof(sel)); + + mreq = to_mtu3_request(req); + mtu = mreq->mtu; + dev_dbg(mtu->dev, "u1sel:%d, u1pel:%d, u2sel:%d, u2pel:%d\n", + sel.u1_sel, sel.u1_pel, sel.u2_sel, sel.u2_pel); +} + +/* queue data stage to handle 6 byte SET_SEL request */ +static int ep0_set_sel(struct mtu3 *mtu, struct usb_ctrlrequest *setup) +{ + int ret; + u16 length = le16_to_cpu(setup->wLength); + + if (unlikely(length != 6)) { + dev_err(mtu->dev, "%s wrong wLength:%d\n", + __func__, length); + return -EINVAL; + } + + mtu->ep0_req.mep = mtu->ep0; + mtu->ep0_req.request.length = 6; + mtu->ep0_req.request.buf = mtu->setup_buf; + mtu->ep0_req.request.complete = ep0_set_sel_complete; + ret = ep0_queue(mtu->ep0, &mtu->ep0_req); + + return ret < 0 ? ret : 1; +} + +static int +ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup) +{ + struct mtu3_ep *mep = NULL; + int handled = 1; + u8 result[2] = {0, 0}; + u8 epnum = 0; + int is_in; + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED; + result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; + /* superspeed only */ + if (mtu->g.speed == USB_SPEED_SUPER) { + result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED; + result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED; + } + + dev_dbg(mtu->dev, "%s result=%x, U1=%x, U2=%x\n", __func__, + result[0], mtu->u1_enable, mtu->u2_enable); + + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + epnum = (u8) le16_to_cpu(setup->wIndex); + is_in = epnum & USB_DIR_IN; + epnum &= USB_ENDPOINT_NUMBER_MASK; + + if (epnum >= mtu->num_eps) { + handled = -EINVAL; + break; + } + if (!epnum) + break; + + mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum; + if (!mep->desc) { + handled = -EINVAL; + break; + } + if (mep->flags & MTU3_EP_STALL) + result[0] |= 1 << USB_ENDPOINT_HALT; + + break; + default: + /* class, vendor, etc ... 
delegate */ + handled = 0; + break; + } + + if (handled > 0) { + int ret; + + /* prepare a data stage for GET_STATUS */ + dev_dbg(mtu->dev, "get_status=%x\n", *(u16 *)result); + memcpy(mtu->setup_buf, result, sizeof(result)); + mtu->ep0_req.mep = mtu->ep0; + mtu->ep0_req.request.length = 2; + mtu->ep0_req.request.buf = &mtu->setup_buf; + mtu->ep0_req.request.complete = ep0_dummy_complete; + ret = ep0_queue(mtu->ep0, &mtu->ep0_req); + if (ret < 0) + handled = ret; + } + return handled; +} + +static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup) +{ + void __iomem *mbase = mtu->mac_base; + int handled = 1; + + switch (le16_to_cpu(setup->wIndex) >> 8) { + case TEST_J: + dev_dbg(mtu->dev, "TEST_J\n"); + mtu->test_mode_nr = TEST_J_MODE; + break; + case TEST_K: + dev_dbg(mtu->dev, "TEST_K\n"); + mtu->test_mode_nr = TEST_K_MODE; + break; + case TEST_SE0_NAK: + dev_dbg(mtu->dev, "TEST_SE0_NAK\n"); + mtu->test_mode_nr = TEST_SE0_NAK_MODE; + break; + case TEST_PACKET: + dev_dbg(mtu->dev, "TEST_PACKET\n"); + mtu->test_mode_nr = TEST_PACKET_MODE; + break; + default: + handled = -EINVAL; + goto out; + } + + mtu->test_mode = true; + + /* no TX completion interrupt, and need restart platform after test */ + if (mtu->test_mode_nr == TEST_PACKET_MODE) + ep0_load_test_packet(mtu); + + mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr); + + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + +out: + return handled; +} + +static int ep0_handle_feature_dev(struct mtu3 *mtu, + struct usb_ctrlrequest *setup, bool set) +{ + void __iomem *mbase = mtu->mac_base; + int handled = -EINVAL; + u32 lpc; + + switch (le16_to_cpu(setup->wValue)) { + case USB_DEVICE_REMOTE_WAKEUP: + mtu->may_wakeup = !!set; + handled = 1; + break; + case USB_DEVICE_TEST_MODE: + if (!set || (mtu->g.speed != USB_SPEED_HIGH) || + (le16_to_cpu(setup->wIndex) & 0xff)) + break; + + handled = handle_test_mode(mtu, setup); + break; + case USB_DEVICE_U1_ENABLE: + if (mtu->g.speed != USB_SPEED_SUPER || + mtu->g.state != USB_STATE_CONFIGURED) + break; + + lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL); + if (set) + lpc |= SW_U1_ACCEPT_ENABLE; + else + lpc &= ~SW_U1_ACCEPT_ENABLE; + mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc); + + mtu->u1_enable = !!set; + handled = 1; + break; + case USB_DEVICE_U2_ENABLE: + if (mtu->g.speed != USB_SPEED_SUPER || + mtu->g.state != USB_STATE_CONFIGURED) + break; + + lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL); + if (set) + lpc |= SW_U2_ACCEPT_ENABLE; + else + lpc &= ~SW_U2_ACCEPT_ENABLE; + mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc); + + mtu->u2_enable = !!set; + handled = 1; + break; + default: + handled = -EINVAL; + break; + } + return handled; +} + +static int ep0_handle_feature(struct mtu3 *mtu, + struct usb_ctrlrequest *setup, bool set) +{ + struct mtu3_ep *mep; + int handled = -EINVAL; + int is_in; + u16 value; + u16 index; + u8 epnum; + + value = le16_to_cpu(setup->wValue); + index = le16_to_cpu(setup->wIndex); + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + handled = ep0_handle_feature_dev(mtu, setup, set); + break; + case USB_RECIP_INTERFACE: + /* superspeed only */ + if ((value == USB_INTRF_FUNC_SUSPEND) + && (mtu->g.speed == USB_SPEED_SUPER)) { + /* + * forward the request because function drivers + * should handle it + */ + handled = 0; + } + break; + case USB_RECIP_ENDPOINT: + epnum = index & USB_ENDPOINT_NUMBER_MASK; + if (epnum == 0 || epnum >= mtu->num_eps || + value != USB_ENDPOINT_HALT) + break; + + is_in = index & USB_DIR_IN; + mep = (is_in 
? mtu->in_eps : mtu->out_eps) + epnum; + if (!mep->desc) + break; + + handled = 1; + /* ignore request if endpoint is wedged */ + if (mep->wedged) + break; + + mtu3_ep_stall_set(mep, set); + break; + default: + /* class, vendor, etc ... delegate */ + handled = 0; + break; + } + return handled; +} + +/* + * handle all control requests can be handled + * returns: + * negative errno - error happened + * zero - need delegate SETUP to gadget driver + * positive - already handled + */ +static int handle_standard_request(struct mtu3 *mtu, + struct usb_ctrlrequest *setup) +{ + void __iomem *mbase = mtu->mac_base; + enum usb_device_state state = mtu->g.state; + int handled = -EINVAL; + u32 dev_conf; + u16 value; + + value = le16_to_cpu(setup->wValue); + + /* the gadget driver handles everything except what we must handle */ + switch (setup->bRequest) { + case USB_REQ_SET_ADDRESS: + /* change it after the status stage */ + mtu->address = (u8) (value & 0x7f); + dev_dbg(mtu->dev, "set address to 0x%x\n", mtu->address); + + dev_conf = mtu3_readl(mbase, U3D_DEVICE_CONF); + dev_conf &= ~DEV_ADDR_MSK; + dev_conf |= DEV_ADDR(mtu->address); + mtu3_writel(mbase, U3D_DEVICE_CONF, dev_conf); + + if (mtu->address) + usb_gadget_set_state(&mtu->g, USB_STATE_ADDRESS); + else + usb_gadget_set_state(&mtu->g, USB_STATE_DEFAULT); + + handled = 1; + break; + case USB_REQ_SET_CONFIGURATION: + if (state == USB_STATE_ADDRESS) { + usb_gadget_set_state(&mtu->g, + USB_STATE_CONFIGURED); + } else if (state == USB_STATE_CONFIGURED) { + /* + * USB2 spec sec 9.4.7, if wValue is 0 then dev + * is moved to addressed state + */ + if (!value) + usb_gadget_set_state(&mtu->g, + USB_STATE_ADDRESS); + } + handled = 0; + break; + case USB_REQ_CLEAR_FEATURE: + handled = ep0_handle_feature(mtu, setup, 0); + break; + case USB_REQ_SET_FEATURE: + handled = ep0_handle_feature(mtu, setup, 1); + break; + case USB_REQ_GET_STATUS: + handled = ep0_get_status(mtu, setup); + break; + case USB_REQ_SET_SEL: + handled = ep0_set_sel(mtu, setup); + break; + case USB_REQ_SET_ISOCH_DELAY: + handled = 1; + break; + default: + /* delegate SET_CONFIGURATION, etc */ + handled = 0; + } + + return handled; +} + +/* receive an data packet (OUT) */ +static void ep0_rx_state(struct mtu3 *mtu) +{ + struct mtu3_request *mreq; + struct usb_request *req; + void __iomem *mbase = mtu->mac_base; + u32 maxp; + u32 csr; + u16 count = 0; + + dev_dbg(mtu->dev, "%s\n", __func__); + + csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS; + mreq = next_ep0_request(mtu); + req = &mreq->request; + + /* read packet and ack; or stall because of gadget driver bug */ + if (req) { + void *buf = req->buf + req->actual; + unsigned int len = req->length - req->actual; + + /* read the buffer */ + count = mtu3_readl(mbase, U3D_RXCOUNT0); + if (count > len) { + req->status = -EOVERFLOW; + count = len; + } + ep0_read_fifo(mtu->ep0, buf, count); + req->actual += count; + csr |= EP0_RXPKTRDY; + + maxp = mtu->g.ep0->maxpacket; + if (count < maxp || req->actual == req->length) { + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + dev_dbg(mtu->dev, "ep0 state: %s\n", + decode_ep0_state(mtu)); + + csr |= EP0_DATAEND; + } else { + req = NULL; + } + } else { + csr |= EP0_RXPKTRDY | EP0_SENDSTALL; + dev_dbg(mtu->dev, "%s: SENDSTALL\n", __func__); + } + + mtu3_writel(mbase, U3D_EP0CSR, csr); + + /* give back the request if have received all data */ + if (req) + ep0_req_giveback(mtu, req); + +} + +/* transmitting to the host (IN) */ +static void ep0_tx_state(struct mtu3 *mtu) +{ + struct mtu3_request *mreq = 
next_ep0_request(mtu); + struct usb_request *req; + u32 csr; + u8 *src; + u8 count; + u32 maxp; + + dev_dbg(mtu->dev, "%s\n", __func__); + + if (!mreq) + return; + + maxp = mtu->g.ep0->maxpacket; + req = &mreq->request; + + /* load the data */ + src = (u8 *)req->buf + req->actual; + count = min(maxp, req->length - req->actual); + if (count) + ep0_write_fifo(mtu->ep0, src, count); + + dev_dbg(mtu->dev, "%s act=%d, len=%d, cnt=%d, maxp=%d zero=%d\n", + __func__, req->actual, req->length, count, maxp, req->zero); + + req->actual += count; + + if ((count < maxp) + || ((req->actual == req->length) && !req->zero)) + mtu->ep0_state = MU3D_EP0_STATE_TX_END; + + /* send it out, triggering a "txpktrdy cleared" irq */ + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS; + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr | EP0_TXPKTRDY); + + dev_dbg(mtu->dev, "%s ep0csr=0x%x\n", __func__, + mtu3_readl(mtu->mac_base, U3D_EP0CSR)); +} + +static void ep0_read_setup(struct mtu3 *mtu, struct usb_ctrlrequest *setup) +{ + struct mtu3_request *mreq; + u32 count; + u32 csr; + + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS; + count = mtu3_readl(mtu->mac_base, U3D_RXCOUNT0); + + ep0_read_fifo(mtu->ep0, (u8 *)setup, count); + + dev_dbg(mtu->dev, "SETUP req%02x.%02x v%04x i%04x l%04x\n", + setup->bRequestType, setup->bRequest, + le16_to_cpu(setup->wValue), le16_to_cpu(setup->wIndex), + le16_to_cpu(setup->wLength)); + + /* clean up any leftover transfers */ + mreq = next_ep0_request(mtu); + if (mreq) + ep0_req_giveback(mtu, &mreq->request); + + if (le16_to_cpu(setup->wLength) == 0) { + ; /* no data stage, nothing to do */ + } else if (setup->bRequestType & USB_DIR_IN) { + mtu3_writel(mtu->mac_base, U3D_EP0CSR, + csr | EP0_SETUPPKTRDY | EP0_DPHTX); + mtu->ep0_state = MU3D_EP0_STATE_TX; + } else { + mtu3_writel(mtu->mac_base, U3D_EP0CSR, + (csr | EP0_SETUPPKTRDY) & (~EP0_DPHTX)); + mtu->ep0_state = MU3D_EP0_STATE_RX; + } +} + +static int ep0_handle_setup(struct mtu3 *mtu) +__releases(mtu->lock) +__acquires(mtu->lock) +{ + struct usb_ctrlrequest setup; + struct mtu3_request *mreq; + void __iomem *mbase = mtu->mac_base; + int handled = 0; + + ep0_read_setup(mtu, &setup); + + if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) + handled = handle_standard_request(mtu, &setup); + + dev_dbg(mtu->dev, "handled %d, ep0_state: %s\n", + handled, decode_ep0_state(mtu)); + + if (handled < 0) + goto stall; + else if (handled > 0) + goto finish; + + handled = forward_to_driver(mtu, &setup); + if (handled < 0) { +stall: + dev_dbg(mtu->dev, "%s stall (%d)\n", __func__, handled); + + ep0_stall_set(mtu->ep0, true, + le16_to_cpu(setup.wLength) ? 
0 : EP0_SETUPPKTRDY); + + return 0; + } + +finish: + if (mtu->test_mode) { + ; /* nothing to do */ + } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */ + + mtu3_writel(mbase, U3D_EP0CSR, + (mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS) + | EP0_SETUPPKTRDY | EP0_DATAEND); + + /* complete zlp request directly */ + mreq = next_ep0_request(mtu); + if (mreq && !mreq->request.length) + ep0_req_giveback(mtu, &mreq->request); + } + + return 0; +} + +irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + struct mtu3_request *mreq; + u32 int_status; + irqreturn_t ret = IRQ_NONE; + u32 csr; + u32 len; + + int_status = mtu3_readl(mbase, U3D_EPISR); + int_status &= mtu3_readl(mbase, U3D_EPIER); + mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */ + + /* only handle ep0's */ + if (!(int_status & EP0ISR)) + return IRQ_NONE; + + csr = mtu3_readl(mbase, U3D_EP0CSR); + + dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr); + + /* we sent a stall.. need to clear it now.. */ + if (csr & EP0_SENTSTALL) { + ep0_stall_set(mtu->ep0, false, 0); + csr = mtu3_readl(mbase, U3D_EP0CSR); + ret = IRQ_HANDLED; + } + dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu)); + + switch (mtu->ep0_state) { + case MU3D_EP0_STATE_TX: + /* irq on clearing txpktrdy */ + if ((csr & EP0_FIFOFULL) == 0) { + ep0_tx_state(mtu); + ret = IRQ_HANDLED; + } + break; + case MU3D_EP0_STATE_RX: + /* irq on set rxpktrdy */ + if (csr & EP0_RXPKTRDY) { + ep0_rx_state(mtu); + ret = IRQ_HANDLED; + } + break; + case MU3D_EP0_STATE_TX_END: + mtu3_writel(mbase, U3D_EP0CSR, + (csr & EP0_W1C_BITS) | EP0_DATAEND); + + mreq = next_ep0_request(mtu); + if (mreq) + ep0_req_giveback(mtu, &mreq->request); + + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + ret = IRQ_HANDLED; + dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu)); + break; + case MU3D_EP0_STATE_SETUP: + if (!(csr & EP0_SETUPPKTRDY)) + break; + + len = mtu3_readl(mbase, U3D_RXCOUNT0); + if (len != 8) { + dev_err(mtu->dev, "SETUP packet len %d != 8 ?\n", len); + break; + } + + ep0_handle_setup(mtu); + ret = IRQ_HANDLED; + break; + default: + /* can't happen */ + ep0_stall_set(mtu->ep0, true, 0); + WARN_ON(1); + break; + } + + return ret; +} + + +static int mtu3_ep0_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + /* always enabled */ + return -EINVAL; +} + +static int mtu3_ep0_disable(struct usb_ep *ep) +{ + /* always enabled */ + return -EINVAL; +} + +static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq) +{ + struct mtu3 *mtu = mep->mtu; + + mreq->mtu = mtu; + mreq->request.actual = 0; + mreq->request.status = -EINPROGRESS; + + dev_dbg(mtu->dev, "%s %s (ep0_state: %s), len#%d\n", __func__, + mep->name, decode_ep0_state(mtu), mreq->request.length); + + if (!list_empty(&mep->req_list)) + return -EBUSY; + + switch (mtu->ep0_state) { + case MU3D_EP0_STATE_SETUP: + case MU3D_EP0_STATE_RX: /* control-OUT data */ + case MU3D_EP0_STATE_TX: /* control-IN data */ + break; + default: + dev_err(mtu->dev, "%s, error in ep0 state %s\n", __func__, + decode_ep0_state(mtu)); + return -EINVAL; + } + + list_add_tail(&mreq->list, &mep->req_list); + + /* sequence #1, IN ... 
start writing the data */ + if (mtu->ep0_state == MU3D_EP0_STATE_TX) + ep0_tx_state(mtu); + + return 0; +} + +static int mtu3_ep0_queue(struct usb_ep *ep, + struct usb_request *req, gfp_t gfp) +{ + struct mtu3_ep *mep; + struct mtu3_request *mreq; + struct mtu3 *mtu; + unsigned long flags; + int ret = 0; + + if (!ep || !req) + return -EINVAL; + + mep = to_mtu3_ep(ep); + mtu = mep->mtu; + mreq = to_mtu3_request(req); + + spin_lock_irqsave(&mtu->lock, flags); + ret = ep0_queue(mep, mreq); + spin_unlock_irqrestore(&mtu->lock, flags); + return ret; +} + +static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) +{ + /* we just won't support this */ + return -EINVAL; +} + +static int mtu3_ep0_halt(struct usb_ep *ep, int value) +{ + struct mtu3_ep *mep; + struct mtu3 *mtu; + unsigned long flags; + int ret = 0; + + if (!ep || !value) + return -EINVAL; + + mep = to_mtu3_ep(ep); + mtu = mep->mtu; + + dev_dbg(mtu->dev, "%s\n", __func__); + + spin_lock_irqsave(&mtu->lock, flags); + + if (!list_empty(&mep->req_list)) { + ret = -EBUSY; + goto cleanup; + } + + switch (mtu->ep0_state) { + /* + * stalls are usually issued after parsing SETUP packet, either + * directly in irq context from setup() or else later. + */ + case MU3D_EP0_STATE_TX: + case MU3D_EP0_STATE_TX_END: + case MU3D_EP0_STATE_RX: + case MU3D_EP0_STATE_SETUP: + ep0_stall_set(mtu->ep0, true, 0); + break; + default: + dev_dbg(mtu->dev, "ep0 can't halt in state %s\n", + decode_ep0_state(mtu)); + ret = -EINVAL; + } + +cleanup: + spin_unlock_irqrestore(&mtu->lock, flags); + return ret; +} + +const struct usb_ep_ops mtu3_ep0_ops = { + .enable = mtu3_ep0_enable, + .disable = mtu3_ep0_disable, + .alloc_request = mtu3_alloc_request, + .free_request = mtu3_free_request, + .queue = mtu3_ep0_queue, + .dequeue = mtu3_ep0_dequeue, + .set_halt = mtu3_ep0_halt, +}; diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c new file mode 100644 index 0000000000000000000000000000000000000000..cd4d0108785553bcdc33f80c789247b077b70281 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_host.c @@ -0,0 +1,294 @@ +/* + * mtu3_dr.c - dual role switch and host glue layer + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mtu3.h" +#include "mtu3_dr.h" + +#define PERI_WK_CTRL1 0x404 +#define UWK_CTL1_IS_C(x) (((x) & 0xf) << 26) +#define UWK_CTL1_IS_E BIT(25) +#define UWK_CTL1_IDDIG_C(x) (((x) & 0xf) << 11) /* cycle debounce */ +#define UWK_CTL1_IDDIG_E BIT(10) /* enable debounce */ +#define UWK_CTL1_IDDIG_P BIT(9) /* polarity */ +#define UWK_CTL1_IS_P BIT(6) /* polarity for ip sleep */ + +/* + * ip-sleep wakeup mode: + * all clocks can be turn off, but power domain should be kept on + */ +static void ssusb_wakeup_ip_sleep_en(struct ssusb_mtk *ssusb) +{ + u32 tmp; + struct regmap *pericfg = ssusb->pericfg; + + regmap_read(pericfg, PERI_WK_CTRL1, &tmp); + tmp &= ~UWK_CTL1_IS_P; + tmp &= ~(UWK_CTL1_IS_C(0xf)); + tmp |= UWK_CTL1_IS_C(0x8); + regmap_write(pericfg, PERI_WK_CTRL1, tmp); + regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_IS_E); + + regmap_read(pericfg, PERI_WK_CTRL1, &tmp); + dev_dbg(ssusb->dev, "%s(): WK_CTRL1[P6,E25,C26:29]=%#x\n", + __func__, tmp); +} + +static void ssusb_wakeup_ip_sleep_dis(struct ssusb_mtk *ssusb) +{ + u32 tmp; + + regmap_read(ssusb->pericfg, PERI_WK_CTRL1, &tmp); + tmp &= ~UWK_CTL1_IS_E; + regmap_write(ssusb->pericfg, PERI_WK_CTRL1, tmp); +} + +int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb, + struct device_node *dn) +{ + struct device *dev = ssusb->dev; + + /* + * Wakeup function is optional, so it is not an error if this property + * does not exist, and in such case, no need to get relative + * properties anymore. + */ + ssusb->wakeup_en = of_property_read_bool(dn, "mediatek,enable-wakeup"); + if (!ssusb->wakeup_en) + return 0; + + ssusb->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0"); + if (IS_ERR(ssusb->wk_deb_p0)) { + dev_err(dev, "fail to get wakeup_deb_p0\n"); + return PTR_ERR(ssusb->wk_deb_p0); + } + + if (of_property_read_bool(dn, "wakeup_deb_p1")) { + ssusb->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1"); + if (IS_ERR(ssusb->wk_deb_p1)) { + dev_err(dev, "fail to get wakeup_deb_p1\n"); + return PTR_ERR(ssusb->wk_deb_p1); + } + } + + ssusb->pericfg = syscon_regmap_lookup_by_phandle(dn, + "mediatek,syscon-wakeup"); + if (IS_ERR(ssusb->pericfg)) { + dev_err(dev, "fail to get pericfg regs\n"); + return PTR_ERR(ssusb->pericfg); + } + + return 0; +} + +static int ssusb_wakeup_clks_enable(struct ssusb_mtk *ssusb) +{ + int ret; + + ret = clk_prepare_enable(ssusb->wk_deb_p0); + if (ret) { + dev_err(ssusb->dev, "failed to enable wk_deb_p0\n"); + goto usb_p0_err; + } + + ret = clk_prepare_enable(ssusb->wk_deb_p1); + if (ret) { + dev_err(ssusb->dev, "failed to enable wk_deb_p1\n"); + goto usb_p1_err; + } + + return 0; + +usb_p1_err: + clk_disable_unprepare(ssusb->wk_deb_p0); +usb_p0_err: + return -EINVAL; +} + +static void ssusb_wakeup_clks_disable(struct ssusb_mtk *ssusb) +{ + clk_disable_unprepare(ssusb->wk_deb_p1); + clk_disable_unprepare(ssusb->wk_deb_p0); +} + +static void host_ports_num_get(struct ssusb_mtk *ssusb) +{ + u32 xhci_cap; + + xhci_cap = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP); + ssusb->u2_ports = SSUSB_IP_XHCI_U2_PORT_NUM(xhci_cap); + ssusb->u3_ports = SSUSB_IP_XHCI_U3_PORT_NUM(xhci_cap); + + dev_dbg(ssusb->dev, "host - u2_ports:%d, u3_ports:%d\n", + ssusb->u2_ports, ssusb->u3_ports); +} + +/* only configure ports will be used later */ +int ssusb_host_enable(struct ssusb_mtk *ssusb) +{ + void __iomem *ibase = ssusb->ippc_base; + int num_u3p = ssusb->u3_ports; + int num_u2p = ssusb->u2_ports; + u32 check_clk; + u32 value; + int i; + + /* power 
on host ip */ + mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN); + + /* power on and enable all u3 ports */ + for (i = 0; i < num_u3p; i++) { + value = mtu3_readl(ibase, SSUSB_U3_CTRL(i)); + value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS); + value |= SSUSB_U3_PORT_HOST_SEL; + mtu3_writel(ibase, SSUSB_U3_CTRL(i), value); + } + + /* power on and enable all u2 ports */ + for (i = 0; i < num_u2p; i++) { + value = mtu3_readl(ibase, SSUSB_U2_CTRL(i)); + value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS); + value |= SSUSB_U2_PORT_HOST_SEL; + mtu3_writel(ibase, SSUSB_U2_CTRL(i), value); + } + + check_clk = SSUSB_XHCI_RST_B_STS; + if (num_u3p) + check_clk = SSUSB_U3_MAC_RST_B_STS; + + return ssusb_check_clocks(ssusb, check_clk); +} + +int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend) +{ + void __iomem *ibase = ssusb->ippc_base; + int num_u3p = ssusb->u3_ports; + int num_u2p = ssusb->u2_ports; + u32 value; + int ret; + int i; + + /* power down and disable all u3 ports */ + for (i = 0; i < num_u3p; i++) { + value = mtu3_readl(ibase, SSUSB_U3_CTRL(i)); + value |= SSUSB_U3_PORT_PDN; + value |= suspend ? 0 : SSUSB_U3_PORT_DIS; + mtu3_writel(ibase, SSUSB_U3_CTRL(i), value); + } + + /* power down and disable all u2 ports */ + for (i = 0; i < num_u2p; i++) { + value = mtu3_readl(ibase, SSUSB_U2_CTRL(i)); + value |= SSUSB_U2_PORT_PDN; + value |= suspend ? 0 : SSUSB_U2_PORT_DIS; + mtu3_writel(ibase, SSUSB_U2_CTRL(i), value); + } + + /* power down host ip */ + mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN); + + if (!suspend) + return 0; + + /* wait for host ip to sleep */ + ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value, + (value & SSUSB_IP_SLEEP_STS), 100, 100000); + if (ret) + dev_err(ssusb->dev, "ip sleep failed!!!\n"); + + return ret; +} + +static void ssusb_host_setup(struct ssusb_mtk *ssusb) +{ + host_ports_num_get(ssusb); + + /* + * power on host and power on/enable all ports + * if support OTG, gadget driver will switch port0 to device mode + */ + ssusb_host_enable(ssusb); + + /* if port0 supports dual-role, works as host mode by default */ + ssusb_set_vbus(&ssusb->otg_switch, 1); +} + +static void ssusb_host_cleanup(struct ssusb_mtk *ssusb) +{ + if (ssusb->is_host) + ssusb_set_vbus(&ssusb->otg_switch, 0); + + ssusb_host_disable(ssusb, false); +} + +/* + * If host supports multiple ports, the VBUSes(5V) of ports except port0 + * which supports OTG are better to be enabled by default in DTS. + * Because the host driver will keep link with devices attached when system + * enters suspend mode, so no need to control VBUSes after initialization. 
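+ * (e.g. by modelling those VBUS supplies as always-on fixed regulators in
+ * the device tree)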
+ */ +int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn) +{ + struct device *parent_dev = ssusb->dev; + int ret; + + ssusb_host_setup(ssusb); + + ret = of_platform_populate(parent_dn, NULL, NULL, parent_dev); + if (ret) { + dev_dbg(parent_dev, "failed to create child devices at %s\n", + parent_dn->full_name); + return ret; + } + + dev_info(parent_dev, "xHCI platform device register success...\n"); + + return 0; +} + +void ssusb_host_exit(struct ssusb_mtk *ssusb) +{ + of_platform_depopulate(ssusb->dev); + ssusb_host_cleanup(ssusb); +} + +int ssusb_wakeup_enable(struct ssusb_mtk *ssusb) +{ + int ret = 0; + + if (ssusb->wakeup_en) { + ret = ssusb_wakeup_clks_enable(ssusb); + ssusb_wakeup_ip_sleep_en(ssusb); + } + return ret; +} + +void ssusb_wakeup_disable(struct ssusb_mtk *ssusb) +{ + if (ssusb->wakeup_en) { + ssusb_wakeup_ip_sleep_dis(ssusb); + ssusb_wakeup_clks_disable(ssusb); + } +} diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..212367295276a02b2592887bb70b7daf433153ed --- /dev/null +++ b/drivers/usb/mtu3/mtu3_hw_regs.h @@ -0,0 +1,473 @@ +/* + * mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _SSUSB_HW_REGS_H_ +#define _SSUSB_HW_REGS_H_ + +/* segment offset of MAC register */ +#define SSUSB_DEV_BASE 0x0000 +#define SSUSB_EPCTL_CSR_BASE 0x0800 +#define SSUSB_USB3_MAC_CSR_BASE 0x1400 +#define SSUSB_USB3_SYS_CSR_BASE 0x1400 +#define SSUSB_USB2_CSR_BASE 0x2400 + +/* IPPC register in Infra */ +#define SSUSB_SIFSLV_IPPC_BASE 0x0000 + +/* --------------- SSUSB_DEV REGISTER DEFINITION --------------- */ + +#define U3D_LV1ISR (SSUSB_DEV_BASE + 0x0000) +#define U3D_LV1IER (SSUSB_DEV_BASE + 0x0004) +#define U3D_LV1IESR (SSUSB_DEV_BASE + 0x0008) +#define U3D_LV1IECR (SSUSB_DEV_BASE + 0x000C) + +#define U3D_EPISR (SSUSB_DEV_BASE + 0x0080) +#define U3D_EPIER (SSUSB_DEV_BASE + 0x0084) +#define U3D_EPIESR (SSUSB_DEV_BASE + 0x0088) +#define U3D_EPIECR (SSUSB_DEV_BASE + 0x008C) + +#define U3D_EP0CSR (SSUSB_DEV_BASE + 0x0100) +#define U3D_RXCOUNT0 (SSUSB_DEV_BASE + 0x0108) +#define U3D_RESERVED (SSUSB_DEV_BASE + 0x010C) +#define U3D_TX1CSR0 (SSUSB_DEV_BASE + 0x0110) +#define U3D_TX1CSR1 (SSUSB_DEV_BASE + 0x0114) +#define U3D_TX1CSR2 (SSUSB_DEV_BASE + 0x0118) + +#define U3D_RX1CSR0 (SSUSB_DEV_BASE + 0x0210) +#define U3D_RX1CSR1 (SSUSB_DEV_BASE + 0x0214) +#define U3D_RX1CSR2 (SSUSB_DEV_BASE + 0x0218) + +#define U3D_FIFO0 (SSUSB_DEV_BASE + 0x0300) + +#define U3D_QCR0 (SSUSB_DEV_BASE + 0x0400) +#define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404) +#define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408) +#define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C) + +#define U3D_TXQCSR1 (SSUSB_DEV_BASE + 0x0510) +#define U3D_TXQSAR1 (SSUSB_DEV_BASE + 0x0514) +#define U3D_TXQCPR1 (SSUSB_DEV_BASE + 0x0518) + +#define U3D_RXQCSR1 (SSUSB_DEV_BASE + 0x0610) +#define U3D_RXQSAR1 (SSUSB_DEV_BASE + 0x0614) +#define U3D_RXQCPR1 (SSUSB_DEV_BASE + 0x0618) +#define U3D_RXQLDPR1 (SSUSB_DEV_BASE + 0x061C) + +#define U3D_QISAR0 (SSUSB_DEV_BASE + 0x0700) +#define U3D_QIER0 (SSUSB_DEV_BASE + 0x0704) +#define U3D_QIESR0 (SSUSB_DEV_BASE + 0x0708) +#define U3D_QIECR0 (SSUSB_DEV_BASE + 0x070C) +#define U3D_QISAR1 (SSUSB_DEV_BASE + 0x0710) +#define U3D_QIER1 (SSUSB_DEV_BASE + 0x0714) +#define U3D_QIESR1 (SSUSB_DEV_BASE + 0x0718) +#define U3D_QIECR1 (SSUSB_DEV_BASE + 0x071C) + +#define U3D_TQERRIR0 (SSUSB_DEV_BASE + 0x0780) +#define U3D_TQERRIER0 (SSUSB_DEV_BASE + 0x0784) +#define U3D_TQERRIESR0 (SSUSB_DEV_BASE + 0x0788) +#define U3D_TQERRIECR0 (SSUSB_DEV_BASE + 0x078C) +#define U3D_RQERRIR0 (SSUSB_DEV_BASE + 0x07C0) +#define U3D_RQERRIER0 (SSUSB_DEV_BASE + 0x07C4) +#define U3D_RQERRIESR0 (SSUSB_DEV_BASE + 0x07C8) +#define U3D_RQERRIECR0 (SSUSB_DEV_BASE + 0x07CC) +#define U3D_RQERRIR1 (SSUSB_DEV_BASE + 0x07D0) +#define U3D_RQERRIER1 (SSUSB_DEV_BASE + 0x07D4) +#define U3D_RQERRIESR1 (SSUSB_DEV_BASE + 0x07D8) +#define U3D_RQERRIECR1 (SSUSB_DEV_BASE + 0x07DC) + +#define U3D_CAP_EP0FFSZ (SSUSB_DEV_BASE + 0x0C04) +#define U3D_CAP_EPNTXFFSZ (SSUSB_DEV_BASE + 0x0C08) +#define U3D_CAP_EPNRXFFSZ (SSUSB_DEV_BASE + 0x0C0C) +#define U3D_CAP_EPINFO (SSUSB_DEV_BASE + 0x0C10) +#define U3D_MISC_CTRL (SSUSB_DEV_BASE + 0x0C84) + +/*---------------- SSUSB_DEV FIELD DEFINITION ---------------*/ + +/* U3D_LV1ISR */ +#define EP_CTRL_INTR BIT(5) +#define MAC2_INTR BIT(4) +#define DMA_INTR BIT(3) +#define MAC3_INTR BIT(2) +#define QMU_INTR BIT(1) +#define BMU_INTR BIT(0) + +/* U3D_LV1IECR */ +#define LV1IECR_MSK GENMASK(31, 0) + +/* U3D_EPISR */ +#define EPRISR(x) (BIT(16) << (x)) +#define EPTISR(x) (BIT(0) << (x)) +#define EP0ISR BIT(0) + +/* U3D_EP0CSR */ +#define EP0_SENDSTALL BIT(25) +#define EP0_FIFOFULL BIT(23) +#define EP0_SENTSTALL BIT(22) +#define EP0_DPHTX 
BIT(20) +#define EP0_DATAEND BIT(19) +#define EP0_TXPKTRDY BIT(18) +#define EP0_SETUPPKTRDY BIT(17) +#define EP0_RXPKTRDY BIT(16) +#define EP0_MAXPKTSZ_MSK GENMASK(9, 0) +#define EP0_MAXPKTSZ(x) ((x) & EP0_MAXPKTSZ_MSK) +#define EP0_W1C_BITS (~(EP0_RXPKTRDY | EP0_SETUPPKTRDY | EP0_SENTSTALL)) + +/* U3D_TX1CSR0 */ +#define TX_DMAREQEN BIT(29) +#define TX_FIFOFULL BIT(25) +#define TX_FIFOEMPTY BIT(24) +#define TX_SENTSTALL BIT(22) +#define TX_SENDSTALL BIT(21) +#define TX_TXPKTRDY BIT(16) +#define TX_TXMAXPKTSZ_MSK GENMASK(10, 0) +#define TX_TXMAXPKTSZ(x) ((x) & TX_TXMAXPKTSZ_MSK) +#define TX_W1C_BITS (~(TX_SENTSTALL)) + +/* U3D_TX1CSR1 */ +#define TX_MULT(x) (((x) & 0x3) << 22) +#define TX_MAX_PKT(x) (((x) & 0x3f) << 16) +#define TX_SLOT(x) (((x) & 0x3f) << 8) +#define TX_TYPE(x) (((x) & 0x3) << 4) +#define TX_SS_BURST(x) (((x) & 0xf) << 0) + +/* for TX_TYPE & RX_TYPE */ +#define TYPE_BULK (0x0) +#define TYPE_INT (0x1) +#define TYPE_ISO (0x2) +#define TYPE_MASK (0x3) + +/* U3D_TX1CSR2 */ +#define TX_BINTERVAL(x) (((x) & 0xff) << 24) +#define TX_FIFOSEGSIZE(x) (((x) & 0xf) << 16) +#define TX_FIFOADDR(x) (((x) & 0x1fff) << 0) + +/* U3D_RX1CSR0 */ +#define RX_DMAREQEN BIT(29) +#define RX_SENTSTALL BIT(22) +#define RX_SENDSTALL BIT(21) +#define RX_RXPKTRDY BIT(16) +#define RX_RXMAXPKTSZ_MSK GENMASK(10, 0) +#define RX_RXMAXPKTSZ(x) ((x) & RX_RXMAXPKTSZ_MSK) +#define RX_W1C_BITS (~(RX_SENTSTALL | RX_RXPKTRDY)) + +/* U3D_RX1CSR1 */ +#define RX_MULT(x) (((x) & 0x3) << 22) +#define RX_MAX_PKT(x) (((x) & 0x3f) << 16) +#define RX_SLOT(x) (((x) & 0x3f) << 8) +#define RX_TYPE(x) (((x) & 0x3) << 4) +#define RX_SS_BURST(x) (((x) & 0xf) << 0) + +/* U3D_RX1CSR2 */ +#define RX_BINTERVAL(x) (((x) & 0xff) << 24) +#define RX_FIFOSEGSIZE(x) (((x) & 0xf) << 16) +#define RX_FIFOADDR(x) (((x) & 0x1fff) << 0) + +/* U3D_QCR0 */ +#define QMU_RX_CS_EN(x) (BIT(16) << (x)) +#define QMU_TX_CS_EN(x) (BIT(0) << (x)) +#define QMU_CS16B_EN BIT(0) + +/* U3D_QCR1 */ +#define QMU_TX_ZLP(x) (BIT(0) << (x)) + +/* U3D_QCR3 */ +#define QMU_RX_COZ(x) (BIT(16) << (x)) +#define QMU_RX_ZLP(x) (BIT(0) << (x)) + +/* U3D_TXQCSR1 */ +/* U3D_RXQCSR1 */ +#define QMU_Q_ACTIVE BIT(15) +#define QMU_Q_STOP BIT(2) +#define QMU_Q_RESUME BIT(1) +#define QMU_Q_START BIT(0) + +/* U3D_QISAR0, U3D_QIER0, U3D_QIESR0, U3D_QIECR0 */ +#define QMU_RX_DONE_INT(x) (BIT(16) << (x)) +#define QMU_TX_DONE_INT(x) (BIT(0) << (x)) + +/* U3D_QISAR1, U3D_QIER1, U3D_QIESR1, U3D_QIECR1 */ +#define RXQ_ZLPERR_INT BIT(20) +#define RXQ_LENERR_INT BIT(18) +#define RXQ_CSERR_INT BIT(17) +#define RXQ_EMPTY_INT BIT(16) +#define TXQ_LENERR_INT BIT(2) +#define TXQ_CSERR_INT BIT(1) +#define TXQ_EMPTY_INT BIT(0) + +/* U3D_TQERRIR0, U3D_TQERRIER0, U3D_TQERRIESR0, U3D_TQERRIECR0 */ +#define QMU_TX_LEN_ERR(x) (BIT(16) << (x)) +#define QMU_TX_CS_ERR(x) (BIT(0) << (x)) + +/* U3D_RQERRIR0, U3D_RQERRIER0, U3D_RQERRIESR0, U3D_RQERRIECR0 */ +#define QMU_RX_LEN_ERR(x) (BIT(16) << (x)) +#define QMU_RX_CS_ERR(x) (BIT(0) << (x)) + +/* U3D_RQERRIR1, U3D_RQERRIER1, U3D_RQERRIESR1, U3D_RQERRIECR1 */ +#define QMU_RX_ZLP_ERR(n) (BIT(16) << (n)) + +/* U3D_CAP_EPINFO */ +#define CAP_RX_EP_NUM(x) (((x) >> 8) & 0x1f) +#define CAP_TX_EP_NUM(x) ((x) & 0x1f) + +/* U3D_MISC_CTRL */ +#define VBUS_ON BIT(1) +#define VBUS_FRC_EN BIT(0) + + +/*---------------- SSUSB_EPCTL_CSR REGISTER DEFINITION ----------------*/ + +#define U3D_DEVICE_CONF (SSUSB_EPCTL_CSR_BASE + 0x0000) +#define U3D_EP_RST (SSUSB_EPCTL_CSR_BASE + 0x0004) + +#define U3D_DEV_LINK_INTR_ENABLE (SSUSB_EPCTL_CSR_BASE + 0x0050) +#define 
U3D_DEV_LINK_INTR (SSUSB_EPCTL_CSR_BASE + 0x0054) + +/*---------------- SSUSB_EPCTL_CSR FIELD DEFINITION ----------------*/ + +/* U3D_DEVICE_CONF */ +#define DEV_ADDR_MSK GENMASK(30, 24) +#define DEV_ADDR(x) ((0x7f & (x)) << 24) +#define HW_USB2_3_SEL BIT(18) +#define SW_USB2_3_SEL_EN BIT(17) +#define SW_USB2_3_SEL BIT(16) +#define SSUSB_DEV_SPEED(x) ((x) & 0x7) + +/* U3D_EP_RST */ +#define EP1_IN_RST BIT(17) +#define EP1_OUT_RST BIT(1) +#define EP_RST(is_in, epnum) (((is_in) ? BIT(16) : BIT(0)) << (epnum)) +#define EP0_RST BIT(0) + +/* U3D_DEV_LINK_INTR_ENABLE */ +/* U3D_DEV_LINK_INTR */ +#define SSUSB_DEV_SPEED_CHG_INTR BIT(0) + + +/*---------------- SSUSB_USB3_MAC_CSR REGISTER DEFINITION ----------------*/ + +#define U3D_LTSSM_CTRL (SSUSB_USB3_MAC_CSR_BASE + 0x0010) +#define U3D_USB3_CONFIG (SSUSB_USB3_MAC_CSR_BASE + 0x001C) + +#define U3D_LTSSM_INTR_ENABLE (SSUSB_USB3_MAC_CSR_BASE + 0x013C) +#define U3D_LTSSM_INTR (SSUSB_USB3_MAC_CSR_BASE + 0x0140) + +/*---------------- SSUSB_USB3_MAC_CSR FIELD DEFINITION ----------------*/ + +/* U3D_LTSSM_CTRL */ +#define FORCE_POLLING_FAIL BIT(4) +#define FORCE_RXDETECT_FAIL BIT(3) +#define SOFT_U3_EXIT_EN BIT(2) +#define COMPLIANCE_EN BIT(1) +#define U1_GO_U2_EN BIT(0) + +/* U3D_USB3_CONFIG */ +#define USB3_EN BIT(0) + +/* U3D_LTSSM_INTR_ENABLE */ +/* U3D_LTSSM_INTR */ +#define U3_RESUME_INTR BIT(18) +#define U3_LFPS_TMOUT_INTR BIT(17) +#define VBUS_FALL_INTR BIT(16) +#define VBUS_RISE_INTR BIT(15) +#define RXDET_SUCCESS_INTR BIT(14) +#define EXIT_U3_INTR BIT(13) +#define EXIT_U2_INTR BIT(12) +#define EXIT_U1_INTR BIT(11) +#define ENTER_U3_INTR BIT(10) +#define ENTER_U2_INTR BIT(9) +#define ENTER_U1_INTR BIT(8) +#define ENTER_U0_INTR BIT(7) +#define RECOVERY_INTR BIT(6) +#define WARM_RST_INTR BIT(5) +#define HOT_RST_INTR BIT(4) +#define LOOPBACK_INTR BIT(3) +#define COMPLIANCE_INTR BIT(2) +#define SS_DISABLE_INTR BIT(1) +#define SS_INACTIVE_INTR BIT(0) + +/*---------------- SSUSB_USB3_SYS_CSR REGISTER DEFINITION ----------------*/ + +#define U3D_LINK_UX_INACT_TIMER (SSUSB_USB3_SYS_CSR_BASE + 0x020C) +#define U3D_LINK_POWER_CONTROL (SSUSB_USB3_SYS_CSR_BASE + 0x0210) +#define U3D_LINK_ERR_COUNT (SSUSB_USB3_SYS_CSR_BASE + 0x0214) + +/*---------------- SSUSB_USB3_SYS_CSR FIELD DEFINITION ----------------*/ + +/* U3D_LINK_UX_INACT_TIMER */ +#define DEV_U2_INACT_TIMEOUT_MSK GENMASK(23, 16) +#define DEV_U2_INACT_TIMEOUT_VALUE(x) (((x) & 0xff) << 16) +#define U2_INACT_TIMEOUT_MSK GENMASK(15, 8) +#define U1_INACT_TIMEOUT_MSK GENMASK(7, 0) +#define U1_INACT_TIMEOUT_VALUE(x) ((x) & 0xff) + +/* U3D_LINK_POWER_CONTROL */ +#define SW_U2_ACCEPT_ENABLE BIT(9) +#define SW_U1_ACCEPT_ENABLE BIT(8) +#define UX_EXIT BIT(5) +#define LGO_U3 BIT(4) +#define LGO_U2 BIT(3) +#define LGO_U1 BIT(2) +#define SW_U2_REQUEST_ENABLE BIT(1) +#define SW_U1_REQUEST_ENABLE BIT(0) + +/* U3D_LINK_ERR_COUNT */ +#define CLR_LINK_ERR_CNT BIT(16) +#define LINK_ERROR_COUNT GENMASK(15, 0) + +/*---------------- SSUSB_USB2_CSR REGISTER DEFINITION ----------------*/ + +#define U3D_POWER_MANAGEMENT (SSUSB_USB2_CSR_BASE + 0x0004) +#define U3D_DEVICE_CONTROL (SSUSB_USB2_CSR_BASE + 0x000C) +#define U3D_USB2_TEST_MODE (SSUSB_USB2_CSR_BASE + 0x0014) +#define U3D_COMMON_USB_INTR_ENABLE (SSUSB_USB2_CSR_BASE + 0x0018) +#define U3D_COMMON_USB_INTR (SSUSB_USB2_CSR_BASE + 0x001C) +#define U3D_LINK_RESET_INFO (SSUSB_USB2_CSR_BASE + 0x0024) +#define U3D_USB20_FRAME_NUM (SSUSB_USB2_CSR_BASE + 0x003C) +#define U3D_USB20_LPM_PARAMETER (SSUSB_USB2_CSR_BASE + 0x0044) +#define U3D_USB20_MISC_CONTROL 
(SSUSB_USB2_CSR_BASE + 0x004C) + +/*---------------- SSUSB_USB2_CSR FIELD DEFINITION ----------------*/ + +/* U3D_POWER_MANAGEMENT */ +#define LPM_BESL_STALL BIT(14) +#define LPM_BESLD_STALL BIT(13) +#define LPM_RWP BIT(11) +#define LPM_HRWE BIT(10) +#define LPM_MODE(x) (((x) & 0x3) << 8) +#define ISO_UPDATE BIT(7) +#define SOFT_CONN BIT(6) +#define HS_ENABLE BIT(5) +#define RESUME BIT(2) +#define SUSPENDM_ENABLE BIT(0) + +/* U3D_DEVICE_CONTROL */ +#define DC_HOSTREQ BIT(1) +#define DC_SESSION BIT(0) + +/* U3D_USB2_TEST_MODE */ +#define U2U3_AUTO_SWITCH BIT(10) +#define LPM_FORCE_STALL BIT(8) +#define FIFO_ACCESS BIT(6) +#define FORCE_FS BIT(5) +#define FORCE_HS BIT(4) +#define TEST_PACKET_MODE BIT(3) +#define TEST_K_MODE BIT(2) +#define TEST_J_MODE BIT(1) +#define TEST_SE0_NAK_MODE BIT(0) + +/* U3D_COMMON_USB_INTR_ENABLE */ +/* U3D_COMMON_USB_INTR */ +#define LPM_RESUME_INTR BIT(9) +#define LPM_INTR BIT(8) +#define DISCONN_INTR BIT(5) +#define CONN_INTR BIT(4) +#define SOF_INTR BIT(3) +#define RESET_INTR BIT(2) +#define RESUME_INTR BIT(1) +#define SUSPEND_INTR BIT(0) + +/* U3D_LINK_RESET_INFO */ +#define WTCHRP_MSK GENMASK(19, 16) + +/* U3D_USB20_LPM_PARAMETER */ +#define LPM_BESLCK_U3(x) (((x) & 0xf) << 12) +#define LPM_BESLCK(x) (((x) & 0xf) << 8) +#define LPM_BESLDCK(x) (((x) & 0xf) << 4) +#define LPM_BESL GENMASK(3, 0) + +/* U3D_USB20_MISC_CONTROL */ +#define LPM_U3_ACK_EN BIT(0) + +/*---------------- SSUSB_SIFSLV_IPPC REGISTER DEFINITION ----------------*/ + +#define U3D_SSUSB_IP_PW_CTRL0 (SSUSB_SIFSLV_IPPC_BASE + 0x0000) +#define U3D_SSUSB_IP_PW_CTRL1 (SSUSB_SIFSLV_IPPC_BASE + 0x0004) +#define U3D_SSUSB_IP_PW_CTRL2 (SSUSB_SIFSLV_IPPC_BASE + 0x0008) +#define U3D_SSUSB_IP_PW_CTRL3 (SSUSB_SIFSLV_IPPC_BASE + 0x000C) +#define U3D_SSUSB_IP_PW_STS1 (SSUSB_SIFSLV_IPPC_BASE + 0x0010) +#define U3D_SSUSB_IP_PW_STS2 (SSUSB_SIFSLV_IPPC_BASE + 0x0014) +#define U3D_SSUSB_OTG_STS (SSUSB_SIFSLV_IPPC_BASE + 0x0018) +#define U3D_SSUSB_OTG_STS_CLR (SSUSB_SIFSLV_IPPC_BASE + 0x001C) +#define U3D_SSUSB_IP_XHCI_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0024) +#define U3D_SSUSB_IP_DEV_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0028) +#define U3D_SSUSB_OTG_INT_EN (SSUSB_SIFSLV_IPPC_BASE + 0x002C) +#define U3D_SSUSB_U3_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0030) +#define U3D_SSUSB_U2_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0050) +#define U3D_SSUSB_REF_CK_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x008C) +#define U3D_SSUSB_DEV_RST_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x0098) +#define U3D_SSUSB_HW_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A0) +#define U3D_SSUSB_HW_SUB_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A4) +#define U3D_SSUSB_IP_SPARE0 (SSUSB_SIFSLV_IPPC_BASE + 0x00C8) + +/*---------------- SSUSB_SIFSLV_IPPC FIELD DEFINITION ----------------*/ + +/* U3D_SSUSB_IP_PW_CTRL0 */ +#define SSUSB_IP_SW_RST BIT(0) + +/* U3D_SSUSB_IP_PW_CTRL1 */ +#define SSUSB_IP_HOST_PDN BIT(0) + +/* U3D_SSUSB_IP_PW_CTRL2 */ +#define SSUSB_IP_DEV_PDN BIT(0) + +/* U3D_SSUSB_IP_PW_CTRL3 */ +#define SSUSB_IP_PCIE_PDN BIT(0) + +/* U3D_SSUSB_IP_PW_STS1 */ +#define SSUSB_IP_SLEEP_STS BIT(30) +#define SSUSB_U3_MAC_RST_B_STS BIT(16) +#define SSUSB_XHCI_RST_B_STS BIT(11) +#define SSUSB_SYS125_RST_B_STS BIT(10) +#define SSUSB_REF_RST_B_STS BIT(8) +#define SSUSB_SYSPLL_STABLE BIT(0) + +/* U3D_SSUSB_IP_PW_STS2 */ +#define SSUSB_U2_MAC_SYS_RST_B_STS BIT(0) + +/* U3D_SSUSB_OTG_STS */ +#define SSUSB_VBUS_VALID BIT(9) + +/* U3D_SSUSB_OTG_STS_CLR */ +#define SSUSB_VBUS_INTR_CLR BIT(6) + +/* U3D_SSUSB_IP_XHCI_CAP */ +#define SSUSB_IP_XHCI_U2_PORT_NUM(x) (((x) >> 8) & 0xff) +#define 
SSUSB_IP_XHCI_U3_PORT_NUM(x) ((x) & 0xff) + +/* U3D_SSUSB_IP_DEV_CAP */ +#define SSUSB_IP_DEV_U3_PORT_NUM(x) ((x) & 0xff) + +/* U3D_SSUSB_OTG_INT_EN */ +#define SSUSB_VBUS_CHG_INT_A_EN BIT(7) +#define SSUSB_VBUS_CHG_INT_B_EN BIT(6) + +/* U3D_SSUSB_U3_CTRL_0P */ +#define SSUSB_U3_PORT_HOST_SEL BIT(2) +#define SSUSB_U3_PORT_PDN BIT(1) +#define SSUSB_U3_PORT_DIS BIT(0) + +/* U3D_SSUSB_U2_CTRL_0P */ +#define SSUSB_U2_PORT_OTG_SEL BIT(7) +#define SSUSB_U2_PORT_HOST_SEL BIT(2) +#define SSUSB_U2_PORT_PDN BIT(1) +#define SSUSB_U2_PORT_DIS BIT(0) + +/* U3D_SSUSB_DEV_RST_CTRL */ +#define SSUSB_DEV_SW_RST BIT(0) + +#endif /* _SSUSB_HW_REGS_H_ */ diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c new file mode 100644 index 0000000000000000000000000000000000000000..783367805c99ffb1fe24e7ac9303e846e43e0ea6 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_plat.c @@ -0,0 +1,484 @@ +/* + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mtu3.h" +#include "mtu3_dr.h" + +/* u2-port0 should be powered on and enabled; */ +int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks) +{ + void __iomem *ibase = ssusb->ippc_base; + u32 value, check_val; + int ret; + + check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE | + SSUSB_REF_RST_B_STS; + + ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value, + (check_val == (value & check_val)), 100, 20000); + if (ret) { + dev_err(ssusb->dev, "clks of sts1 are not stable!\n"); + return ret; + } + + ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value, + (value & SSUSB_U2_MAC_SYS_RST_B_STS), 100, 10000); + if (ret) { + dev_err(ssusb->dev, "mac2 clock is not stable\n"); + return ret; + } + + return 0; +} + +static int ssusb_phy_init(struct ssusb_mtk *ssusb) +{ + int i; + int ret; + + for (i = 0; i < ssusb->num_phys; i++) { + ret = phy_init(ssusb->phys[i]); + if (ret) + goto exit_phy; + } + return 0; + +exit_phy: + for (; i > 0; i--) + phy_exit(ssusb->phys[i - 1]); + + return ret; +} + +static int ssusb_phy_exit(struct ssusb_mtk *ssusb) +{ + int i; + + for (i = 0; i < ssusb->num_phys; i++) + phy_exit(ssusb->phys[i]); + + return 0; +} + +static int ssusb_phy_power_on(struct ssusb_mtk *ssusb) +{ + int i; + int ret; + + for (i = 0; i < ssusb->num_phys; i++) { + ret = phy_power_on(ssusb->phys[i]); + if (ret) + goto power_off_phy; + } + return 0; + +power_off_phy: + for (; i > 0; i--) + phy_power_off(ssusb->phys[i - 1]); + + return ret; +} + +static void ssusb_phy_power_off(struct ssusb_mtk *ssusb) +{ + unsigned int i; + + for (i = 0; i < ssusb->num_phys; i++) + phy_power_off(ssusb->phys[i]); +} + +static int ssusb_rscs_init(struct ssusb_mtk *ssusb) +{ + int ret = 0; + + ret = regulator_enable(ssusb->vusb33); + if (ret) { + dev_err(ssusb->dev, "failed to enable vusb33\n"); + goto vusb33_err; + } + + ret = clk_prepare_enable(ssusb->sys_clk); + if (ret) { + dev_err(ssusb->dev, "failed to enable sys_clk\n"); + goto clk_err; + } + + ret = 
ssusb_phy_init(ssusb); + if (ret) { + dev_err(ssusb->dev, "failed to init phy\n"); + goto phy_init_err; + } + + ret = ssusb_phy_power_on(ssusb); + if (ret) { + dev_err(ssusb->dev, "failed to power on phy\n"); + goto phy_err; + } + + return 0; + +phy_err: + ssusb_phy_exit(ssusb); +phy_init_err: + clk_disable_unprepare(ssusb->sys_clk); +clk_err: + regulator_disable(ssusb->vusb33); +vusb33_err: + + return ret; +} + +static void ssusb_rscs_exit(struct ssusb_mtk *ssusb) +{ + clk_disable_unprepare(ssusb->sys_clk); + regulator_disable(ssusb->vusb33); + ssusb_phy_power_off(ssusb); + ssusb_phy_exit(ssusb); +} + +static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb) +{ + /* reset whole ip (xhci & u3d) */ + mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST); + udelay(1); + mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST); +} + +static int get_iddig_pinctrl(struct ssusb_mtk *ssusb) +{ + struct otg_switch_mtk *otg_sx = &ssusb->otg_switch; + + otg_sx->id_pinctrl = devm_pinctrl_get(ssusb->dev); + if (IS_ERR(otg_sx->id_pinctrl)) { + dev_err(ssusb->dev, "Cannot find id pinctrl!\n"); + return PTR_ERR(otg_sx->id_pinctrl); + } + + otg_sx->id_float = + pinctrl_lookup_state(otg_sx->id_pinctrl, "id_float"); + if (IS_ERR(otg_sx->id_float)) { + dev_err(ssusb->dev, "Cannot find pinctrl id_float!\n"); + return PTR_ERR(otg_sx->id_float); + } + + otg_sx->id_ground = + pinctrl_lookup_state(otg_sx->id_pinctrl, "id_ground"); + if (IS_ERR(otg_sx->id_ground)) { + dev_err(ssusb->dev, "Cannot find pinctrl id_ground!\n"); + return PTR_ERR(otg_sx->id_ground); + } + + return 0; +} + +static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb) +{ + struct device_node *node = pdev->dev.of_node; + struct otg_switch_mtk *otg_sx = &ssusb->otg_switch; + struct device *dev = &pdev->dev; + struct regulator *vbus; + struct resource *res; + int i; + int ret; + + ssusb->num_phys = of_count_phandle_with_args(node, + "phys", "#phy-cells"); + if (ssusb->num_phys > 0) { + ssusb->phys = devm_kcalloc(dev, ssusb->num_phys, + sizeof(*ssusb->phys), GFP_KERNEL); + if (!ssusb->phys) + return -ENOMEM; + } else { + ssusb->num_phys = 0; + } + + for (i = 0; i < ssusb->num_phys; i++) { + ssusb->phys[i] = devm_of_phy_get_by_index(dev, node, i); + if (IS_ERR(ssusb->phys[i])) { + dev_err(dev, "failed to get phy-%d\n", i); + return PTR_ERR(ssusb->phys[i]); + } + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc"); + ssusb->ippc_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ssusb->ippc_base)) { + dev_err(dev, "failed to map memory for ippc\n"); + return PTR_ERR(ssusb->ippc_base); + } + + ssusb->vusb33 = devm_regulator_get(&pdev->dev, "vusb33"); + if (IS_ERR(ssusb->vusb33)) { + dev_err(dev, "failed to get vusb33\n"); + return PTR_ERR(ssusb->vusb33); + } + + ssusb->sys_clk = devm_clk_get(dev, "sys_ck"); + if (IS_ERR(ssusb->sys_clk)) { + dev_err(dev, "failed to get sys clock\n"); + return PTR_ERR(ssusb->sys_clk); + } + + ssusb->dr_mode = usb_get_dr_mode(dev); + if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN) { + dev_err(dev, "dr_mode is error\n"); + return -EINVAL; + } + + if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL) + return 0; + + /* if host role is supported */ + ret = ssusb_wakeup_of_property_parse(ssusb, node); + if (ret) + return ret; + + if (ssusb->dr_mode != USB_DR_MODE_OTG) + return 0; + + /* if dual-role mode is supported */ + vbus = devm_regulator_get(&pdev->dev, "vbus"); + if (IS_ERR(vbus)) { + dev_err(dev, "failed to get vbus\n"); + return PTR_ERR(vbus); + } + 
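/* this supply is toggled by ssusb_set_vbus() when switching to/from host mode */ +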
otg_sx->vbus = vbus; + + otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd"); + otg_sx->manual_drd_enabled = + of_property_read_bool(node, "enable-manual-drd"); + + if (of_property_read_bool(node, "extcon")) { + otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0); + if (IS_ERR(otg_sx->edev)) { + dev_err(ssusb->dev, "couldn't get extcon device\n"); + return -EPROBE_DEFER; + } + if (otg_sx->manual_drd_enabled) { + ret = get_iddig_pinctrl(ssusb); + if (ret) + return ret; + } + } + + dev_info(dev, "dr_mode: %d, is_u3_dr: %d\n", + ssusb->dr_mode, otg_sx->is_u3_drd); + + return 0; +} + +static int mtu3_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct ssusb_mtk *ssusb; + int ret = -ENOMEM; + + /* all elements are set to ZERO as default value */ + ssusb = devm_kzalloc(dev, sizeof(*ssusb), GFP_KERNEL); + if (!ssusb) + return -ENOMEM; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "No suitable DMA config available\n"); + return -ENOTSUPP; + } + + platform_set_drvdata(pdev, ssusb); + ssusb->dev = dev; + + ret = get_ssusb_rscs(pdev, ssusb); + if (ret) + return ret; + + /* enable power domain */ + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + device_enable_async_suspend(dev); + + ret = ssusb_rscs_init(ssusb); + if (ret) + goto comm_init_err; + + ssusb_ip_sw_reset(ssusb); + + if (IS_ENABLED(CONFIG_USB_MTU3_HOST)) + ssusb->dr_mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_MTU3_GADGET)) + ssusb->dr_mode = USB_DR_MODE_PERIPHERAL; + + /* default as host */ + ssusb->is_host = !(ssusb->dr_mode == USB_DR_MODE_PERIPHERAL); + + switch (ssusb->dr_mode) { + case USB_DR_MODE_PERIPHERAL: + ret = ssusb_gadget_init(ssusb); + if (ret) { + dev_err(dev, "failed to initialize gadget\n"); + goto comm_exit; + } + break; + case USB_DR_MODE_HOST: + ret = ssusb_host_init(ssusb, node); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + goto comm_exit; + } + break; + case USB_DR_MODE_OTG: + ret = ssusb_gadget_init(ssusb); + if (ret) { + dev_err(dev, "failed to initialize gadget\n"); + goto comm_exit; + } + + ret = ssusb_host_init(ssusb, node); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + goto gadget_exit; + } + + ssusb_otg_switch_init(ssusb); + break; + default: + dev_err(dev, "unsupported mode: %d\n", ssusb->dr_mode); + ret = -EINVAL; + goto comm_exit; + } + + return 0; + +gadget_exit: + ssusb_gadget_exit(ssusb); +comm_exit: + ssusb_rscs_exit(ssusb); +comm_init_err: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + + return ret; +} + +static int mtu3_remove(struct platform_device *pdev) +{ + struct ssusb_mtk *ssusb = platform_get_drvdata(pdev); + + switch (ssusb->dr_mode) { + case USB_DR_MODE_PERIPHERAL: + ssusb_gadget_exit(ssusb); + break; + case USB_DR_MODE_HOST: + ssusb_host_exit(ssusb); + break; + case USB_DR_MODE_OTG: + ssusb_otg_switch_exit(ssusb); + ssusb_gadget_exit(ssusb); + ssusb_host_exit(ssusb); + break; + default: + return -EINVAL; + } + + ssusb_rscs_exit(ssusb); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +/* + * when support dual-role mode, we reject suspend when + * it works as device mode; + */ +static int __maybe_unused mtu3_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ssusb_mtk *ssusb = platform_get_drvdata(pdev); + + dev_dbg(dev, "%s\n", __func__); + + /* REVISIT: disconnect it for only device mode? 
*/ + if (!ssusb->is_host) + return 0; + + ssusb_host_disable(ssusb, true); + ssusb_phy_power_off(ssusb); + clk_disable_unprepare(ssusb->sys_clk); + ssusb_wakeup_enable(ssusb); + + return 0; +} + +static int __maybe_unused mtu3_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ssusb_mtk *ssusb = platform_get_drvdata(pdev); + + dev_dbg(dev, "%s\n", __func__); + + if (!ssusb->is_host) + return 0; + + ssusb_wakeup_disable(ssusb); + clk_prepare_enable(ssusb->sys_clk); + ssusb_phy_power_on(ssusb); + ssusb_host_enable(ssusb); + + return 0; +} + +static const struct dev_pm_ops mtu3_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mtu3_suspend, mtu3_resume) +}; + +#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &mtu3_pm_ops : NULL) + +#ifdef CONFIG_OF + +static const struct of_device_id mtu3_of_match[] = { + {.compatible = "mediatek,mt8173-mtu3",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, mtu3_of_match); + +#endif + +static struct platform_driver mtu3_driver = { + .probe = mtu3_probe, + .remove = mtu3_remove, + .driver = { + .name = MTU3_DRIVER_NAME, + .pm = DEV_PM_OPS, + .of_match_table = of_match_ptr(mtu3_of_match), + }, +}; +module_platform_driver(mtu3_driver); + +MODULE_AUTHOR("Chunfeng Yun "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MediaTek USB3 DRD Controller Driver"); diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c new file mode 100644 index 0000000000000000000000000000000000000000..7d9ba8a52368f6a4e3ed7b95fd6daa99f8fd9430 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_qmu.c @@ -0,0 +1,573 @@ +/* + * mtu3_qmu.c - Queue Management Unit driver for device controller + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * Queue Management Unit (QMU) is designed to unload SW effort + * to serve DMA interrupts. + * By preparing General Purpose Descriptor (GPD) and Buffer Descriptor (BD), + * SW links data buffers and triggers QMU to send / receive data to + * host / from device at a time. + * And now only GPD is supported. 
+ * + * For more detailed information, please refer to the QMU Programming Guide + */ + +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> + +#include "mtu3.h" + +#define QMU_CHECKSUM_LEN 16 + +#define GPD_FLAGS_HWO BIT(0) +#define GPD_FLAGS_BDP BIT(1) +#define GPD_FLAGS_BPS BIT(2) +#define GPD_FLAGS_IOC BIT(7) + +#define GPD_EXT_FLAG_ZLP BIT(5) + + +static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring, + dma_addr_t dma_addr) +{ + dma_addr_t dma_base = ring->dma; + struct qmu_gpd *gpd_head = ring->start; + u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head); + + if (offset >= MAX_GPD_NUM) + return NULL; + + return gpd_head + offset; +} + +static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring, + struct qmu_gpd *gpd) +{ + dma_addr_t dma_base = ring->dma; + struct qmu_gpd *gpd_head = ring->start; + u32 offset; + + offset = gpd - gpd_head; + if (offset >= MAX_GPD_NUM) + return 0; + + return dma_base + (offset * sizeof(*gpd)); +} + +static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) +{ + ring->start = gpd; + ring->enqueue = gpd; + ring->dequeue = gpd; + ring->end = gpd + MAX_GPD_NUM - 1; +} + +static void reset_gpd_list(struct mtu3_ep *mep) +{ + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + struct qmu_gpd *gpd = ring->start; + + if (gpd) { + gpd->flag &= ~GPD_FLAGS_HWO; + gpd_ring_init(ring, gpd); + } +} + +int mtu3_gpd_ring_alloc(struct mtu3_ep *mep) +{ + struct qmu_gpd *gpd; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + + /* software owns all gpds by default */ + gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma); + if (gpd == NULL) + return -ENOMEM; + + gpd_ring_init(ring, gpd); + + return 0; +} + +void mtu3_gpd_ring_free(struct mtu3_ep *mep) +{ + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + + dma_pool_free(mep->mtu->qmu_gpd_pool, + ring->start, ring->dma); + memset(ring, 0, sizeof(*ring)); +} + +/* + * calculate the checksum of a gpd or bd + * "noinline" and "mb" are used to prevent a wrong calculation + */ +static noinline u8 qmu_calc_checksum(u8 *data) +{ + u8 chksum = 0; + int i; + + data[1] = 0x0; /* set checksum to 0 */ + + mb(); /* ensure the gpd/bd is really up-to-date */ + for (i = 0; i < QMU_CHECKSUM_LEN; i++) + chksum += data[i]; + + /* Default: HWO=1, @flag[bit0] */ + chksum += 1; + + return 0xFF - chksum; +} + +void mtu3_qmu_resume(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + void __iomem *mbase = mtu->mac_base; + int epnum = mep->epnum; + u32 offset; + + offset = mep->is_in ?
USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); + + mtu3_writel(mbase, offset, QMU_Q_RESUME); + if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE)) + mtu3_writel(mbase, offset, QMU_Q_RESUME); +} + +static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring) +{ + if (ring->enqueue < ring->end) + ring->enqueue++; + else + ring->enqueue = ring->start; + + return ring->enqueue; +} + +static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring) +{ + if (ring->dequeue < ring->end) + ring->dequeue++; + else + ring->dequeue = ring->start; + + return ring->dequeue; +} + +/* check if a ring is empty */ +static int gpd_ring_empty(struct mtu3_gpd_ring *ring) +{ + struct qmu_gpd *enq = ring->enqueue; + struct qmu_gpd *next; + + if (ring->enqueue < ring->end) + next = enq + 1; + else + next = ring->start; + + /* one gpd is reserved to simplify gpd preparation */ + return next == ring->dequeue; +} + +int mtu3_prepare_transfer(struct mtu3_ep *mep) +{ + return gpd_ring_empty(&mep->gpd_ring); +} + +static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) +{ + struct qmu_gpd *enq; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + struct qmu_gpd *gpd = ring->enqueue; + struct usb_request *req = &mreq->request; + + /* set all fields to zero as default value */ + memset(gpd, 0, sizeof(*gpd)); + + gpd->buffer = cpu_to_le32((u32)req->dma); + gpd->buf_len = cpu_to_le16(req->length); + gpd->flag |= GPD_FLAGS_IOC; + + /* get the next GPD */ + enq = advance_enq_gpd(ring); + dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n", + mep->epnum, gpd, enq); + + enq->flag &= ~GPD_FLAGS_HWO; + gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); + + if (req->zero) + gpd->ext_flag |= GPD_EXT_FLAG_ZLP; + + gpd->chksum = qmu_calc_checksum((u8 *)gpd); + gpd->flag |= GPD_FLAGS_HWO; + + mreq->gpd = gpd; + + return 0; +} + +static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) +{ + struct qmu_gpd *enq; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + struct qmu_gpd *gpd = ring->enqueue; + struct usb_request *req = &mreq->request; + + /* set all fields to zero as default value */ + memset(gpd, 0, sizeof(*gpd)); + + gpd->buffer = cpu_to_le32((u32)req->dma); + gpd->data_buf_len = cpu_to_le16(req->length); + gpd->flag |= GPD_FLAGS_IOC; + + /* get the next GPD */ + enq = advance_enq_gpd(ring); + dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n", + mep->epnum, gpd, enq); + + enq->flag &= ~GPD_FLAGS_HWO; + gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); + gpd->chksum = qmu_calc_checksum((u8 *)gpd); + gpd->flag |= GPD_FLAGS_HWO; + + mreq->gpd = gpd; + + return 0; +} + +void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) +{ + + if (mep->is_in) + mtu3_prepare_tx_gpd(mep, mreq); + else + mtu3_prepare_rx_gpd(mep, mreq); +} + +int mtu3_qmu_start(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + void __iomem *mbase = mtu->mac_base; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + u8 epnum = mep->epnum; + + if (mep->is_in) { + /* set QMU start address */ + mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma); + mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); + mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum)); + /* send zero length packet according to ZLP flag in GPD */ + mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum)); + mtu3_writel(mbase, U3D_TQERRIESR0, + QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum)); + + if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) { + dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
+ return 0; + } + mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START); + + } else { + mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma); + mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN); + mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum)); + /* don't expect ZLP */ + mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum)); + /* move to the next GPD when a ZLP is received */ + mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum)); + mtu3_writel(mbase, U3D_RQERRIESR0, + QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum)); + mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum)); + + if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) { + dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum); + return 0; + } + mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START); + } + + return 0; +} + +/* may be called in atomic context */ +void mtu3_qmu_stop(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + void __iomem *mbase = mtu->mac_base; + int epnum = mep->epnum; + u32 value = 0; + u32 qcsr; + int ret; + + qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); + + if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) { + dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name); + return; + } + mtu3_writel(mbase, qcsr, QMU_Q_STOP); + + ret = readl_poll_timeout_atomic(mbase + qcsr, value, + !(value & QMU_Q_ACTIVE), 1, 1000); + if (ret) { + dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name); + return; + } + + dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name); +} + +void mtu3_qmu_flush(struct mtu3_ep *mep) +{ + + dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__, + ((mep->is_in) ? "TX" : "RX")); + + /* stop QMU */ + mtu3_qmu_stop(mep); + reset_gpd_list(mep); +} + +/* + * QMU can't transfer a zero-length packet directly (a hardware limit + * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger + * a length error interrupt and let the ISR send the ZLP via the BMU.
+ */ +static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum) +{ + struct mtu3_ep *mep = mtu->in_eps + epnum; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + void __iomem *mbase = mtu->mac_base; + struct qmu_gpd *gpd_current = NULL; + dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum)); + struct usb_request *req = NULL; + struct mtu3_request *mreq; + u32 txcsr = 0; + int ret; + + mreq = next_request(mep); + if (mreq && mreq->request.length == 0) + req = &mreq->request; + else + return; + + gpd_current = gpd_dma_to_virt(ring, gpd_dma); + + if (le16_to_cpu(gpd_current->buf_len) != 0) { + dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum); + return; + } + + dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq); + + mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); + + ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum), + txcsr, !(txcsr & TX_FIFOFULL), 1, 1000); + if (ret) { + dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__); + return; + } + mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY); + + /* bypass the current GPD */ + gpd_current->flag |= GPD_FLAGS_BPS; + gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current); + gpd_current->flag |= GPD_FLAGS_HWO; + + /* enable DMAREQEN, switch back to QMU mode */ + mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); + mtu3_qmu_resume(mep); +} + +/* + * NOTE: the request list may already be empty, as in the following case: + * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)--> + * queue_tx --> process_tasklet(meanwhile, the second one is transferred, + * tasklet processes both of them)-->qmu_interrupt for the second one. + * To avoid the above case, qmu_done_tx is called directly from the ISR. + */ +static void qmu_done_tx(struct mtu3 *mtu, u8 epnum) +{ + struct mtu3_ep *mep = mtu->in_eps + epnum; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + void __iomem *mbase = mtu->mac_base; + struct qmu_gpd *gpd = ring->dequeue; + struct qmu_gpd *gpd_current = NULL; + dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum)); + struct usb_request *request = NULL; + struct mtu3_request *mreq; + + /* translate the physical address read from the QMU register into a virtual address */ + gpd_current = gpd_dma_to_virt(ring, gpd_dma); + + dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", + __func__, epnum, gpd, gpd_current, ring->enqueue); + + while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { + + mreq = next_request(mep); + + if (mreq == NULL || mreq->gpd != gpd) { + dev_err(mtu->dev, "no correct TX req is found\n"); + break; + } + + request = &mreq->request; + request->actual = le16_to_cpu(gpd->buf_len); + mtu3_req_complete(mep, request, 0); + + gpd = advance_deq_gpd(ring); + } + + dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", + __func__, epnum, ring->dequeue, ring->enqueue); + +} + +static void qmu_done_rx(struct mtu3 *mtu, u8 epnum) +{ + struct mtu3_ep *mep = mtu->out_eps + epnum; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + void __iomem *mbase = mtu->mac_base; + struct qmu_gpd *gpd = ring->dequeue; + struct qmu_gpd *gpd_current = NULL; + dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum)); + struct usb_request *req = NULL; + struct mtu3_request *mreq; + + gpd_current = gpd_dma_to_virt(ring, gpd_dma); + + dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", + __func__, epnum, gpd, gpd_current, ring->enqueue); + + while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { + + mreq = next_request(mep); + + if (mreq == NULL ||
mreq->gpd != gpd) { + dev_err(mtu->dev, "no correct RX req is found\n"); + break; + } + req = &mreq->request; + + req->actual = le16_to_cpu(gpd->buf_len); + mtu3_req_complete(mep, req, 0); + + gpd = advance_deq_gpd(ring); + } + + dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", + __func__, epnum, ring->dequeue, ring->enqueue); +} + +static void qmu_done_isr(struct mtu3 *mtu, u32 done_status) +{ + int i; + + for (i = 1; i < mtu->num_eps; i++) { + if (done_status & QMU_RX_DONE_INT(i)) + qmu_done_rx(mtu, i); + if (done_status & QMU_TX_DONE_INT(i)) + qmu_done_tx(mtu, i); + } +} + +static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status) +{ + void __iomem *mbase = mtu->mac_base; + u32 errval; + int i; + + if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) { + errval = mtu3_readl(mbase, U3D_RQERRIR0); + for (i = 1; i < mtu->num_eps; i++) { + if (errval & QMU_RX_CS_ERR(i)) + dev_err(mtu->dev, "Rx %d CS error!\n", i); + + if (errval & QMU_RX_LEN_ERR(i)) + dev_err(mtu->dev, "RX %d Length error\n", i); + } + mtu3_writel(mbase, U3D_RQERRIR0, errval); + } + + if (qmu_status & RXQ_ZLPERR_INT) { + errval = mtu3_readl(mbase, U3D_RQERRIR1); + for (i = 1; i < mtu->num_eps; i++) { + if (errval & QMU_RX_ZLP_ERR(i)) + dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i); + } + mtu3_writel(mbase, U3D_RQERRIR1, errval); + } + + if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) { + errval = mtu3_readl(mbase, U3D_TQERRIR0); + for (i = 1; i < mtu->num_eps; i++) { + if (errval & QMU_TX_CS_ERR(i)) + dev_err(mtu->dev, "Tx %d checksum error!\n", i); + + if (errval & QMU_TX_LEN_ERR(i)) + qmu_tx_zlp_error_handler(mtu, i); + } + mtu3_writel(mbase, U3D_TQERRIR0, errval); + } +} + +irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + u32 qmu_status; + u32 qmu_done_status; + + /* U3D_QISAR1 is read update */ + qmu_status = mtu3_readl(mbase, U3D_QISAR1); + qmu_status &= mtu3_readl(mbase, U3D_QIER1); + + qmu_done_status = mtu3_readl(mbase, U3D_QISAR0); + qmu_done_status &= mtu3_readl(mbase, U3D_QIER0); + mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */ + dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n", + (qmu_done_status & 0xFFFF), qmu_done_status >> 16, + qmu_status); + + if (qmu_done_status) + qmu_done_isr(mtu, qmu_done_status); + + if (qmu_status) + qmu_exception_isr(mtu, qmu_status); + + return IRQ_HANDLED; +} + +int mtu3_qmu_init(struct mtu3 *mtu) +{ + + compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B"); + + mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev, + QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0); + + if (!mtu->qmu_gpd_pool) + return -ENOMEM; + + return 0; +} + +void mtu3_qmu_exit(struct mtu3 *mtu) +{ + dma_pool_destroy(mtu->qmu_gpd_pool); +} diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h new file mode 100644 index 0000000000000000000000000000000000000000..4dafa16bf12036ee173e215e9ae3ae31b953ee4f --- /dev/null +++ b/drivers/usb/mtu3/mtu3_qmu.h @@ -0,0 +1,43 @@ +/* + * mtu3_qmu.h - Queue Management Unit driver header + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __MTK_QMU_H__ +#define __MTK_QMU_H__ + +#define MAX_GPD_NUM 64 +#define QMU_GPD_SIZE (sizeof(struct qmu_gpd)) +#define QMU_GPD_RING_SIZE (MAX_GPD_NUM * QMU_GPD_SIZE) + +#define GPD_BUF_SIZE 65532 + +void mtu3_qmu_stop(struct mtu3_ep *mep); +int mtu3_qmu_start(struct mtu3_ep *mep); +void mtu3_qmu_resume(struct mtu3_ep *mep); +void mtu3_qmu_flush(struct mtu3_ep *mep); + +void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq); +int mtu3_prepare_transfer(struct mtu3_ep *mep); + +int mtu3_gpd_ring_alloc(struct mtu3_ep *mep); +void mtu3_gpd_ring_free(struct mtu3_ep *mep); + +irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu); +int mtu3_qmu_init(struct mtu3 *mtu); +void mtu3_qmu_exit(struct mtu3 *mtu); + +#endif diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 210b7e43a6fd40bb6a626a9f300fc2bbe1eb0200..e89708d839e504d772727a6be7b0623964e6f6d1 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -6,6 +6,9 @@ * Based on the DaVinci "glue layer" code. * Copyright (C) 2005-2006 by Texas Instruments * + * DT support + * Copyright (c) 2016 Petr Kulhavy + * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you @@ -340,6 +343,13 @@ static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode) struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent); enum phy_mode phy_mode; + /* + * The PHY has some issues when it is forced in device or host mode. + * Unless the user request another mode, configure the PHY in OTG mode. + */ + if (!musb->is_initialized) + return phy_set_mode(glue->phy, PHY_MODE_USB_OTG); + switch (musb_mode) { case MUSB_HOST: /* Force VBUS valid, ID = 0 */ phy_mode = PHY_MODE_USB_HOST; @@ -366,6 +376,12 @@ static int da8xx_musb_init(struct musb *musb) musb->mregs += DA8XX_MENTOR_CORE_OFFSET; + ret = clk_prepare_enable(glue->clk); + if (ret) { + dev_err(glue->dev, "failed to enable clock\n"); + return ret; + } + /* Returns zero if e.g. 
not clocked */ rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG); if (!rev) @@ -377,12 +393,6 @@ static int da8xx_musb_init(struct musb *musb) goto fail; } - ret = clk_prepare_enable(glue->clk); - if (ret) { - dev_err(glue->dev, "failed to enable clock\n"); - goto fail; - } - setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); /* Reset the controller */ @@ -392,7 +402,7 @@ static int da8xx_musb_init(struct musb *musb) ret = phy_init(glue->phy); if (ret) { dev_err(glue->dev, "Failed to init phy.\n"); - goto err_phy_init; + goto fail; } ret = phy_power_on(glue->phy); @@ -412,9 +422,8 @@ static int da8xx_musb_init(struct musb *musb) err_phy_power_on: phy_exit(glue->phy); -err_phy_init: - clk_disable_unprepare(glue->clk); fail: + clk_disable_unprepare(glue->clk); return ret; } @@ -433,6 +442,21 @@ static int da8xx_musb_exit(struct musb *musb) return 0; } +static inline u8 get_vbus_power(struct device *dev) +{ + struct regulator *vbus_supply; + int current_uA; + + vbus_supply = regulator_get_optional(dev, "vbus"); + if (IS_ERR(vbus_supply)) + return 255; + current_uA = regulator_get_current_limit(vbus_supply); + regulator_put(vbus_supply); + if (current_uA <= 0 || current_uA > 510000) + return 255; + return current_uA / 1000 / 2; +} + static const struct musb_platform_ops da8xx_ops = { .quirks = MUSB_DMA_CPPI | MUSB_INDEXED_EP, .init = da8xx_musb_init, @@ -458,6 +482,12 @@ static const struct platform_device_info da8xx_dev_info = { .dma_mask = DMA_BIT_MASK(32), }; +static const struct musb_hdrc_config da8xx_config = { + .ram_bits = 10, + .num_eps = 5, + .multipoint = 1, +}; + static int da8xx_probe(struct platform_device *pdev) { struct resource musb_resources[2]; @@ -465,6 +495,7 @@ static int da8xx_probe(struct platform_device *pdev) struct da8xx_glue *glue; struct platform_device_info pinfo; struct clk *clk; + struct device_node *np = pdev->dev.of_node; int ret; glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); @@ -479,13 +510,24 @@ static int da8xx_probe(struct platform_device *pdev) glue->phy = devm_phy_get(&pdev->dev, "usb-phy"); if (IS_ERR(glue->phy)) { - dev_err(&pdev->dev, "failed to get phy\n"); + if (PTR_ERR(glue->phy) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get phy\n"); return PTR_ERR(glue->phy); } glue->dev = &pdev->dev; glue->clk = clk; + if (IS_ENABLED(CONFIG_OF) && np) { + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->config = &da8xx_config; + pdata->mode = musb_get_mode(&pdev->dev); + pdata->power = get_vbus_power(&pdev->dev); + } + pdata->platform_ops = &da8xx_ops; glue->usb_phy = usb_phy_generic_register(); @@ -536,11 +578,22 @@ static int da8xx_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id da8xx_id_table[] = { + { + .compatible = "ti,da830-musb", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, da8xx_id_table); +#endif + static struct platform_driver da8xx_driver = { .probe = da8xx_probe, .remove = da8xx_remove, .driver = { .name = "musb-da8xx", + .of_match_table = of_match_ptr(da8xx_id_table), }, }; diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 27dadc0d9114bf14d6034906153d7f1df60be4a5..9e226468a13eb07af0bf5a6407d81cd304436369 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -100,6 +100,7 @@ #include #include #include +#include #include "musb_core.h" #include "musb_trace.h" @@ -130,6 +131,24 @@ static inline struct musb *dev_to_musb(struct device *dev) return 
dev_get_drvdata(dev); } +enum musb_mode musb_get_mode(struct device *dev) +{ + enum usb_dr_mode mode; + + mode = usb_get_dr_mode(dev); + switch (mode) { + case USB_DR_MODE_HOST: + return MUSB_HOST; + case USB_DR_MODE_PERIPHERAL: + return MUSB_PERIPHERAL; + case USB_DR_MODE_OTG: + case USB_DR_MODE_UNKNOWN: + default: + return MUSB_OTG; + } +} +EXPORT_SYMBOL_GPL(musb_get_mode); + /*-------------------------------------------------------------------------*/ #ifndef CONFIG_BLACKFIN @@ -569,10 +588,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, if (devctl & MUSB_DEVCTL_HM) { switch (musb->xceiv->otg->state) { case OTG_STATE_A_SUSPEND: - /* remote wakeup? later, GetPortStatus - * will stop RESUME signaling - */ - + /* remote wakeup? */ musb->port1_status |= (USB_PORT_STAT_C_SUSPEND << 16) | MUSB_PORT_STAT_RESUME; @@ -986,7 +1002,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, } #endif - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); return handled; } @@ -1855,14 +1871,23 @@ static void musb_pm_runtime_check_session(struct musb *musb) MUSB_DEVCTL_HR; switch (devctl & ~s) { case MUSB_QUIRK_B_INVALID_VBUS_91: - if (!musb->session && !musb->quirk_invalid_vbus) { - musb->quirk_invalid_vbus = true; + if (musb->quirk_retries--) { musb_dbg(musb, - "First invalid vbus, assume no session"); + "Poll devctl on invalid vbus, assume no session"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + return; } - break; case MUSB_QUIRK_A_DISCONNECT_19: + if (musb->quirk_retries--) { + musb_dbg(musb, + "Poll devctl on possible host mode disconnect"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + + return; + } if (!musb->session) break; musb_dbg(musb, "Allow PM on possible host mode disconnect"); @@ -1886,9 +1911,9 @@ static void musb_pm_runtime_check_session(struct musb *musb) if (error < 0) dev_err(musb->controller, "Could not enable: %i\n", error); + musb->quirk_retries = 3; } else { musb_dbg(musb, "Allow PM with no session: %02x", devctl); - musb->quirk_invalid_vbus = false; pm_runtime_mark_last_busy(musb->controller); pm_runtime_put_autosuspend(musb->controller); } @@ -1899,7 +1924,7 @@ static void musb_pm_runtime_check_session(struct musb *musb) /* Only used to provide driver mode change events */ static void musb_irq_work(struct work_struct *data) { - struct musb *musb = container_of(data, struct musb, irq_work); + struct musb *musb = container_of(data, struct musb, irq_work.work); musb_pm_runtime_check_session(musb); @@ -1969,6 +1994,7 @@ static struct musb *allocate_instance(struct device *dev, INIT_LIST_HEAD(&musb->control); INIT_LIST_HEAD(&musb->in_bulk); INIT_LIST_HEAD(&musb->out_bulk); + INIT_LIST_HEAD(&musb->pending_list); musb->vbuserr_retry = VBUSERR_RETRY_COUNT; musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; @@ -2018,6 +2044,84 @@ static void musb_free(struct musb *musb) musb_host_free(musb); } +struct musb_pending_work { + int (*callback)(struct musb *musb, void *data); + void *data; + struct list_head node; +}; + +/* + * Called from musb_runtime_resume(), musb_resume(), and + * musb_queue_resume_work(). Callers must take musb->lock. 
+ */ +static int musb_run_resume_work(struct musb *musb) +{ + struct musb_pending_work *w, *_w; + unsigned long flags; + int error = 0; + + spin_lock_irqsave(&musb->list_lock, flags); + list_for_each_entry_safe(w, _w, &musb->pending_list, node) { + if (w->callback) { + error = w->callback(musb, w->data); + if (error < 0) { + dev_err(musb->controller, + "resume callback %p failed: %i\n", + w->callback, error); + } + } + list_del(&w->node); + devm_kfree(musb->controller, w); + } + spin_unlock_irqrestore(&musb->list_lock, flags); + + return error; +} + +/* + * Called to run work if device is active or else queue the work to happen + * on resume. Caller must take musb->lock and must hold an RPM reference. + * + * Note that we cowardly refuse queuing work after musb PM runtime + * resume is done calling musb_run_resume_work() and return -EINPROGRESS + * instead. + */ +int musb_queue_resume_work(struct musb *musb, + int (*callback)(struct musb *musb, void *data), + void *data) +{ + struct musb_pending_work *w; + unsigned long flags; + int error; + + if (WARN_ON(!callback)) + return -EINVAL; + + if (pm_runtime_active(musb->controller)) + return callback(musb, data); + + w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); + if (!w) + return -ENOMEM; + + w->callback = callback; + w->data = data; + spin_lock_irqsave(&musb->list_lock, flags); + if (musb->is_runtime_suspended) { + list_add_tail(&w->node, &musb->pending_list); + error = 0; + } else { + dev_err(musb->controller, "could not add resume work %p\n", + callback); + devm_kfree(musb->controller, w); + error = -EINPROGRESS; + } + spin_unlock_irqrestore(&musb->list_lock, flags); + + return error; +} +EXPORT_SYMBOL_GPL(musb_queue_resume_work); + static void musb_deassert_reset(struct work_struct *work) { struct musb *musb; @@ -2065,6 +2169,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) } spin_lock_init(&musb->lock); + spin_lock_init(&musb->list_lock); musb->board_set_power = plat->set_power; musb->min_power = plat->min_power; musb->ops = plat->platform_ops; @@ -2114,11 +2219,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb->io.ep_offset = musb_flat_ep_offset; musb->io.ep_select = musb_flat_ep_select; } - /* And override them with platform specific ops if specified. 
*/ - if (musb->ops->ep_offset) - musb->io.ep_offset = musb->ops->ep_offset; - if (musb->ops->ep_select) - musb->io.ep_select = musb->ops->ep_select; /* At least tusb6010 has its own offsets */ if (musb->ops->ep_offset) @@ -2213,7 +2313,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_generic_disable(musb); /* Init IRQ workqueue before request_irq */ - INIT_WORK(&musb->irq_work, musb_irq_work); + INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume); @@ -2296,6 +2396,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (status) goto fail5; + musb->is_initialized = 1; pm_runtime_mark_last_busy(musb->controller); pm_runtime_put_autosuspend(musb->controller); @@ -2309,7 +2410,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_host_cleanup(musb); fail3: - cancel_work_sync(&musb->irq_work); + cancel_delayed_work_sync(&musb->irq_work); cancel_delayed_work_sync(&musb->finish_resume_work); cancel_delayed_work_sync(&musb->deassert_reset_work); if (musb->dma_controller) @@ -2329,8 +2430,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_platform_exit(musb); fail1: - dev_err(musb->controller, - "musb_init_controller failed with status %d\n", status); + if (status != -EPROBE_DEFER) + dev_err(musb->controller, + "%s failed with status %d\n", __func__, status); musb_free(musb); @@ -2376,7 +2478,7 @@ static int musb_remove(struct platform_device *pdev) */ musb_exit_debugfs(musb); - cancel_work_sync(&musb->irq_work); + cancel_delayed_work_sync(&musb->irq_work); cancel_delayed_work_sync(&musb->finish_resume_work); cancel_delayed_work_sync(&musb->deassert_reset_work); pm_runtime_get_sync(musb->controller); @@ -2562,6 +2664,7 @@ static int musb_suspend(struct device *dev) musb_platform_disable(musb); musb_generic_disable(musb); + WARN_ON(!list_empty(&musb->pending_list)); spin_lock_irqsave(&musb->lock, flags); @@ -2583,9 +2686,11 @@ static int musb_suspend(struct device *dev) static int musb_resume(struct device *dev) { - struct musb *musb = dev_to_musb(dev); - u8 devctl; - u8 mask; + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int error; + u8 devctl; + u8 mask; /* * For static cmos like DaVinci, register values were preserved @@ -2619,6 +2724,13 @@ static int musb_resume(struct device *dev) musb_start(musb); + spin_lock_irqsave(&musb->lock, flags); + error = musb_run_resume_work(musb); + if (error) + dev_err(musb->controller, "resume work failed with %i\n", + error); + spin_unlock_irqrestore(&musb->lock, flags); + return 0; } @@ -2627,14 +2739,16 @@ static int musb_runtime_suspend(struct device *dev) struct musb *musb = dev_to_musb(dev); musb_save_context(musb); + musb->is_runtime_suspended = 1; return 0; } static int musb_runtime_resume(struct device *dev) { - struct musb *musb = dev_to_musb(dev); - static int first = 1; + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int error; /* * When pm_runtime_get_sync called for the first time in driver @@ -2645,9 +2759,10 @@ static int musb_runtime_resume(struct device *dev) * Also context restore without save does not make * any sense */ - if (!first) - musb_restore_context(musb); - first = 0; + if (!musb->is_initialized) + return 0; + + musb_restore_context(musb); if (musb->need_finish_resume) { musb->need_finish_resume = 0; @@ -2655,6 +2770,14 @@ static int 
musb_runtime_resume(struct device *dev) msecs_to_jiffies(USB_RESUME_TIMEOUT)); } + spin_lock_irqsave(&musb->lock, flags); + error = musb_run_resume_work(musb); + if (error) + dev_err(musb->controller, "resume work failed with %i\n", + error); + musb->is_runtime_suspended = 0; + spin_unlock_irqrestore(&musb->lock, flags); + return 0; } diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 2cb88a498f8a5681265654e079456e0ae944667e..a611e2f67bdc9e3233e4d6efde27d8ce18fe0a67 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -303,13 +303,14 @@ struct musb_context_registers { struct musb { /* device lock */ spinlock_t lock; + spinlock_t list_lock; /* resume work list lock */ struct musb_io io; const struct musb_platform_ops *ops; struct musb_context_registers context; irqreturn_t (*isr)(int, void *); - struct work_struct irq_work; + struct delayed_work irq_work; struct delayed_work deassert_reset_work; struct delayed_work finish_resume_work; struct delayed_work gadget_work; @@ -337,6 +338,7 @@ struct musb { struct list_head control; /* of musb_qh */ struct list_head in_bulk; /* of musb_qh */ struct list_head out_bulk; /* of musb_qh */ + struct list_head pending_list; /* pending work list */ struct timer_list otg_timer; struct notifier_block nb; @@ -379,12 +381,15 @@ struct musb { int port_mode; /* MUSB_PORT_MODE_* */ bool session; - bool quirk_invalid_vbus; + unsigned long quirk_retries; bool is_host; int a_wait_bcon; /* VBUS timeout in msecs */ unsigned long idle_timeout; /* Next timeout in jiffies */ + unsigned is_initialized:1; + unsigned is_runtime_suspended:1; + /* active means connected and not suspended */ unsigned is_active:1; @@ -540,6 +545,10 @@ extern irqreturn_t musb_interrupt(struct musb *); extern void musb_hnp_stop(struct musb *musb); +int musb_queue_resume_work(struct musb *musb, + int (*callback)(struct musb *musb, void *data), + void *data); + static inline void musb_platform_set_vbus(struct musb *musb, int is_on) { if (musb->ops->set_vbus) @@ -617,4 +626,10 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb) musb->ops->post_root_reset_end(musb); } +/* + * gets the "dr_mode" property from DT and converts it into musb_mode + * if the property is not found or not recognized returns MUSB_OTG + */ +extern enum musb_mode musb_get_mode(struct device *dev); + #endif /* __MUSB_CORE_H__ */ diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index d4d7c56b48c7edf70aba49d6c2b9aa04347acd7c..16363852c0346b28b6452e04eddedfc630635428 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -197,8 +197,7 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer) if (!list_empty(&controller->early_tx_list) && !hrtimer_is_queued(&controller->early_tx)) { ret = HRTIMER_RESTART; - hrtimer_forward_now(&controller->early_tx, - ktime_set(0, 20 * NSEC_PER_USEC)); + hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC); } spin_unlock_irqrestore(&musb->lock, flags); @@ -280,9 +279,9 @@ static void cppi41_dma_callback(void *private_data) unsigned long usecs = cppi41_channel->total_len / 10; hrtimer_start_range_ns(&controller->early_tx, - ktime_set(0, usecs * NSEC_PER_USEC), - 20 * NSEC_PER_USEC, - HRTIMER_MODE_REL); + usecs * NSEC_PER_USEC, + 20 * NSEC_PER_USEC, + HRTIMER_MODE_REL); } out: diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 
9b22d946c089698e2083ab3457e0c5bda1f9bde2..4fef50e5c8c1295364aca0a28ba9cf8da005ac21 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -37,7 +37,7 @@ #include #include -#include +#include #include "musb_core.h" #include "musb_debug.h" diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 0f17d2140db6e5c36eceef43b185d2c7c183e091..feae1561b9abb6924d2fe2fc6f1222dfac9d2be7 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb) musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); musb_writel(reg_base, wrp->epintr_clear, wrp->txep_bitmap | wrp->rxep_bitmap); + del_timer_sync(&glue->timer); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); } -static void otg_timer(unsigned long _musb) +/* Caller must take musb->lock */ +static int dsps_check_status(struct musb *musb, void *unused) { - struct musb *musb = (void *)_musb; void __iomem *mregs = musb->mregs; struct device *dev = musb->controller; struct dsps_glue *glue = dev_get_drvdata(dev->parent); const struct dsps_musb_wrapper *wrp = glue->wrp; u8 devctl; - unsigned long flags; int skip_session = 0; - int err; - - err = pm_runtime_get_sync(dev); - if (err < 0) - dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); /* * We poll because DSPS IP's won't expose several OTG-critical @@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb) dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, usb_otg_state_string(musb->xceiv->otg->state)); - spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->otg->state) { case OTG_STATE_A_WAIT_VRISE: mod_timer(&glue->timer, jiffies + @@ -245,8 +239,30 @@ static void otg_timer(unsigned long _musb) default: break; } - spin_unlock_irqrestore(&musb->lock, flags); + return 0; +} + +static void otg_timer(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + struct device *dev = musb->controller; + unsigned long flags; + int err; + + err = pm_runtime_get(dev); + if ((err != -EINPROGRESS) && err < 0) { + dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); + pm_runtime_put_noidle(dev); + + return; + } + + spin_lock_irqsave(&musb->lock, flags); + err = musb_queue_resume_work(musb, dsps_check_status, NULL); + if (err < 0) + dev_err(dev, "%s resume work: %i\n", __func__, err); + spin_unlock_irqrestore(&musb->lock, flags); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } @@ -767,28 +783,13 @@ static int dsps_probe(struct platform_device *pdev) platform_set_drvdata(pdev, glue); pm_runtime_enable(&pdev->dev); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, 200); - - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); - goto err2; - } - ret = dsps_create_musb_pdev(glue, pdev); if (ret) - goto err3; - - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); + goto err; return 0; -err3: - pm_runtime_put_sync(&pdev->dev); -err2: - pm_runtime_dont_use_autosuspend(&pdev->dev); +err: pm_runtime_disable(&pdev->dev); return ret; } @@ -799,9 +800,6 @@ static int dsps_remove(struct platform_device *pdev) platform_device_unregister(glue->musb); - /* disable usbss clocks */ - pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 
4042ea017985de56346d0ac1dd070f33db079d77..1acc4864f9f6f0bf52cb5a084475fdab13bbab07 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -974,8 +974,8 @@ static int musb_gadget_enable(struct usb_ep *ep, goto fail; /* REVISIT this rules out high bandwidth periodic transfers */ - tmp = usb_endpoint_maxp(desc); - if (tmp & ~0x07ff) { + tmp = usb_endpoint_maxp_mult(desc) - 1; + if (tmp) { int ok; if (usb_endpoint_dir_in(desc)) @@ -987,12 +987,12 @@ static int musb_gadget_enable(struct usb_ep *ep, musb_dbg(musb, "no support for high bandwidth ISO"); goto fail; } - musb_ep->hb_mult = (tmp >> 11) & 3; + musb_ep->hb_mult = tmp; } else { musb_ep->hb_mult = 0; } - musb_ep->packet_sz = tmp & 0x7ff; + musb_ep->packet_sz = usb_endpoint_maxp(desc); tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); /* enable the interrupts for the endpoint, set the endpoint @@ -1114,7 +1114,7 @@ static int musb_gadget_enable(struct usb_ep *ep, musb_ep->dma ? "dma, " : "", musb_ep->packet_sz); - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); fail: spin_unlock_irqrestore(&musb->lock, flags); @@ -1158,7 +1158,7 @@ static int musb_gadget_disable(struct usb_ep *ep) musb_ep->desc = NULL; musb_ep->end_point.desc = NULL; - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); spin_unlock_irqrestore(&(musb->lock), flags); @@ -1222,13 +1222,22 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req) rxstate(musb, req); } +static int musb_ep_restart_resume_work(struct musb *musb, void *data) +{ + struct musb_request *req = data; + + musb_ep_restart(musb, req); + + return 0; +} + static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { struct musb_ep *musb_ep; struct musb_request *request; struct musb *musb; - int status = 0; + int status; unsigned long lockflags; if (!ep || !req) @@ -1245,6 +1254,17 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, if (request->ep != musb_ep) return -EINVAL; + status = pm_runtime_get(musb->controller); + if ((status != -EINPROGRESS) && status < 0) { + dev_err(musb->controller, + "pm runtime get failed in %s\n", + __func__); + pm_runtime_put_noidle(musb->controller); + + return status; + } + status = 0; + trace_musb_req_enq(request); /* request is mine now... */ @@ -1255,7 +1275,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, map_dma_buffer(request, musb, musb_ep); - pm_runtime_get_sync(musb->controller); spin_lock_irqsave(&musb->lock, lockflags); /* don't queue if the ep is down */ @@ -1271,8 +1290,14 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, list_add_tail(&request->list, &musb_ep->req_list); /* it this is the head of the queue, start i/o ... 
*/ - if (!musb_ep->busy && &request->list == musb_ep->req_list.next) - musb_ep_restart(musb, request); + if (!musb_ep->busy && &request->list == musb_ep->req_list.next) { + status = musb_queue_resume_work(musb, + musb_ep_restart_resume_work, + request); + if (status < 0) + dev_err(musb->controller, "%s resume work: %i\n", + __func__, status); + } unlock: spin_unlock_irqrestore(&musb->lock, lockflags); @@ -1969,7 +1994,7 @@ static int musb_gadget_stop(struct usb_gadget *g) */ /* Force check of devctl register for PM runtime */ - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); pm_runtime_mark_last_busy(musb->controller); pm_runtime_put_autosuspend(musb->controller); diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 53bc4ceefe89ad16dcb576fb2fd18c37ad404e23..f6cdbad00daceb5837a8cfcdec37438389aae800 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -2237,7 +2237,7 @@ static int musb_urb_enqueue( * Some musb cores don't support high bandwidth ISO transfers; and * we don't (yet!) support high bandwidth interrupt transfers. */ - qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03); + qh->hb_mult = usb_endpoint_maxp_mult(epd); if (qh->hb_mult > 1) { int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 61b5f1c3c5bcd12e53364e074b5a77ba60362ed5..0b4595439d51390ed8f57521f626d047520e37c4 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -132,7 +132,6 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) musb_dbg(musb, "Root port resuming, power %02x", power); - /* later, GetPortStatus will stop RESUME signaling */ musb->port1_status |= MUSB_PORT_STAT_RESUME; schedule_delayed_work(&musb->finish_resume_work, msecs_to_jiffies(USB_RESUME_TIMEOUT)); diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index cc1225485509cbabda9c3241334889776edf8870..8b73214a9ea3b41aa40848bb17c6f2c25397a457 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -277,12 +277,12 @@ static int omap2430_musb_init(struct musb *musb) if (status == -ENXIO) return status; - pr_err("HS USB OTG: no transceiver configured\n"); + dev_dbg(dev, "HS USB OTG: no transceiver configured\n"); return -EPROBE_DEFER; } if (IS_ERR(musb->phy)) { - pr_err("HS USB OTG: no PHY configured\n"); + dev_err(dev, "HS USB OTG: no PHY configured\n"); return PTR_ERR(musb->phy); } musb->isr = omap2430_musb_interrupt; @@ -301,7 +301,7 @@ static int omap2430_musb_init(struct musb *musb) musb_writel(musb->mregs, OTG_INTERFSEL, l); - pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " + dev_dbg(dev, "HS USB OTG: revision 0x%x, sysconfig 0x%02x, " "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", musb_readl(musb->mregs, OTG_REVISION), musb_readl(musb->mregs, OTG_SYSCONFIG), @@ -513,17 +513,18 @@ static int omap2430_probe(struct platform_device *pdev) } pm_runtime_enable(glue->dev); - pm_runtime_use_autosuspend(glue->dev); - pm_runtime_set_autosuspend_delay(glue->dev, 100); ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); - goto err2; + goto err3; } return 0; +err3: + pm_runtime_disable(glue->dev); + err2: platform_device_put(musb); @@ -535,10 +536,7 @@ static int omap2430_remove(struct platform_device *pdev) { struct omap2430_glue *glue = platform_get_drvdata(pdev); - pm_runtime_get_sync(glue->dev); platform_device_unregister(glue->musb); - 
pm_runtime_put_sync(glue->dev); - pm_runtime_dont_use_autosuspend(glue->dev); pm_runtime_disable(glue->dev); return 0; diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index 1408245be18e0402c9170ecedcb6f9e2c0b524f1..d0be0eadd0d94033b1a171d76a96e0baa0a94808 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c @@ -186,16 +186,6 @@ static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci) if (musb->int_usb) writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB); - /* - * sunxi musb often signals babble on low / full speed device - * disconnect, without ever raising MUSB_INTR_DISCONNECT, since - * normally babble never happens treat it as disconnect. - */ - if ((musb->int_usb & MUSB_INTR_BABBLE) && is_host_active(musb)) { - musb->int_usb &= ~MUSB_INTR_BABBLE; - musb->int_usb |= MUSB_INTR_DISCONNECT; - } - if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) { /* ep0 FADDR must be 0 when (re)entering peripheral mode */ musb_ep_select(musb->mregs, 0); @@ -390,6 +380,20 @@ static int sunxi_musb_set_mode(struct musb *musb, u8 mode) return 0; } +static int sunxi_musb_recover(struct musb *musb) +{ + struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); + + /* + * Schedule a phy_set_mode with the current glue->phy_mode value, + * this will force end the current session. + */ + set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags); + schedule_work(&glue->work); + + return 0; +} + /* * sunxi musb register layout * 0x00 - 0x17 fifo regs, 1 long per fifo @@ -618,6 +622,7 @@ static const struct musb_platform_ops sunxi_musb_ops = { .dma_init = sunxi_musb_dma_controller_create, .dma_exit = sunxi_musb_dma_controller_destroy, .set_mode = sunxi_musb_set_mode, + .recover = sunxi_musb_recover, .set_vbus = sunxi_musb_set_vbus, .pre_root_reset_end = sunxi_musb_pre_root_reset_end, .post_root_reset_end = sunxi_musb_post_root_reset_end, diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index df7c9f46be548f61800b7beaa7f182b5dc447ad5..e85cc8e4e7a9c02e32fdef579d04d8adb22469df 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -724,7 +724,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", usb_otg_state_string(musb->xceiv->otg->state), otg_stat); idle_timeout = jiffies + (1 * HZ); - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); } else /* A-dev state machine */ { dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", @@ -814,7 +814,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) break; } } - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); return idle_timeout; } @@ -864,7 +864,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci) musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); if (reg & ~TUSB_PRCM_WNORCS) { musb->is_active = 1; - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); } dev_dbg(musb->controller, "wake %sactive %02x\n", musb->is_active ? 
"" : "in", reg); diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index b9c409a18faaea511d3280a01bd519769a3ccbd4..61cef7511a506af48e3aa2165e0ac992b9cf4d8d 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -84,6 +84,7 @@ config SAMSUNG_USBPHY config TWL6030_USB tristate "TWL6030 USB Transceiver Driver" depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS + depends on OF help Enable this to support the USB OTG transceiver on TWL6030 family chips. This TWL6030 transceiver has the VBUS and ID GND diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c index 42a1afe36a905240cd5eb043004e931e21df0d1d..5f5f19813fde15eba836edacc4c71220426ff636 100644 --- a/drivers/usb/phy/phy-am335x-control.c +++ b/drivers/usb/phy/phy-am335x-control.c @@ -134,10 +134,12 @@ struct phy_control *am335x_get_phy_control(struct device *dev) return NULL; dev = bus_find_device(&platform_bus_type, NULL, node, match); + of_node_put(node); if (!dev) return NULL; ctrl_usb = dev_get_drvdata(dev); + put_device(dev); if (!ctrl_usb) return NULL; return &ctrl_usb->phy_ctrl; diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index 8311ba2968cd1e14f21b6c497b463d4ec4cf9abe..89d6e7a5fdb7ab35548a9622a5d266e47b4e9355 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -59,6 +59,15 @@ EXPORT_SYMBOL_GPL(usb_phy_generic_unregister); static int nop_set_suspend(struct usb_phy *x, int suspend) { + struct usb_phy_generic *nop = dev_get_drvdata(x->dev); + + if (!IS_ERR(nop->clk)) { + if (suspend) + clk_disable_unprepare(nop->clk); + else + clk_prepare_enable(nop->clk); + } + return 0; } diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c index 8d111ec653e41fa06efd690671c789ddc59958be..042c5a8fd423d84a8a66418b227fabb0d60ef03a 100644 --- a/drivers/usb/phy/phy-isp1301-omap.c +++ b/drivers/usb/phy/phy-isp1301-omap.c @@ -94,7 +94,7 @@ struct isp1301 { #if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3) -#if defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE)) +#if IS_REACHABLE(CONFIG_TPS65010) #include diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index a72e8d670adc846b225d5cfd6fc27b43f5bc9bea..628b600b02b174b4a06f6a6c80e40eb5b8ef3692 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -108,7 +108,6 @@ struct twl6030_usb { enum musb_vbus_id_status linkstat; u8 asleep; bool vbus_enable; - const char *regulator; }; #define comparator_to_twl(x) container_of((x), struct twl6030_usb, comparator) @@ -166,7 +165,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl) /* Program MISC2 register and set bit VUSB_IN_VBAT */ twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2); - twl->usb3v3 = regulator_get(twl->dev, twl->regulator); + twl->usb3v3 = regulator_get(twl->dev, "usb"); if (IS_ERR(twl->usb3v3)) return -ENODEV; @@ -341,7 +340,11 @@ static int twl6030_usb_probe(struct platform_device *pdev) int status, err; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; - struct twl4030_usb_data *pdata = dev_get_platdata(dev); + + if (!np) { + dev_err(dev, "no DT info\n"); + return -EINVAL; + } twl = devm_kzalloc(dev, sizeof(*twl), GFP_KERNEL); if (!twl) @@ -361,18 +364,6 @@ static int twl6030_usb_probe(struct platform_device *pdev) return -EPROBE_DEFER; } - if (np) { - twl->regulator = "usb"; - } else if (pdata) { - if (pdata->features & TWL6032_SUBCLASS) 
- twl->regulator = "ldousb"; - else - twl->regulator = "vusb"; - } else { - dev_err(&pdev->dev, "twl6030 initialized without pdata\n"); - return -EINVAL; - } - /* init spinlock for workqueue */ spin_lock_init(&twl->lock); @@ -436,13 +427,11 @@ static int twl6030_usb_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF static const struct of_device_id twl6030_usb_id_table[] = { { .compatible = "ti,twl6030-usb" }, {} }; MODULE_DEVICE_TABLE(of, twl6030_usb_id_table); -#endif static struct platform_driver twl6030_usb_driver = { .probe = twl6030_usb_probe, diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 857e78337324b6488a77d8310da1a32c615e9349..d1af831f43ebad07e86e1d62b0730eb0579ebb4c 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -100,10 +100,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt) static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) { - if (list_empty(&pipe->list)) - return NULL; - - return list_first_entry(&pipe->list, struct usbhs_pkt, node); + return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node); } static void usbhsf_fifo_clear(struct usbhs_pipe *pipe, diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 56ecb8b5115dd66b8d0db52849e9738e127a2afa..d9bc8dafe000c84732e19c4a7530ab150c1f23fc 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -255,6 +255,16 @@ config USB_SERIAL_F81232 To compile this driver as a module, choose M here: the module will be called f81232. +config USB_SERIAL_F8153X + tristate "USB Fintek F81532/534 Multi-Ports Serial Driver" + help + Say Y here if you want to use the Fintek F81532/534 Multi-Ports + USB to serial adapter. + + To compile this driver as a module, choose M here: the + module will be called f81534. + + config USB_SERIAL_GARMIN tristate "USB Garmin GPS driver" help diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile index 349d9df0895f5afc4139b046383362bc46348739..9e43b7b002eba7dc7c98c81919e0889f4be8b877 100644 --- a/drivers/usb/serial/Makefile +++ b/drivers/usb/serial/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_USB_SERIAL_EDGEPORT) += io_edgeport.o obj-$(CONFIG_USB_SERIAL_EDGEPORT_TI) += io_ti.o obj-$(CONFIG_USB_SERIAL_EMPEG) += empeg.o obj-$(CONFIG_USB_SERIAL_F81232) += f81232.o +obj-$(CONFIG_USB_SERIAL_F8153X) += f81534.o obj-$(CONFIG_USB_SERIAL_FTDI_SIO) += ftdi_sio.o obj-$(CONFIG_USB_SERIAL_GARMIN) += garmin_gps.o obj-$(CONFIG_USB_SERIAL_IPAQ) += ipaq.o diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index f139488d0816c04374a83eeb3af3ef29ab30d33c..2597b83a8ae254d0eff28b2d2d124ff51c196248 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -61,13 +61,26 @@ * the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato. 
*/ +#define CH341_REQ_READ_VERSION 0x5F #define CH341_REQ_WRITE_REG 0x9A #define CH341_REQ_READ_REG 0x95 -#define CH341_REG_BREAK1 0x05 -#define CH341_REG_BREAK2 0x18 -#define CH341_NBREAK_BITS_REG1 0x01 -#define CH341_NBREAK_BITS_REG2 0x40 - +#define CH341_REQ_SERIAL_INIT 0xA1 +#define CH341_REQ_MODEM_CTRL 0xA4 + +#define CH341_REG_BREAK 0x05 +#define CH341_REG_LCR 0x18 +#define CH341_NBREAK_BITS 0x01 + +#define CH341_LCR_ENABLE_RX 0x80 +#define CH341_LCR_ENABLE_TX 0x40 +#define CH341_LCR_MARK_SPACE 0x20 +#define CH341_LCR_PAR_EVEN 0x10 +#define CH341_LCR_ENABLE_PAR 0x08 +#define CH341_LCR_STOP_BITS_2 0x04 +#define CH341_LCR_CS8 0x03 +#define CH341_LCR_CS7 0x02 +#define CH341_LCR_CS6 0x01 +#define CH341_LCR_CS5 0x00 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, @@ -119,10 +132,10 @@ static int ch341_control_in(struct usb_device *dev, return r; } -static int ch341_set_baudrate(struct usb_device *dev, - struct ch341_private *priv) +static int ch341_init_set_baudrate(struct usb_device *dev, + struct ch341_private *priv, unsigned ctrl) { - short a, b; + short a; int r; unsigned long factor; short divisor; @@ -142,18 +155,17 @@ static int ch341_set_baudrate(struct usb_device *dev, factor = 0x10000 - factor; a = (factor & 0xff00) | divisor; - b = factor & 0xff; - r = ch341_control_out(dev, 0x9a, 0x1312, a); - if (!r) - r = ch341_control_out(dev, 0x9a, 0x0f2c, b); + /* 0x9c is "enable SFR_UART Control register and timer" */ + r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, + 0x9c | (ctrl << 8), a | 0x80); return r; } static int ch341_set_handshake(struct usb_device *dev, u8 control) { - return ch341_control_out(dev, 0xa4, ~control, 0); + return ch341_control_out(dev, CH341_REQ_MODEM_CTRL, ~control, 0); } static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) @@ -167,7 +179,7 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) if (!buffer) return -ENOMEM; - r = ch341_control_in(dev, 0x95, 0x0706, 0, buffer, size); + r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x0706, 0, buffer, size); if (r < 0) goto out; @@ -197,24 +209,21 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) return -ENOMEM; /* expect two bytes 0x27 0x00 */ - r = ch341_control_in(dev, 0x5f, 0, 0, buffer, size); + r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size); if (r < 0) goto out; + dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]); - r = ch341_control_out(dev, 0xa1, 0, 0); - if (r < 0) - goto out; - - r = ch341_set_baudrate(dev, priv); + r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0); if (r < 0) goto out; /* expect two bytes 0x56 0x00 */ - r = ch341_control_in(dev, 0x95, 0x2518, 0, buffer, size); + r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x2518, 0, buffer, size); if (r < 0) goto out; - r = ch341_control_out(dev, 0x9a, 0x2518, 0x0050); + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, 0x0050); if (r < 0) goto out; @@ -223,11 +232,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) if (r < 0) goto out; - r = ch341_control_out(dev, 0xa1, 0x501f, 0xd90a); - if (r < 0) - goto out; - - r = ch341_set_baudrate(dev, priv); + r = ch341_init_set_baudrate(dev, priv, 0); if (r < 0) goto out; @@ -342,16 +347,53 @@ static void ch341_set_termios(struct tty_struct *tty, struct ch341_private *priv = usb_get_serial_port_data(port); unsigned baud_rate; unsigned long flags; + unsigned char ctrl; + int r; + + /* redundant changes may cause the 
chip to lose bytes */ + if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios)) + return; baud_rate = tty_get_baud_rate(tty); priv->baud_rate = baud_rate; + ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX; + + switch (C_CSIZE(tty)) { + case CS5: + ctrl |= CH341_LCR_CS5; + break; + case CS6: + ctrl |= CH341_LCR_CS6; + break; + case CS7: + ctrl |= CH341_LCR_CS7; + break; + case CS8: + ctrl |= CH341_LCR_CS8; + break; + } + + if (C_PARENB(tty)) { + ctrl |= CH341_LCR_ENABLE_PAR; + if (C_PARODD(tty) == 0) + ctrl |= CH341_LCR_PAR_EVEN; + if (C_CMSPAR(tty)) + ctrl |= CH341_LCR_MARK_SPACE; + } + + if (C_CSTOPB(tty)) + ctrl |= CH341_LCR_STOP_BITS_2; if (baud_rate) { spin_lock_irqsave(&priv->lock, flags); priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); spin_unlock_irqrestore(&priv->lock, flags); - ch341_set_baudrate(port->serial->dev, priv); + r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl); + if (r < 0 && old_termios) { + priv->baud_rate = tty_termios_baud_rate(old_termios); + tty_termios_copy_hw(&tty->termios, old_termios); + } } else { spin_lock_irqsave(&priv->lock, flags); priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); @@ -360,17 +402,12 @@ static void ch341_set_termios(struct tty_struct *tty, ch341_set_handshake(port->serial->dev, priv->line_control); - /* Unimplemented: - * (cflag & CSIZE) : data bits [5, 8] - * (cflag & PARENB) : parity {NONE, EVEN, ODD} - * (cflag & CSTOPB) : stop bits [1, 2] - */ } static void ch341_break_ctl(struct tty_struct *tty, int break_state) { const uint16_t ch341_break_reg = - ((uint16_t) CH341_REG_BREAK2 << 8) | CH341_REG_BREAK1; + ((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK; struct usb_serial_port *port = tty->driver_data; int r; uint16_t reg_contents; @@ -391,12 +428,12 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state) __func__, break_reg[0], break_reg[1]); if (break_state != 0) { dev_dbg(&port->dev, "%s - Enter break state requested\n", __func__); - break_reg[0] &= ~CH341_NBREAK_BITS_REG1; - break_reg[1] &= ~CH341_NBREAK_BITS_REG2; + break_reg[0] &= ~CH341_NBREAK_BITS; + break_reg[1] &= ~CH341_LCR_ENABLE_TX; } else { dev_dbg(&port->dev, "%s - Leave break state requested\n", __func__); - break_reg[0] |= CH341_NBREAK_BITS_REG1; - break_reg[1] |= CH341_NBREAK_BITS_REG2; + break_reg[0] |= CH341_NBREAK_BITS; + break_reg[1] |= CH341_LCR_ENABLE_TX; } dev_dbg(&port->dev, "%s - New ch341 break register contents - reg1: %x, reg2: %x\n", __func__, break_reg[0], break_reg[1]); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f61477bed3a8d3b16ec444dc090def82bd509798..fff718352e0cda323d4d5d30817a92af36e9de16 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -23,6 +23,9 @@ #include #include #include +#include +#include +#include #define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver" @@ -33,7 +36,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *); static void cp210x_close(struct usb_serial_port *); static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *); static void cp210x_get_termios_port(struct usb_serial_port *port, - unsigned int *cflagp, unsigned int *baudp); + tcflag_t *cflagp, unsigned int *baudp); static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *, struct ktermios *); static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *, @@ -44,6 +47,9 @@ static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int); static int 
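The reworked ch341_set_termios() above now translates the termios flags into the new CH341_LCR_* bits. A compact restatement of that mapping, as a sketch only (the driver does this inline; the helper name below is not part of the patch):

	#include <linux/tty.h>
	#include <linux/types.h>

	static u8 ch341_build_lcr(tcflag_t cflag)
	{
		u8 lcr = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;

		switch (cflag & CSIZE) {
		case CS5:
			lcr |= CH341_LCR_CS5;
			break;
		case CS6:
			lcr |= CH341_LCR_CS6;
			break;
		case CS7:
			lcr |= CH341_LCR_CS7;
			break;
		default:			/* CS8 */
			lcr |= CH341_LCR_CS8;
			break;
		}

		if (cflag & PARENB) {
			lcr |= CH341_LCR_ENABLE_PAR;
			if (!(cflag & PARODD))
				lcr |= CH341_LCR_PAR_EVEN;	/* even parity */
			if (cflag & CMSPAR)
				lcr |= CH341_LCR_MARK_SPACE;	/* mark/space parity */
		}

		if (cflag & CSTOPB)
			lcr |= CH341_LCR_STOP_BITS_2;

		return lcr;
	}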
cp210x_tiocmset_port(struct usb_serial_port *port, unsigned int, unsigned int); static void cp210x_break_ctl(struct tty_struct *, int); +static int cp210x_attach(struct usb_serial *); +static void cp210x_disconnect(struct usb_serial *); +static void cp210x_release(struct usb_serial *); static int cp210x_port_probe(struct usb_serial_port *); static int cp210x_port_remove(struct usb_serial_port *); static void cp210x_dtr_rts(struct usb_serial_port *p, int on); @@ -131,6 +137,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ + { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ @@ -208,6 +215,16 @@ static const struct usb_device_id id_table[] = { MODULE_DEVICE_TABLE(usb, id_table); +struct cp210x_serial_private { +#ifdef CONFIG_GPIOLIB + struct gpio_chip gc; + u8 config; + u8 gpio_mode; + bool gpio_registered; +#endif + u8 partnum; +}; + struct cp210x_port_private { __u8 bInterfaceNumber; bool has_swapped_line_ctl; @@ -229,6 +246,9 @@ static struct usb_serial_driver cp210x_device = { .tx_empty = cp210x_tx_empty, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, + .attach = cp210x_attach, + .disconnect = cp210x_disconnect, + .release = cp210x_release, .port_probe = cp210x_port_probe, .port_remove = cp210x_port_remove, .dtr_rts = cp210x_dtr_rts @@ -271,6 +291,7 @@ static struct usb_serial_driver * const serial_drivers[] = { #define CP210X_SET_CHARS 0x19 #define CP210X_GET_BAUDRATE 0x1D #define CP210X_SET_BAUDRATE 0x1E +#define CP210X_VENDOR_SPECIFIC 0xFF /* CP210X_IFC_ENABLE */ #define UART_ENABLE 0x0001 @@ -313,6 +334,21 @@ static struct usb_serial_driver * const serial_drivers[] = { #define CONTROL_WRITE_DTR 0x0100 #define CONTROL_WRITE_RTS 0x0200 +/* CP210X_VENDOR_SPECIFIC values */ +#define CP210X_READ_LATCH 0x00C2 +#define CP210X_GET_PARTNUM 0x370B +#define CP210X_GET_PORTCONFIG 0x370C +#define CP210X_GET_DEVICEMODE 0x3711 +#define CP210X_WRITE_LATCH 0x37E1 + +/* Part number definitions */ +#define CP210X_PARTNUM_CP2101 0x01 +#define CP210X_PARTNUM_CP2102 0x02 +#define CP210X_PARTNUM_CP2103 0x03 +#define CP210X_PARTNUM_CP2104 0x04 +#define CP210X_PARTNUM_CP2105 0x05 +#define CP210X_PARTNUM_CP2108 0x08 + /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ struct cp210x_comm_status { __le32 ulErrors; @@ -368,6 +404,60 @@ struct cp210x_flow_ctl { #define CP210X_SERIAL_RTS_ACTIVE 1 #define CP210X_SERIAL_RTS_FLOW_CTL 2 +/* CP210X_VENDOR_SPECIFIC, CP210X_GET_DEVICEMODE call reads these 0x2 bytes. */ +struct cp210x_pin_mode { + u8 eci; + u8 sci; +} __packed; + +#define CP210X_PIN_MODE_MODEM 0 +#define CP210X_PIN_MODE_GPIO BIT(0) + +/* + * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xf bytes. + * Structure needs padding due to unused/unspecified bytes. 
+ */ +struct cp210x_config { + __le16 gpio_mode; + u8 __pad0[2]; + __le16 reset_state; + u8 __pad1[4]; + __le16 suspend_state; + u8 sci_cfg; + u8 eci_cfg; + u8 device_cfg; +} __packed; + +/* GPIO modes */ +#define CP210X_SCI_GPIO_MODE_OFFSET 9 +#define CP210X_SCI_GPIO_MODE_MASK GENMASK(11, 9) + +#define CP210X_ECI_GPIO_MODE_OFFSET 2 +#define CP210X_ECI_GPIO_MODE_MASK GENMASK(3, 2) + +/* CP2105 port configuration values */ +#define CP2105_GPIO0_TXLED_MODE BIT(0) +#define CP2105_GPIO1_RXLED_MODE BIT(1) +#define CP2105_GPIO1_RS485_MODE BIT(2) + +/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */ +struct cp210x_gpio_write { + u8 mask; + u8 state; +} __packed; + +/* + * Helper to get interface number when we only have struct usb_serial. + */ +static u8 cp210x_interface_num(struct usb_serial *serial) +{ + struct usb_host_interface *cur_altsetting; + + cur_altsetting = serial->interface->cur_altsetting; + + return cur_altsetting->desc.bInterfaceNumber; +} + /* * Reads a variable-sized block of CP210X_ registers, identified by req. * Returns data into buf in native USB byte order. @@ -401,7 +491,7 @@ static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req, dev_err(&port->dev, "failed get req 0x%x size %d status: %d\n", req, bufsize, result); if (result >= 0) - result = -EPROTO; + result = -EIO; /* * FIXME Some callers don't bother to check for error, @@ -463,6 +553,40 @@ static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val) return cp210x_read_reg_block(port, req, val, sizeof(*val)); } +/* + * Reads a variable-sized vendor block of CP210X_ registers, identified by val. + * Returns data into buf in native USB byte order. + */ +static int cp210x_read_vendor_block(struct usb_serial *serial, u8 type, u16 val, + void *buf, int bufsize) +{ + void *dmabuf; + int result; + + dmabuf = kmalloc(bufsize, GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + CP210X_VENDOR_SPECIFIC, type, val, + cp210x_interface_num(serial), dmabuf, bufsize, + USB_CTRL_GET_TIMEOUT); + if (result == bufsize) { + memcpy(buf, dmabuf, bufsize); + result = 0; + } else { + dev_err(&serial->interface->dev, + "failed to get vendor val 0x%04x size %d: %d\n", val, + bufsize, result); + if (result >= 0) + result = -EIO; + } + + kfree(dmabuf); + + return result; +} + /* * Writes any 16-bit CP210X_ register (req) whose value is passed * entirely in the wValue field of the USB request. @@ -514,7 +638,7 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req, dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n", req, bufsize, result); if (result >= 0) - result = -EPROTO; + result = -EIO; } return result; @@ -532,6 +656,42 @@ static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val) return cp210x_write_reg_block(port, req, &le32_val, sizeof(le32_val)); } +#ifdef CONFIG_GPIOLIB +/* + * Writes a variable-sized vendor block of CP210X_ registers, identified by val. + * Data in buf must be in native USB byte order. 
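cp210x_read_vendor_block() above bounces the caller's buffer through kmalloc memory because usb_control_msg() requires a DMA-capable buffer, and it converts short transfers into -EIO. The same pattern in isolation, with generic names (this is the idiom the driver follows, not the driver's function itself):

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/usb.h>

	static int read_vendor_reg(struct usb_device *udev, u8 req, u16 val,
				   u16 idx, void *buf, int len)
	{
		void *dmabuf;
		int ret;

		dmabuf = kmalloc(len, GFP_KERNEL);	/* DMA-safe bounce buffer */
		if (!dmabuf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
				      USB_DIR_IN | USB_TYPE_VENDOR |
				      USB_RECIP_INTERFACE,
				      val, idx, dmabuf, len,
				      USB_CTRL_GET_TIMEOUT);
		if (ret == len) {
			memcpy(buf, dmabuf, len);
			ret = 0;
		} else if (ret >= 0) {
			ret = -EIO;	/* short transfer: report as I/O error */
		}

		kfree(dmabuf);
		return ret;
	}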
+ */ +static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type, + u16 val, void *buf, int bufsize) +{ + void *dmabuf; + int result; + + dmabuf = kmemdup(buf, bufsize, GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), + CP210X_VENDOR_SPECIFIC, type, val, + cp210x_interface_num(serial), dmabuf, bufsize, + USB_CTRL_SET_TIMEOUT); + + kfree(dmabuf); + + if (result == bufsize) { + result = 0; + } else { + dev_err(&serial->interface->dev, + "failed to set vendor val 0x%04x size %d: %d\n", val, + bufsize, result); + if (result >= 0) + result = -EIO; + } + + return result; +} +#endif + /* * Detect CP2108 GET_LINE_CTL bug and activate workaround. * Write a known good value 0x800, read it back. @@ -682,7 +842,7 @@ static int cp210x_get_tx_queue_byte_count(struct usb_serial_port *port, } else { dev_err(&port->dev, "failed to get comm status: %d\n", result); if (result >= 0) - result = -EPROTO; + result = -EIO; } kfree(sts); @@ -718,7 +878,7 @@ static void cp210x_get_termios(struct tty_struct *tty, &tty->termios.c_cflag, &baud); tty_encode_baud_rate(tty, baud, baud); } else { - unsigned int cflag; + tcflag_t cflag; cflag = 0; cp210x_get_termios_port(port, &cflag, &baud); } @@ -729,10 +889,10 @@ static void cp210x_get_termios(struct tty_struct *tty, * This is the heart of cp210x_get_termios which always uses a &usb_serial_port. */ static void cp210x_get_termios_port(struct usb_serial_port *port, - unsigned int *cflagp, unsigned int *baudp) + tcflag_t *cflagp, unsigned int *baudp) { struct device *dev = &port->dev; - unsigned int cflag; + tcflag_t cflag; struct cp210x_flow_ctl flow_ctl; u32 baud; u16 bits; @@ -929,16 +1089,9 @@ static void cp210x_set_termios(struct tty_struct *tty, dev_dbg(dev, "%s - data bits = 7\n", __func__); break; case CS8: - bits |= BITS_DATA_8; - dev_dbg(dev, "%s - data bits = 8\n", __func__); - break; - /*case CS9: - bits |= BITS_DATA_9; - dev_dbg(dev, "%s - data bits = 9\n", __func__); - break;*/ default: - dev_dbg(dev, "cp210x driver does not support the number of bits requested, using 8 bit mode\n"); bits |= BITS_DATA_8; + dev_dbg(dev, "%s - data bits = 8\n", __func__); break; } if (cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits)) @@ -1107,10 +1260,188 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state) cp210x_write_u16_reg(port, CP210X_SET_BREAK, state); } +#ifdef CONFIG_GPIOLIB +static int cp210x_gpio_request(struct gpio_chip *gc, unsigned int offset) +{ + struct usb_serial *serial = gpiochip_get_data(gc); + struct cp210x_serial_private *priv = usb_get_serial_data(serial); + + switch (offset) { + case 0: + if (priv->config & CP2105_GPIO0_TXLED_MODE) + return -ENODEV; + break; + case 1: + if (priv->config & (CP2105_GPIO1_RXLED_MODE | + CP2105_GPIO1_RS485_MODE)) + return -ENODEV; + break; + } + + return 0; +} + +static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct usb_serial *serial = gpiochip_get_data(gc); + int result; + u8 buf; + + result = cp210x_read_vendor_block(serial, REQTYPE_INTERFACE_TO_HOST, + CP210X_READ_LATCH, &buf, sizeof(buf)); + if (result < 0) + return result; + + return !!(buf & BIT(gpio)); +} + +static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value) +{ + struct usb_serial *serial = gpiochip_get_data(gc); + struct cp210x_gpio_write buf; + + if (value == 1) + buf.state = BIT(gpio); + else + buf.state = 0; + + buf.mask = BIT(gpio); + + cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE, + 
CP210X_WRITE_LATCH, &buf, sizeof(buf)); +} + +static int cp210x_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio) +{ + /* Hardware does not support an input mode */ + return 0; +} + +static int cp210x_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio) +{ + /* Hardware does not support an input mode */ + return -ENOTSUPP; +} + +static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, + int value) +{ + return 0; +} + +static int cp210x_gpio_set_single_ended(struct gpio_chip *gc, unsigned int gpio, + enum single_ended_mode mode) +{ + struct usb_serial *serial = gpiochip_get_data(gc); + struct cp210x_serial_private *priv = usb_get_serial_data(serial); + + /* Succeed only if in correct mode (this can't be set at runtime) */ + if ((mode == LINE_MODE_PUSH_PULL) && (priv->gpio_mode & BIT(gpio))) + return 0; + + if ((mode == LINE_MODE_OPEN_DRAIN) && !(priv->gpio_mode & BIT(gpio))) + return 0; + + return -ENOTSUPP; +} + +/* + * This function is for configuring GPIO using shared pins, where other signals + * are made unavailable by configuring the use of GPIO. This is believed to be + * only applicable to the cp2105 at this point, the other devices supported by + * this driver that provide GPIO do so in a way that does not impact other + * signals and are thus expected to have very different initialisation. + */ +static int cp2105_shared_gpio_init(struct usb_serial *serial) +{ + struct cp210x_serial_private *priv = usb_get_serial_data(serial); + struct cp210x_pin_mode mode; + struct cp210x_config config; + u8 intf_num = cp210x_interface_num(serial); + int result; + + result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, + CP210X_GET_DEVICEMODE, &mode, + sizeof(mode)); + if (result < 0) + return result; + + result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, + CP210X_GET_PORTCONFIG, &config, + sizeof(config)); + if (result < 0) + return result; + + /* 2 banks of GPIO - One for the pins taken from each serial port */ + if (intf_num == 0) { + if (mode.eci == CP210X_PIN_MODE_MODEM) + return 0; + + priv->config = config.eci_cfg; + priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) & + CP210X_ECI_GPIO_MODE_MASK) >> + CP210X_ECI_GPIO_MODE_OFFSET); + priv->gc.ngpio = 2; + } else if (intf_num == 1) { + if (mode.sci == CP210X_PIN_MODE_MODEM) + return 0; + + priv->config = config.sci_cfg; + priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) & + CP210X_SCI_GPIO_MODE_MASK) >> + CP210X_SCI_GPIO_MODE_OFFSET); + priv->gc.ngpio = 3; + } else { + return -ENODEV; + } + + priv->gc.label = "cp210x"; + priv->gc.request = cp210x_gpio_request; + priv->gc.get_direction = cp210x_gpio_direction_get; + priv->gc.direction_input = cp210x_gpio_direction_input; + priv->gc.direction_output = cp210x_gpio_direction_output; + priv->gc.get = cp210x_gpio_get; + priv->gc.set = cp210x_gpio_set; + priv->gc.set_single_ended = cp210x_gpio_set_single_ended; + priv->gc.owner = THIS_MODULE; + priv->gc.parent = &serial->interface->dev; + priv->gc.base = -1; + priv->gc.can_sleep = true; + + result = gpiochip_add_data(&priv->gc, serial); + if (!result) + priv->gpio_registered = true; + + return result; +} + +static void cp210x_gpio_remove(struct usb_serial *serial) +{ + struct cp210x_serial_private *priv = usb_get_serial_data(serial); + + if (priv->gpio_registered) { + gpiochip_remove(&priv->gc); + priv->gpio_registered = false; + } +} + +#else + +static int cp2105_shared_gpio_init(struct usb_serial *serial) +{ + return 0; +} + +static void cp210x_gpio_remove(struct 
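Taken together, cp210x_gpio_set() and the cp210x_gpio_write layout mean a latch update names the bits to touch in .mask and their new values in .state. A small usage sketch (the wrapper function is illustrative, not part of the patch):

	/* Drive CP2105 GPIO line 1 high or low via the WRITE_LATCH request. */
	static void cp210x_example_set_gpio1(struct usb_serial *serial, bool high)
	{
		struct cp210x_gpio_write buf = {
			.mask  = BIT(1),		/* only touch GPIO.1 */
			.state = high ? BIT(1) : 0,	/* its new level */
		};

		cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE,
					  CP210X_WRITE_LATCH, &buf, sizeof(buf));
	}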
usb_serial *serial) +{ + /* Nothing to do */ +} + +#endif + static int cp210x_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; - struct usb_host_interface *cur_altsetting; struct cp210x_port_private *port_priv; int ret; @@ -1118,8 +1449,7 @@ static int cp210x_port_probe(struct usb_serial_port *port) if (!port_priv) return -ENOMEM; - cur_altsetting = serial->interface->cur_altsetting; - port_priv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber; + port_priv->bInterfaceNumber = cp210x_interface_num(serial); usb_set_serial_port_data(port, port_priv); @@ -1142,6 +1472,52 @@ static int cp210x_port_remove(struct usb_serial_port *port) return 0; } +static int cp210x_attach(struct usb_serial *serial) +{ + int result; + struct cp210x_serial_private *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, + CP210X_GET_PARTNUM, &priv->partnum, + sizeof(priv->partnum)); + if (result < 0) + goto err_free_priv; + + usb_set_serial_data(serial, priv); + + if (priv->partnum == CP210X_PARTNUM_CP2105) { + result = cp2105_shared_gpio_init(serial); + if (result < 0) { + dev_err(&serial->interface->dev, + "GPIO initialisation failed, continuing without GPIO support\n"); + } + } + + return 0; +err_free_priv: + kfree(priv); + + return result; +} + +static void cp210x_disconnect(struct usb_serial *serial) +{ + cp210x_gpio_remove(serial); +} + +static void cp210x_release(struct usb_serial *serial) +{ + struct cp210x_serial_private *priv = usb_get_serial_data(serial); + + cp210x_gpio_remove(serial); + + kfree(priv); +} + module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c new file mode 100644 index 0000000000000000000000000000000000000000..8282a6a18fee83f6f1838b2f6f55b37a0ec4d409 --- /dev/null +++ b/drivers/usb/serial/f81534.c @@ -0,0 +1,1409 @@ +/* + * F81532/F81534 USB to Serial Ports Bridge + * + * F81532 => 2 Serial Ports + * F81534 => 4 Serial Ports + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Copyright (C) 2016 Feature Integration Technology Inc., (Fintek) + * Copyright (C) 2016 Tom Tsai (Tom_Tsai@fintek.com.tw) + * Copyright (C) 2016 Peter Hong (Peter_Hong@fintek.com.tw) + * + * The F81532/F81534 had 1 control endpoint for setting, 1 endpoint bulk-out + * for all serial port TX and 1 endpoint bulk-in for all serial port read in + * (Read Data/MSR/LSR). + * + * Write URB is fixed with 512bytes, per serial port used 128Bytes. + * It can be described by f81534_prepare_write_buffer() + * + * Read URB is 512Bytes max, per serial port used 128Bytes. + * It can be described by f81534_process_read_urb() and maybe received with + * 128x1,2,3,4 bytes. 
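The 512-byte bulk buffers described in the header comment above are divided into four 128-byte blocks, one per physical port. A struct-shaped view of one block, purely for illustration (the driver indexes raw byte offsets instead; see f81534_prepare_write_buffer() and f81534_process_per_serial_block() below):

	struct f81534_block {		/* illustrative, not in the driver */
		u8 phy_port;	/* index 0: physical port number 0..3     */
		u8 token;	/* index 1: F81534_TOKEN_* for this block */
		u8 len;		/* index 2: payload length (TX or RX)     */
		u8 msr;		/* index 3: 0 on TX, current MSR on RX    */
		u8 data[124];	/* index 4..127: serial payload           */
	};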
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* Serial Port register Address */ +#define F81534_UART_BASE_ADDRESS 0x1200 +#define F81534_UART_OFFSET 0x10 +#define F81534_DIVISOR_LSB_REG (0x00 + F81534_UART_BASE_ADDRESS) +#define F81534_DIVISOR_MSB_REG (0x01 + F81534_UART_BASE_ADDRESS) +#define F81534_FIFO_CONTROL_REG (0x02 + F81534_UART_BASE_ADDRESS) +#define F81534_LINE_CONTROL_REG (0x03 + F81534_UART_BASE_ADDRESS) +#define F81534_MODEM_CONTROL_REG (0x04 + F81534_UART_BASE_ADDRESS) +#define F81534_MODEM_STATUS_REG (0x06 + F81534_UART_BASE_ADDRESS) +#define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS) + +#define F81534_DEF_CONF_ADDRESS_START 0x3000 +#define F81534_DEF_CONF_SIZE 8 + +#define F81534_CUSTOM_ADDRESS_START 0x2f00 +#define F81534_CUSTOM_DATA_SIZE 0x10 +#define F81534_CUSTOM_NO_CUSTOM_DATA 0xff +#define F81534_CUSTOM_VALID_TOKEN 0xf0 +#define F81534_CONF_OFFSET 1 + +#define F81534_MAX_DATA_BLOCK 64 +#define F81534_MAX_BUS_RETRY 20 + +/* Default URB timeout for USB operations */ +#define F81534_USB_MAX_RETRY 10 +#define F81534_USB_TIMEOUT 1000 +#define F81534_SET_GET_REGISTER 0xA0 + +#define F81534_NUM_PORT 4 +#define F81534_UNUSED_PORT 0xff +#define F81534_WRITE_BUFFER_SIZE 512 + +#define DRIVER_DESC "Fintek F81532/F81534" +#define FINTEK_VENDOR_ID_1 0x1934 +#define FINTEK_VENDOR_ID_2 0x2C42 +#define FINTEK_DEVICE_ID 0x1202 +#define F81534_MAX_TX_SIZE 124 +#define F81534_MAX_RX_SIZE 124 +#define F81534_RECEIVE_BLOCK_SIZE 128 +#define F81534_MAX_RECEIVE_BLOCK_SIZE 512 + +#define F81534_TOKEN_RECEIVE 0x01 +#define F81534_TOKEN_WRITE 0x02 +#define F81534_TOKEN_TX_EMPTY 0x03 +#define F81534_TOKEN_MSR_CHANGE 0x04 + +/* + * We used interal SPI bus to access FLASH section. We must wait the SPI bus to + * idle if we performed any command. 
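A quick worked example of the register map above: the per-port UART registers repeat every F81534_UART_OFFSET (0x10) bytes, so physical port 2's line control register sits at F81534_LINE_CONTROL_REG + 2 * 0x10 = 0x1203 + 0x20 = 0x1223, which is exactly the arithmetic f81534_set_port_register() applies further down.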
+ * + * SPI Bus status register: F81534_BUS_REG_STATUS + * Bit 0/1 : BUSY + * Bit 2 : IDLE + */ +#define F81534_BUS_BUSY (BIT(0) | BIT(1)) +#define F81534_BUS_IDLE BIT(2) +#define F81534_BUS_READ_DATA 0x1004 +#define F81534_BUS_REG_STATUS 0x1003 +#define F81534_BUS_REG_START 0x1002 +#define F81534_BUS_REG_END 0x1001 + +#define F81534_CMD_READ 0x03 + +#define F81534_DEFAULT_BAUD_RATE 9600 +#define F81534_MAX_BAUDRATE 115200 + +#define F81534_PORT_CONF_DISABLE_PORT BIT(3) +#define F81534_PORT_CONF_NOT_EXIST_PORT BIT(7) +#define F81534_PORT_UNAVAILABLE \ + (F81534_PORT_CONF_DISABLE_PORT | F81534_PORT_CONF_NOT_EXIST_PORT) + +#define F81534_1X_RXTRIGGER 0xc3 +#define F81534_8X_RXTRIGGER 0xcf + +static const struct usb_device_id f81534_id_table[] = { + { USB_DEVICE(FINTEK_VENDOR_ID_1, FINTEK_DEVICE_ID) }, + { USB_DEVICE(FINTEK_VENDOR_ID_2, FINTEK_DEVICE_ID) }, + {} /* Terminating entry */ +}; + +#define F81534_TX_EMPTY_BIT 0 + +struct f81534_serial_private { + u8 conf_data[F81534_DEF_CONF_SIZE]; + int tty_idx[F81534_NUM_PORT]; + u8 setting_idx; + int opened_port; + struct mutex urb_mutex; +}; + +struct f81534_port_private { + struct mutex mcr_mutex; + unsigned long tx_empty; + spinlock_t msr_lock; + u8 shadow_mcr; + u8 shadow_msr; + u8 phy_num; +}; + +static int f81534_logic_to_phy_port(struct usb_serial *serial, + struct usb_serial_port *port) +{ + struct f81534_serial_private *serial_priv = + usb_get_serial_data(port->serial); + int count = 0; + int i; + + for (i = 0; i < F81534_NUM_PORT; ++i) { + if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE) + continue; + + if (port->port_number == count) + return i; + + ++count; + } + + return -ENODEV; +} + +static int f81534_set_register(struct usb_serial *serial, u16 reg, u8 data) +{ + struct usb_interface *interface = serial->interface; + struct usb_device *dev = serial->dev; + size_t count = F81534_USB_MAX_RETRY; + int status; + u8 *tmp; + + tmp = kmalloc(sizeof(u8), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + *tmp = data; + + /* + * Our device maybe not reply when heavily loading, We'll retry for + * F81534_USB_MAX_RETRY times. + */ + while (count--) { + status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + F81534_SET_GET_REGISTER, + USB_TYPE_VENDOR | USB_DIR_OUT, + reg, 0, tmp, sizeof(u8), + F81534_USB_TIMEOUT); + if (status > 0) { + status = 0; + break; + } else if (status == 0) { + status = -EIO; + } + } + + if (status < 0) { + dev_err(&interface->dev, "%s: reg: %x data: %x failed: %d\n", + __func__, reg, data, status); + } + + kfree(tmp); + return status; +} + +static int f81534_get_register(struct usb_serial *serial, u16 reg, u8 *data) +{ + struct usb_interface *interface = serial->interface; + struct usb_device *dev = serial->dev; + size_t count = F81534_USB_MAX_RETRY; + int status; + u8 *tmp; + + tmp = kmalloc(sizeof(u8), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + /* + * Our device maybe not reply when heavily loading, We'll retry for + * F81534_USB_MAX_RETRY times. 
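To make the mapping in f81534_logic_to_phy_port() concrete: if the flash configuration marks physical ports 1 and 3 as F81534_PORT_UNAVAILABLE (a hypothetical F81532 layout), logical port 0 resolves to phy 0 and logical port 1 to phy 2, and f81534_calc_num_ports() will later report two ports for the same configuration.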
+ */ + while (count--) { + status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + F81534_SET_GET_REGISTER, + USB_TYPE_VENDOR | USB_DIR_IN, + reg, 0, tmp, sizeof(u8), + F81534_USB_TIMEOUT); + if (status > 0) { + status = 0; + break; + } else if (status == 0) { + status = -EIO; + } + } + + if (status < 0) { + dev_err(&interface->dev, "%s: reg: %x failed: %d\n", __func__, + reg, status); + goto end; + } + + *data = *tmp; + +end: + kfree(tmp); + return status; +} + +static int f81534_set_port_register(struct usb_serial_port *port, u16 reg, + u8 data) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + + return f81534_set_register(port->serial, + reg + port_priv->phy_num * F81534_UART_OFFSET, data); +} + +static int f81534_get_port_register(struct usb_serial_port *port, u16 reg, + u8 *data) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + + return f81534_get_register(port->serial, + reg + port_priv->phy_num * F81534_UART_OFFSET, data); +} + +/* + * If we try to access the internal flash via SPI bus, we should check the bus + * status for every command. e.g., F81534_BUS_REG_START/F81534_BUS_REG_END + */ +static int f81534_wait_for_spi_idle(struct usb_serial *serial) +{ + size_t count = F81534_MAX_BUS_RETRY; + u8 tmp; + int status; + + do { + status = f81534_get_register(serial, F81534_BUS_REG_STATUS, + &tmp); + if (status) + return status; + + if (tmp & F81534_BUS_BUSY) + continue; + + if (tmp & F81534_BUS_IDLE) + break; + + } while (--count); + + if (!count) { + dev_err(&serial->interface->dev, + "%s: timed out waiting for idle SPI bus\n", + __func__); + return -EIO; + } + + return f81534_set_register(serial, F81534_BUS_REG_STATUS, + tmp & ~F81534_BUS_IDLE); +} + +static int f81534_get_spi_register(struct usb_serial *serial, u16 reg, + u8 *data) +{ + int status; + + status = f81534_get_register(serial, reg, data); + if (status) + return status; + + return f81534_wait_for_spi_idle(serial); +} + +static int f81534_set_spi_register(struct usb_serial *serial, u16 reg, u8 data) +{ + int status; + + status = f81534_set_register(serial, reg, data); + if (status) + return status; + + return f81534_wait_for_spi_idle(serial); +} + +static int f81534_read_flash(struct usb_serial *serial, u32 address, + size_t size, u8 *buf) +{ + u8 tmp_buf[F81534_MAX_DATA_BLOCK]; + size_t block = 0; + size_t read_size; + size_t count; + int status; + int offset; + u16 reg_tmp; + + status = f81534_set_spi_register(serial, F81534_BUS_REG_START, + F81534_CMD_READ); + if (status) + return status; + + status = f81534_set_spi_register(serial, F81534_BUS_REG_START, + (address >> 16) & 0xff); + if (status) + return status; + + status = f81534_set_spi_register(serial, F81534_BUS_REG_START, + (address >> 8) & 0xff); + if (status) + return status; + + status = f81534_set_spi_register(serial, F81534_BUS_REG_START, + (address >> 0) & 0xff); + if (status) + return status; + + /* Continuous read mode */ + do { + read_size = min_t(size_t, F81534_MAX_DATA_BLOCK, size); + + for (count = 0; count < read_size; ++count) { + /* To write F81534_BUS_REG_END when final byte */ + if (size <= F81534_MAX_DATA_BLOCK && + read_size == count + 1) + reg_tmp = F81534_BUS_REG_END; + else + reg_tmp = F81534_BUS_REG_START; + + /* + * Dummy code, force IC to generate a read pulse, the + * set of value 0xf1 is dont care (any value is ok) + */ + status = f81534_set_spi_register(serial, reg_tmp, + 0xf1); + if (status) + return status; + + status = f81534_get_spi_register(serial, + F81534_BUS_READ_DATA, 
+ &tmp_buf[count]); + if (status) + return status; + + offset = count + block * F81534_MAX_DATA_BLOCK; + buf[offset] = tmp_buf[count]; + } + + size -= read_size; + ++block; + } while (size); + + return 0; +} + +static void f81534_prepare_write_buffer(struct usb_serial_port *port, u8 *buf) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + int phy_num = port_priv->phy_num; + u8 tx_len; + int i; + + /* + * The block layout is fixed with 4x128 Bytes, per 128 Bytes a port. + * index 0: port phy idx (e.g., 0,1,2,3) + * index 1: only F81534_TOKEN_WRITE + * index 2: serial TX out length + * index 3: fix to 0 + * index 4~127: serial out data block + */ + for (i = 0; i < F81534_NUM_PORT; ++i) { + buf[i * F81534_RECEIVE_BLOCK_SIZE] = i; + buf[i * F81534_RECEIVE_BLOCK_SIZE + 1] = F81534_TOKEN_WRITE; + buf[i * F81534_RECEIVE_BLOCK_SIZE + 2] = 0; + buf[i * F81534_RECEIVE_BLOCK_SIZE + 3] = 0; + } + + tx_len = kfifo_out_locked(&port->write_fifo, + &buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 4], + F81534_MAX_TX_SIZE, &port->lock); + + buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 2] = tx_len; +} + +static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + struct urb *urb; + unsigned long flags; + int result; + + /* Check is any data in write_fifo */ + spin_lock_irqsave(&port->lock, flags); + + if (kfifo_is_empty(&port->write_fifo)) { + spin_unlock_irqrestore(&port->lock, flags); + return 0; + } + + spin_unlock_irqrestore(&port->lock, flags); + + /* Check H/W is TXEMPTY */ + if (!test_and_clear_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty)) + return 0; + + urb = port->write_urbs[0]; + f81534_prepare_write_buffer(port, port->bulk_out_buffers[0]); + urb->transfer_buffer_length = F81534_WRITE_BUFFER_SIZE; + + result = usb_submit_urb(urb, mem_flags); + if (result) { + set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty); + dev_err(&port->dev, "%s: submit failed: %d\n", __func__, + result); + return result; + } + + usb_serial_port_softint(port); + return 0; +} + +static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate) +{ + if (!baudrate) + return 0; + + /* Round to nearest divisor */ + return DIV_ROUND_CLOSEST(clockrate, baudrate); +} + +static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate, + u8 lcr) +{ + u32 divisor; + int status; + u8 value; + + if (baudrate <= 1200) + value = F81534_1X_RXTRIGGER; /* 128 FIFO & TL: 1x */ + else + value = F81534_8X_RXTRIGGER; /* 128 FIFO & TL: 8x */ + + status = f81534_set_port_register(port, F81534_CONFIG1_REG, value); + if (status) { + dev_err(&port->dev, "%s: CONFIG1 setting failed\n", __func__); + return status; + } + + if (baudrate <= 1200) + value = UART_FCR_TRIGGER_1 | UART_FCR_ENABLE_FIFO; /* TL: 1 */ + else + value = UART_FCR_R_TRIG_11 | UART_FCR_ENABLE_FIFO; /* TL: 14 */ + + status = f81534_set_port_register(port, F81534_FIFO_CONTROL_REG, + value); + if (status) { + dev_err(&port->dev, "%s: FCR setting failed\n", __func__); + return status; + } + + divisor = f81534_calc_baud_divisor(baudrate, F81534_MAX_BAUDRATE); + value = UART_LCR_DLAB; + status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, + value); + if (status) { + dev_err(&port->dev, "%s: set LCR failed\n", __func__); + return status; + } + + value = divisor & 0xff; + status = f81534_set_port_register(port, F81534_DIVISOR_LSB_REG, value); + if (status) { + dev_err(&port->dev, "%s: set DLAB LSB failed\n", __func__); + return status; + } + + value = 
(divisor >> 8) & 0xff; + status = f81534_set_port_register(port, F81534_DIVISOR_MSB_REG, value); + if (status) { + dev_err(&port->dev, "%s: set DLAB MSB failed\n", __func__); + return status; + } + + status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, lcr); + if (status) { + dev_err(&port->dev, "%s: set LCR failed\n", __func__); + return status; + } + + return 0; +} + +static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set, + unsigned int clear) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + int status; + u8 tmp; + + if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) + return 0; /* no change */ + + mutex_lock(&port_priv->mcr_mutex); + + /* 'Set' takes precedence over 'Clear' */ + clear &= ~set; + + /* Always enable UART_MCR_OUT2 */ + tmp = UART_MCR_OUT2 | port_priv->shadow_mcr; + + if (clear & TIOCM_DTR) + tmp &= ~UART_MCR_DTR; + + if (clear & TIOCM_RTS) + tmp &= ~UART_MCR_RTS; + + if (set & TIOCM_DTR) + tmp |= UART_MCR_DTR; + + if (set & TIOCM_RTS) + tmp |= UART_MCR_RTS; + + status = f81534_set_port_register(port, F81534_MODEM_CONTROL_REG, tmp); + if (status < 0) { + dev_err(&port->dev, "%s: MCR write failed\n", __func__); + mutex_unlock(&port_priv->mcr_mutex); + return status; + } + + port_priv->shadow_mcr = tmp; + mutex_unlock(&port_priv->mcr_mutex); + return 0; +} + +/* + * This function will search the data area with token F81534_CUSTOM_VALID_TOKEN + * for latest configuration index. If nothing found + * (*index = F81534_CUSTOM_NO_CUSTOM_DATA), We'll load default configure in + * F81534_DEF_CONF_ADDRESS_START section. + * + * Due to we only use block0 to save data, so *index should be 0 or + * F81534_CUSTOM_NO_CUSTOM_DATA. + */ +static int f81534_find_config_idx(struct usb_serial *serial, u8 *index) +{ + u8 tmp; + int status; + + status = f81534_read_flash(serial, F81534_CUSTOM_ADDRESS_START, 1, + &tmp); + if (status) { + dev_err(&serial->interface->dev, "%s: read failed: %d\n", + __func__, status); + return status; + } + + /* We'll use the custom data when the data is valid. */ + if (tmp == F81534_CUSTOM_VALID_TOKEN) + *index = 0; + else + *index = F81534_CUSTOM_NO_CUSTOM_DATA; + + return 0; +} + +/* + * We had 2 generation of F81532/534 IC. All has an internal storage. + * + * 1st is pure USB-to-TTL RS232 IC and designed for 4 ports only, no any + * internal data will used. All mode and gpio control should manually set + * by AP or Driver and all storage space value are 0xff. The + * f81534_calc_num_ports() will run to final we marked as "oldest version" + * for this IC. + * + * 2rd is designed to more generic to use any transceiver and this is our + * mass production type. We'll save data in F81534_CUSTOM_ADDRESS_START + * (0x2f00) with 9bytes. The 1st byte is a indicater. If the token is + * F81534_CUSTOM_VALID_TOKEN(0xf0), the IC is 2nd gen type, the following + * 4bytes save port mode (0:RS232/1:RS485 Invert/2:RS485), and the last + * 4bytes save GPIO state(value from 0~7 to represent 3 GPIO output pin). + * The f81534_calc_num_ports() will run to "new style" with checking + * F81534_PORT_UNAVAILABLE section. 
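As a worked example of the divisor programming above: with the 115200 reference implied by F81534_MAX_BAUDRATE, a requested 9600 baud gives DIV_ROUND_CLOSEST(115200, 9600) = 12, so the driver writes 0x0c to F81534_DIVISOR_LSB_REG and 0x00 to F81534_DIVISOR_MSB_REG while UART_LCR_DLAB is set, then restores the real LCR value.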
+ */ +static int f81534_calc_num_ports(struct usb_serial *serial) +{ + u8 setting[F81534_CUSTOM_DATA_SIZE]; + u8 setting_idx; + u8 num_port = 0; + int status; + size_t i; + + /* Check had custom setting */ + status = f81534_find_config_idx(serial, &setting_idx); + if (status) { + dev_err(&serial->interface->dev, "%s: find idx failed: %d\n", + __func__, status); + return 0; + } + + /* + * We'll read custom data only when data available, otherwise we'll + * read default value instead. + */ + if (setting_idx != F81534_CUSTOM_NO_CUSTOM_DATA) { + status = f81534_read_flash(serial, + F81534_CUSTOM_ADDRESS_START + + F81534_CONF_OFFSET, + sizeof(setting), setting); + if (status) { + dev_err(&serial->interface->dev, + "%s: get custom data failed: %d\n", + __func__, status); + return 0; + } + + dev_dbg(&serial->interface->dev, + "%s: read config from block: %d\n", __func__, + setting_idx); + } else { + /* Read default board setting */ + status = f81534_read_flash(serial, + F81534_DEF_CONF_ADDRESS_START, F81534_NUM_PORT, + setting); + + if (status) { + dev_err(&serial->interface->dev, + "%s: read failed: %d\n", __func__, + status); + return 0; + } + + dev_dbg(&serial->interface->dev, "%s: read default config\n", + __func__); + } + + /* New style, find all possible ports */ + for (i = 0; i < F81534_NUM_PORT; ++i) { + if (setting[i] & F81534_PORT_UNAVAILABLE) + continue; + + ++num_port; + } + + if (num_port) + return num_port; + + dev_warn(&serial->interface->dev, "%s: Read Failed. default 4 ports\n", + __func__); + return 4; /* Nothing found, oldest version IC */ +} + +static void f81534_set_termios(struct tty_struct *tty, + struct usb_serial_port *port, + struct ktermios *old_termios) +{ + u8 new_lcr = 0; + int status; + u32 baud; + + if (C_BAUD(tty) == B0) + f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS); + else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) + f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0); + + if (C_PARENB(tty)) { + new_lcr |= UART_LCR_PARITY; + + if (!C_PARODD(tty)) + new_lcr |= UART_LCR_EPAR; + + if (C_CMSPAR(tty)) + new_lcr |= UART_LCR_SPAR; + } + + if (C_CSTOPB(tty)) + new_lcr |= UART_LCR_STOP; + + switch (C_CSIZE(tty)) { + case CS5: + new_lcr |= UART_LCR_WLEN5; + break; + case CS6: + new_lcr |= UART_LCR_WLEN6; + break; + case CS7: + new_lcr |= UART_LCR_WLEN7; + break; + default: + case CS8: + new_lcr |= UART_LCR_WLEN8; + break; + } + + baud = tty_get_baud_rate(tty); + if (!baud) + return; + + if (baud > F81534_MAX_BAUDRATE) { + if (old_termios) + baud = tty_termios_baud_rate(old_termios); + else + baud = F81534_DEFAULT_BAUD_RATE; + + tty_encode_baud_rate(tty, baud, baud); + } + + dev_dbg(&port->dev, "%s: baud: %d\n", __func__, baud); + + status = f81534_set_port_config(port, baud, new_lcr); + if (status < 0) { + dev_err(&port->dev, "%s: set port config failed: %d\n", + __func__, status); + } +} + +static int f81534_submit_read_urb(struct usb_serial *serial, gfp_t flags) +{ + return usb_serial_generic_submit_read_urbs(serial->port[0], flags); +} + +static void f81534_msr_changed(struct usb_serial_port *port, u8 msr) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + struct tty_struct *tty; + unsigned long flags; + u8 old_msr; + + if (!(msr & UART_MSR_ANY_DELTA)) + return; + + spin_lock_irqsave(&port_priv->msr_lock, flags); + old_msr = port_priv->shadow_msr; + port_priv->shadow_msr = msr; + spin_unlock_irqrestore(&port_priv->msr_lock, flags); + + dev_dbg(&port->dev, "%s: MSR from %02x to %02x\n", __func__, old_msr, + msr); + + /* 
Update input line counters */ + if (msr & UART_MSR_DCTS) + port->icount.cts++; + if (msr & UART_MSR_DDSR) + port->icount.dsr++; + if (msr & UART_MSR_DDCD) + port->icount.dcd++; + if (msr & UART_MSR_TERI) + port->icount.rng++; + + wake_up_interruptible(&port->port.delta_msr_wait); + + if (!(msr & UART_MSR_DDCD)) + return; + + dev_dbg(&port->dev, "%s: DCD Changed: phy_num: %d from %x to %x\n", + __func__, port_priv->phy_num, old_msr, msr); + + tty = tty_port_tty_get(&port->port); + if (!tty) + return; + + usb_serial_handle_dcd_change(port, tty, msr & UART_MSR_DCD); + tty_kref_put(tty); +} + +static int f81534_read_msr(struct usb_serial_port *port) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + unsigned long flags; + int status; + u8 msr; + + /* Get MSR initial value */ + status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr); + if (status) + return status; + + /* Force update current state */ + spin_lock_irqsave(&port_priv->msr_lock, flags); + port_priv->shadow_msr = msr; + spin_unlock_irqrestore(&port_priv->msr_lock, flags); + + return 0; +} + +static int f81534_open(struct tty_struct *tty, struct usb_serial_port *port) +{ + struct f81534_serial_private *serial_priv = + usb_get_serial_data(port->serial); + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + int status; + + status = f81534_set_port_register(port, + F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + if (status) { + dev_err(&port->dev, "%s: Clear FIFO failed: %d\n", __func__, + status); + return status; + } + + if (tty) + f81534_set_termios(tty, port, NULL); + + status = f81534_read_msr(port); + if (status) + return status; + + mutex_lock(&serial_priv->urb_mutex); + + /* Submit Read URBs for first port opened */ + if (!serial_priv->opened_port) { + status = f81534_submit_read_urb(port->serial, GFP_KERNEL); + if (status) + goto exit; + } + + serial_priv->opened_port++; + +exit: + mutex_unlock(&serial_priv->urb_mutex); + + set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty); + return status; +} + +static void f81534_close(struct usb_serial_port *port) +{ + struct f81534_serial_private *serial_priv = + usb_get_serial_data(port->serial); + struct usb_serial_port *port0 = port->serial->port[0]; + unsigned long flags; + size_t i; + + usb_kill_urb(port->write_urbs[0]); + + spin_lock_irqsave(&port->lock, flags); + kfifo_reset_out(&port->write_fifo); + spin_unlock_irqrestore(&port->lock, flags); + + /* Kill Read URBs when final port closed */ + mutex_lock(&serial_priv->urb_mutex); + serial_priv->opened_port--; + + if (!serial_priv->opened_port) { + for (i = 0; i < ARRAY_SIZE(port0->read_urbs); ++i) + usb_kill_urb(port0->read_urbs[i]); + } + + mutex_unlock(&serial_priv->urb_mutex); +} + +static int f81534_get_serial_info(struct usb_serial_port *port, + struct serial_struct __user *retinfo) +{ + struct f81534_port_private *port_priv; + struct serial_struct tmp; + + port_priv = usb_get_serial_port_data(port); + + memset(&tmp, 0, sizeof(tmp)); + + tmp.type = PORT_16550A; + tmp.port = port->port_number; + tmp.line = port->minor; + tmp.baud_base = F81534_MAX_BAUDRATE; + + if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) + return -EFAULT; + + return 0; +} + +static int f81534_ioctl(struct tty_struct *tty, unsigned int cmd, + unsigned long arg) +{ + struct usb_serial_port *port = tty->driver_data; + struct serial_struct __user *buf = (struct serial_struct __user *)arg; + + switch (cmd) { + case TIOCGSERIAL: + return 
f81534_get_serial_info(port, buf); + default: + break; + } + + return -ENOIOCTLCMD; +} + +static void f81534_process_per_serial_block(struct usb_serial_port *port, + u8 *data) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + int phy_num = data[0]; + size_t read_size = 0; + size_t i; + char tty_flag; + int status; + u8 lsr; + + /* + * The block layout is 128 Bytes + * index 0: port phy idx (e.g., 0,1,2,3), + * index 1: It's could be + * F81534_TOKEN_RECEIVE + * F81534_TOKEN_TX_EMPTY + * F81534_TOKEN_MSR_CHANGE + * index 2: serial in size (data+lsr, must be even) + * meaningful for F81534_TOKEN_RECEIVE only + * index 3: current MSR with this device + * index 4~127: serial in data block (data+lsr, must be even) + */ + switch (data[1]) { + case F81534_TOKEN_TX_EMPTY: + set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty); + + /* Try to submit writer */ + status = f81534_submit_writer(port, GFP_ATOMIC); + if (status) + dev_err(&port->dev, "%s: submit failed\n", __func__); + return; + + case F81534_TOKEN_MSR_CHANGE: + f81534_msr_changed(port, data[3]); + return; + + case F81534_TOKEN_RECEIVE: + read_size = data[2]; + if (read_size > F81534_MAX_RX_SIZE) { + dev_err(&port->dev, + "%s: phy: %d read_size: %zu larger than: %d\n", + __func__, phy_num, read_size, + F81534_MAX_RX_SIZE); + return; + } + + break; + + default: + dev_warn(&port->dev, "%s: unknown token: %02x\n", __func__, + data[1]); + return; + } + + for (i = 4; i < 4 + read_size; i += 2) { + tty_flag = TTY_NORMAL; + lsr = data[i + 1]; + + if (lsr & UART_LSR_BRK_ERROR_BITS) { + if (lsr & UART_LSR_BI) { + tty_flag = TTY_BREAK; + port->icount.brk++; + usb_serial_handle_break(port); + } else if (lsr & UART_LSR_PE) { + tty_flag = TTY_PARITY; + port->icount.parity++; + } else if (lsr & UART_LSR_FE) { + tty_flag = TTY_FRAME; + port->icount.frame++; + } + + if (lsr & UART_LSR_OE) { + port->icount.overrun++; + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } + } + + if (port->port.console && port->sysrq) { + if (usb_serial_handle_sysrq_char(port, data[i])) + continue; + } + + tty_insert_flip_char(&port->port, data[i], tty_flag); + } + + tty_flip_buffer_push(&port->port); +} + +static void f81534_process_read_urb(struct urb *urb) +{ + struct f81534_serial_private *serial_priv; + struct usb_serial_port *port; + struct usb_serial *serial; + u8 *buf; + int phy_port_num; + int tty_port_num; + size_t i; + + if (!urb->actual_length || + urb->actual_length % F81534_RECEIVE_BLOCK_SIZE) { + return; + } + + port = urb->context; + serial = port->serial; + buf = urb->transfer_buffer; + serial_priv = usb_get_serial_data(serial); + + for (i = 0; i < urb->actual_length; i += F81534_RECEIVE_BLOCK_SIZE) { + phy_port_num = buf[i]; + if (phy_port_num >= F81534_NUM_PORT) { + dev_err(&port->dev, + "%s: phy_port_num: %d larger than: %d\n", + __func__, phy_port_num, F81534_NUM_PORT); + continue; + } + + tty_port_num = serial_priv->tty_idx[phy_port_num]; + port = serial->port[tty_port_num]; + + if (tty_port_initialized(&port->port)) + f81534_process_per_serial_block(port, &buf[i]); + } +} + +static void f81534_write_usb_callback(struct urb *urb) +{ + struct usb_serial_port *port = urb->context; + + switch (urb->status) { + case 0: + break; + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + dev_dbg(&port->dev, "%s - urb stopped: %d\n", + __func__, urb->status); + return; + case -EPIPE: + dev_err(&port->dev, "%s - urb stopped: %d\n", + __func__, urb->status); + return; + default: + dev_dbg(&port->dev, "%s - nonzero urb status: 
%d\n", + __func__, urb->status); + break; + } +} + +static int f81534_setup_ports(struct usb_serial *serial) +{ + struct usb_serial_port *port; + u8 port0_out_address; + int buffer_size; + size_t i; + + /* + * In our system architecture, we had 2 or 4 serial ports, + * but only get 1 set of bulk in/out endpoints. + * + * The usb-serial subsystem will generate port 0 data, + * but port 1/2/3 will not. It's will generate write URB and buffer + * by following code and use the port0 read URB for read operation. + */ + for (i = 1; i < serial->num_ports; ++i) { + port0_out_address = serial->port[0]->bulk_out_endpointAddress; + buffer_size = serial->port[0]->bulk_out_size; + port = serial->port[i]; + + if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) + return -ENOMEM; + + port->bulk_out_size = buffer_size; + port->bulk_out_endpointAddress = port0_out_address; + + port->write_urbs[0] = usb_alloc_urb(0, GFP_KERNEL); + if (!port->write_urbs[0]) + return -ENOMEM; + + port->bulk_out_buffers[0] = kzalloc(buffer_size, GFP_KERNEL); + if (!port->bulk_out_buffers[0]) + return -ENOMEM; + + usb_fill_bulk_urb(port->write_urbs[0], serial->dev, + usb_sndbulkpipe(serial->dev, + port0_out_address), + port->bulk_out_buffers[0], buffer_size, + serial->type->write_bulk_callback, port); + + port->write_urb = port->write_urbs[0]; + port->bulk_out_buffer = port->bulk_out_buffers[0]; + } + + return 0; +} + +static int f81534_probe(struct usb_serial *serial, + const struct usb_device_id *id) +{ + struct usb_endpoint_descriptor *endpoint; + struct usb_host_interface *iface_desc; + struct device *dev; + int num_bulk_in = 0; + int num_bulk_out = 0; + int size_bulk_in = 0; + int size_bulk_out = 0; + int i; + + dev = &serial->interface->dev; + iface_desc = serial->interface->cur_altsetting; + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (usb_endpoint_is_bulk_in(endpoint)) { + ++num_bulk_in; + size_bulk_in = usb_endpoint_maxp(endpoint); + } + + if (usb_endpoint_is_bulk_out(endpoint)) { + ++num_bulk_out; + size_bulk_out = usb_endpoint_maxp(endpoint); + } + } + + if (num_bulk_in != 1 || num_bulk_out != 1) { + dev_err(dev, "expected endpoints not found\n"); + return -ENODEV; + } + + if (size_bulk_out != F81534_WRITE_BUFFER_SIZE || + size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) { + dev_err(dev, "unsupported endpoint max packet size\n"); + return -ENODEV; + } + + return 0; +} + +static int f81534_attach(struct usb_serial *serial) +{ + struct f81534_serial_private *serial_priv; + int index = 0; + int status; + int i; + + serial_priv = devm_kzalloc(&serial->interface->dev, + sizeof(*serial_priv), GFP_KERNEL); + if (!serial_priv) + return -ENOMEM; + + usb_set_serial_data(serial, serial_priv); + + mutex_init(&serial_priv->urb_mutex); + + status = f81534_setup_ports(serial); + if (status) + return status; + + /* Check had custom setting */ + status = f81534_find_config_idx(serial, &serial_priv->setting_idx); + if (status) { + dev_err(&serial->interface->dev, "%s: find idx failed: %d\n", + __func__, status); + return status; + } + + /* + * We'll read custom data only when data available, otherwise we'll + * read default value instead. 
+ */ + if (serial_priv->setting_idx == F81534_CUSTOM_NO_CUSTOM_DATA) { + /* + * The default configuration layout: + * byte 0/1/2/3: uart setting + */ + status = f81534_read_flash(serial, + F81534_DEF_CONF_ADDRESS_START, + F81534_DEF_CONF_SIZE, + serial_priv->conf_data); + if (status) { + dev_err(&serial->interface->dev, + "%s: read reserve data failed: %d\n", + __func__, status); + return status; + } + } else { + /* Only read 8 bytes for mode & GPIO */ + status = f81534_read_flash(serial, + F81534_CUSTOM_ADDRESS_START + + F81534_CONF_OFFSET, + sizeof(serial_priv->conf_data), + serial_priv->conf_data); + if (status) { + dev_err(&serial->interface->dev, + "%s: idx: %d get data failed: %d\n", + __func__, serial_priv->setting_idx, + status); + return status; + } + } + + /* Assign phy-to-logic mapping */ + for (i = 0; i < F81534_NUM_PORT; ++i) { + if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE) + continue; + + serial_priv->tty_idx[i] = index++; + dev_dbg(&serial->interface->dev, + "%s: phy_num: %d, tty_idx: %d\n", __func__, i, + serial_priv->tty_idx[i]); + } + + return 0; +} + +static int f81534_port_probe(struct usb_serial_port *port) +{ + struct f81534_port_private *port_priv; + + port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL); + if (!port_priv) + return -ENOMEM; + + spin_lock_init(&port_priv->msr_lock); + mutex_init(&port_priv->mcr_mutex); + + /* Assign logic-to-phy mapping */ + port_priv->phy_num = f81534_logic_to_phy_port(port->serial, port); + if (port_priv->phy_num < 0 || port_priv->phy_num >= F81534_NUM_PORT) + return -ENODEV; + + usb_set_serial_port_data(port, port_priv); + dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__, + port->port_number, port_priv->phy_num); + + return 0; +} + +static int f81534_tiocmget(struct tty_struct *tty) +{ + struct usb_serial_port *port = tty->driver_data; + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + int status; + int r; + u8 msr; + u8 mcr; + + /* Read current MSR from device */ + status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr); + if (status) + return status; + + mutex_lock(&port_priv->mcr_mutex); + mcr = port_priv->shadow_mcr; + mutex_unlock(&port_priv->mcr_mutex); + + r = (mcr & UART_MCR_DTR ? TIOCM_DTR : 0) | + (mcr & UART_MCR_RTS ? TIOCM_RTS : 0) | + (msr & UART_MSR_CTS ? TIOCM_CTS : 0) | + (msr & UART_MSR_DCD ? TIOCM_CAR : 0) | + (msr & UART_MSR_RI ? TIOCM_RI : 0) | + (msr & UART_MSR_DSR ? 
TIOCM_DSR : 0); + + return r; +} + +static int f81534_tiocmset(struct tty_struct *tty, unsigned int set, + unsigned int clear) +{ + struct usb_serial_port *port = tty->driver_data; + + return f81534_update_mctrl(port, set, clear); +} + +static void f81534_dtr_rts(struct usb_serial_port *port, int on) +{ + if (on) + f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0); + else + f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS); +} + +static int f81534_write(struct tty_struct *tty, struct usb_serial_port *port, + const u8 *buf, int count) +{ + int bytes_out, status; + + if (!count) + return 0; + + bytes_out = kfifo_in_locked(&port->write_fifo, buf, count, + &port->lock); + + status = f81534_submit_writer(port, GFP_ATOMIC); + if (status) { + dev_err(&port->dev, "%s: submit failed\n", __func__); + return status; + } + + return bytes_out; +} + +static bool f81534_tx_empty(struct usb_serial_port *port) +{ + struct f81534_port_private *port_priv = usb_get_serial_port_data(port); + + return test_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty); +} + +static int f81534_resume(struct usb_serial *serial) +{ + struct f81534_serial_private *serial_priv = + usb_get_serial_data(serial); + struct usb_serial_port *port; + int error = 0; + int status; + size_t i; + + /* + * We'll register port 0 bulkin when port had opened, It'll take all + * port received data, MSR register change and TX_EMPTY information. + */ + mutex_lock(&serial_priv->urb_mutex); + + if (serial_priv->opened_port) { + status = f81534_submit_read_urb(serial, GFP_NOIO); + if (status) { + mutex_unlock(&serial_priv->urb_mutex); + return status; + } + } + + mutex_unlock(&serial_priv->urb_mutex); + + for (i = 0; i < serial->num_ports; i++) { + port = serial->port[i]; + if (!tty_port_initialized(&port->port)) + continue; + + status = f81534_submit_writer(port, GFP_NOIO); + if (status) { + dev_err(&port->dev, "%s: submit failed\n", __func__); + ++error; + } + } + + if (error) + return -EIO; + + return 0; +} + +static struct usb_serial_driver f81534_device = { + .driver = { + .owner = THIS_MODULE, + .name = "f81534", + }, + .description = DRIVER_DESC, + .id_table = f81534_id_table, + .open = f81534_open, + .close = f81534_close, + .write = f81534_write, + .tx_empty = f81534_tx_empty, + .calc_num_ports = f81534_calc_num_ports, + .probe = f81534_probe, + .attach = f81534_attach, + .port_probe = f81534_port_probe, + .dtr_rts = f81534_dtr_rts, + .process_read_urb = f81534_process_read_urb, + .ioctl = f81534_ioctl, + .tiocmget = f81534_tiocmget, + .tiocmset = f81534_tiocmset, + .write_bulk_callback = f81534_write_usb_callback, + .set_termios = f81534_set_termios, + .resume = f81534_resume, +}; + +static struct usb_serial_driver *const serial_drivers[] = { + &f81534_device, NULL +}; + +module_usb_serial_driver(serial_drivers, f81534_id_table); + +MODULE_DEVICE_TABLE(usb, f81534_id_table); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Peter Hong "); +MODULE_AUTHOR("Tom Tsai "); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 0ff7f38d7800502cdf9159299e603d7475ff9beb..23d14b98ae2a778282ec186d7a80805ee2e103aa 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, + { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), + .driver_info = 
(kernel_ulong_t)&ftdi_jtag_quirk }, { } /* Terminating entry */ }; @@ -1453,8 +1455,6 @@ static int get_serial_info(struct usb_serial_port *port, struct ftdi_private *priv = usb_get_serial_port_data(port); struct serial_struct tmp; - if (!retinfo) - return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.flags = priv->flags; tmp.baud_base = priv->baud_base; @@ -1536,9 +1536,6 @@ static int get_lsr_info(struct usb_serial_port *port, struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned int result = 0; - if (!retinfo) - return -EFAULT; - if (priv->transmit_empty) result = TIOCSER_TEMT; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 21011c0a4c6401ffd942e83040f3df53099d9e91..48ee04c94a7541ad6c5f9023be107246207c56fd 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -595,6 +595,12 @@ #define ATMEL_VID 0x03eb /* Vendor ID */ #define STK541_PID 0x2109 /* Zigbee Controller */ +/* + * Texas Instruments + */ +#define TI_VID 0x0451 +#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */ + /* * Blackfin gnICE JTAG * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 11c05ce2f35f8cd8ffe1ea46b0f6b0fed83e5064..dcc0c58aaad5adaf89bdd535d4cd9699445de3ac 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -1554,9 +1554,6 @@ static int get_serial_info(struct edgeport_port *edge_port, { struct serial_struct tmp; - if (!retinfo) - return -EFAULT; - memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_16550A; diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index fce82fd79f77bf59db864c2d1f09e166aa5d018a..c339163698eb9960f8cc71735d8ed1cf24905e75 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -2459,9 +2459,6 @@ static int get_serial_info(struct edgeport_port *edge_port, struct serial_struct tmp; unsigned cwait; - if (!retinfo) - return -EFAULT; - cwait = edge_port->port->port.closing_wait; if (cwait != ASYNC_CLOSING_WAIT_NONE) cwait = jiffies_to_msecs(cwait) / 10; diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index fc5d3a791e08c61fad21ba48292659d1f1924d12..0ee190fc1bf8ce7154a4a4eac7023a377aa03365 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -296,7 +296,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) rc = usb_serial_generic_open(tty, port); if (rc) { retval = rc; - goto exit; + goto err_free_cfg; } rc = usb_control_msg(port->serial->dev, @@ -311,21 +311,38 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) if (rc < 0) { dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc); retval = rc; + goto err_generic_close; } else dev_dbg(&port->dev, "%s - enabled reading\n", __func__); rc = klsi_105_get_line_state(port, &line_state); - if (rc >= 0) { - spin_lock_irqsave(&priv->lock, flags); - priv->line_state = line_state; - spin_unlock_irqrestore(&priv->lock, flags); - dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state); - retval = 0; - } else + if (rc < 0) { retval = rc; + goto err_disable_read; + } + + spin_lock_irqsave(&priv->lock, flags); + priv->line_state = line_state; + spin_unlock_irqrestore(&priv->lock, flags); + dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, + line_state); + + return 0; -exit: +err_disable_read: + usb_control_msg(port->serial->dev, + 
usb_sndctrlpipe(port->serial->dev, 0), + KL5KUSB105A_SIO_CONFIGURE, + USB_TYPE_VENDOR | USB_DIR_OUT, + KL5KUSB105A_SIO_CONFIGURE_READ_OFF, + 0, /* index */ + NULL, 0, + KLSI_TIMEOUT); +err_generic_close: + usb_serial_generic_close(port); +err_free_cfg: kfree(cfg); + return retval; } diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index de9992b492b08ba447fb1f594e0a4f12f1d3c9a7..d52caa03679c6cbc5886316cb27d471d0c4b7bc4 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -1861,9 +1861,6 @@ static int get_serial_info(struct moschip_port *mos7720_port, { struct serial_struct tmp; - if (!retinfo) - return -EFAULT; - memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_16550A; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 57426d703a098dd2d45d67de11acc0ccb5b2f3a5..9a220b8e810f161be985d9655ec6d9dee90147dc 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1956,9 +1956,6 @@ static int mos7840_get_serial_info(struct moschip_port *mos7840_port, if (mos7840_port == NULL) return -1; - if (!retinfo) - return -EFAULT; - memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_16550A; diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 4b7bfb394a32aba025e8e07ad3384759ea1723d8..5ded6f524d59128e66e1864255515b5720d64921 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -336,9 +336,6 @@ static int get_serial_info(struct usb_serial_port *port, { struct serial_struct tmp; - if (!serial) - return -EFAULT; - memset(&tmp, 0x00, sizeof(tmp)); /* fake emulate a 16550 uart to make userspace code happy */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 9894e341c6ac93953a119c224729881a00682ef9..7ce31a4c7e7fd3d186e8e05b20b9a3ca52700b6c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_CC864_SINGLE 0x1006 #define TELIT_PRODUCT_DE910_DUAL 0x1010 #define TELIT_PRODUCT_UE910_V2 0x1012 +#define TELIT_PRODUCT_LE922_USBCFG1 0x1040 +#define TELIT_PRODUCT_LE922_USBCFG2 0x1041 #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 @@ -1210,6 +1212,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), @@ -1989,6 +1995,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 
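The reworked klsi_105_open() above now unwinds partial initialization in reverse order through dedicated error labels, so the CONFIGURE_READ_OFF message is only sent once reading was actually enabled. A minimal sketch of that unwind idiom, using purely hypothetical helper names:

static int example_open(struct example_port *port)
{
    int ret;

    ret = example_alloc_cfg(port);        /* hypothetical */
    if (ret)
        return ret;

    ret = example_enable_read(port);      /* hypothetical */
    if (ret)
        goto err_free_cfg;

    ret = example_read_line_state(port);  /* hypothetical */
    if (ret)
        goto err_disable_read;

    return 0;

err_disable_read:
    example_disable_read(port);   /* undo example_enable_read() */
err_free_cfg:
    example_free_cfg(port);       /* undo example_alloc_cfg() */
    return ret;
}

Each failure jumps to the label that undoes only the steps that already succeeded, which keeps every exit path consistent.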
0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index 85acb50a7ee2f4fe74ae26dc3dc96611cbd9d918..659cb8606bd953a7585a013beeb3dc3989400a14 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -463,9 +463,6 @@ static int get_serial_info(struct usb_serial_port *port, { struct serial_struct tmp; - if (!retinfo) - return -EFAULT; - memset(&tmp, 0, sizeof(tmp)); tmp.line = port->minor; tmp.port = 0; diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 70a098de429fc39934ef8808d3e6c5011f063352..2a156144c76c51d907ba5b60b62ed72d7596dbad 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -318,9 +318,6 @@ static int get_serial_info(struct usb_serial_port *port, { struct serial_struct tmp; - if (!retinfo) - return -EFAULT; - memset(&tmp, 0, sizeof(tmp)); tmp.line = port->minor; tmp.port = 0; diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index a8b9bdba314fdc7bba899a29368f085f6d8a61ed..8db9d071d9409a9f6e85ce6b23a1515f90f11157 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1426,9 +1426,6 @@ static int ti_get_serial_info(struct ti_port *tport, struct serial_struct ret_serial; unsigned cwait; - if (!ret_arg) - return -EFAULT; - cwait = port->port.closing_wait; if (cwait != ASYNC_CLOSING_WAIT_NONE) cwait = jiffies_to_msecs(cwait) / 10; diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 3dfdfc81254b827cc2e1f04feb926d0ad9666cf0..59bfcb3da116c4a49398750a1678f1d0349678fd 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -140,9 +140,6 @@ static int get_serial_info(struct usb_serial_port *port, { struct serial_struct tmp; - if (!retinfo) - return -EFAULT; - memset(&tmp, 0, sizeof(tmp)); tmp.line = port->minor; tmp.port = port->port_number; diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index ffd086733421316bed0d3c0cf56aa20134fb54f5..1a59f335b063e7e79f8ece2173f2f03574ce3a5b 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us) /* COMMAND STAGE */ /* let's send the command via the control pipe */ + /* + * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack. + * Stack may be vmallocated. So no DMA for us. Make a copy. 
+ */ + memcpy(us->iobuf, srb->cmnd, srb->cmd_len); result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, US_CBI_ADSC, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, - us->ifnum, srb->cmnd, srb->cmd_len); + us->ifnum, us->iobuf, srb->cmd_len); /* check the return code for the command */ usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 2cba13a532cd82b2a4a53ddf853eced763e7f852..615bea08ec0aeb1788155e067ac4f999a737f9a0 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -52,7 +52,6 @@ #include #include -#include #include #include #include diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 03eccf29ace068f07597931799fb6b57254c5d7b..c4724fb3a6912793fddf81615b2ccb40f166fdfe 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -460,13 +460,14 @@ static void vhci_tx_urb(struct urb *urb) { struct vhci_device *vdev = get_vdev(urb->dev); struct vhci_priv *priv; - struct vhci_hcd *vhci = vdev_to_vhci(vdev); + struct vhci_hcd *vhci; unsigned long flags; if (!vdev) { pr_err("could not get virtual device"); return; } + vhci = vdev_to_vhci(vdev); priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC); if (!priv) { diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index c404017c1b5ae26aa1df115c0ac1652284ed21b4..b96e5b1892696edb582512a1f384a35977a650bb 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -361,6 +361,7 @@ static void set_status_attr(int id) status->attr.attr.name = status->name; status->attr.attr.mode = S_IRUGO; status->attr.show = status_show; + sysfs_attr_init(&status->attr.attr); } static int init_status_attrs(void) diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c index 7091848df6c8b54579c231181cfb8be32fa30498..968471b62cbcd5542d50579213255df0e3cc561d 100644 --- a/drivers/usb/usbip/vudc_dev.c +++ b/drivers/usb/usbip/vudc_dev.c @@ -242,10 +242,10 @@ static const struct usb_gadget_ops vgadget_ops = { static int vep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { - struct vep *ep; - struct vudc *udc; - unsigned maxp; - unsigned long flags; + struct vep *ep; + struct vudc *udc; + unsigned int maxp; + unsigned long flags; ep = to_vep(_ep); udc = ep_to_vudc(ep); @@ -259,7 +259,7 @@ static int vep_enable(struct usb_ep *_ep, spin_lock_irqsave(&udc->lock, flags); - maxp = usb_endpoint_maxp(desc) & 0x7ff; + maxp = usb_endpoint_maxp(desc); _ep->maxpacket = maxp; ep->desc = desc; ep->type = usb_endpoint_type(desc); @@ -549,30 +549,34 @@ static int init_vudc_hw(struct vudc *udc) sprintf(ep->name, "ep%d%s", num, i ? (is_out ? 
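The usb_stor_CB_transport() change above stops handing srb->cmnd, which may live on the (potentially vmalloc'ed) stack, straight to the control transfer and instead bounces it through the preallocated, DMA-safe us->iobuf. The same pattern in isolation might look like the sketch below; the helper name and timeout are made up, only usb_control_msg() and the US_CBI_ADSC request come from the driver:

/*
 * cmd may sit on the caller's stack, which must never be used as a DMA
 * source.  Copy it into a DMA-safe buffer before starting the transfer.
 */
static int send_cbi_command(struct usb_device *udev, void *dma_buf,
                            const u8 *cmd, u16 cmd_len, u16 ifnum)
{
    memcpy(dma_buf, cmd, cmd_len);

    return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                           US_CBI_ADSC,
                           USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                           0, ifnum, dma_buf, cmd_len, 5000);
}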
"out" : "in") : ""); ep->ep.name = ep->name; + + ep->ep.ops = &vep_ops; + + usb_ep_set_maxpacket_limit(&ep->ep, ~0); + ep->ep.max_streams = 16; + ep->gadget = &udc->gadget; + INIT_LIST_HEAD(&ep->req_queue); + if (i == 0) { + /* ep0 */ ep->ep.caps.type_control = true; ep->ep.caps.dir_out = true; ep->ep.caps.dir_in = true; + + udc->gadget.ep0 = &ep->ep; } else { + /* All other eps */ ep->ep.caps.type_iso = true; ep->ep.caps.type_int = true; ep->ep.caps.type_bulk = true; - } - if (is_out) - ep->ep.caps.dir_out = true; - else - ep->ep.caps.dir_in = true; + if (is_out) + ep->ep.caps.dir_out = true; + else + ep->ep.caps.dir_in = true; - ep->ep.ops = &vep_ops; - list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); - ep->halted = ep->wedged = ep->already_seen = - ep->setup_stage = 0; - usb_ep_set_maxpacket_limit(&ep->ep, ~0); - ep->ep.max_streams = 16; - ep->gadget = &udc->gadget; - ep->desc = NULL; - INIT_LIST_HEAD(&ep->req_queue); + list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); + } } spin_lock_init(&udc->lock); @@ -589,9 +593,6 @@ static int init_vudc_hw(struct vudc *udc) ud->eh_ops.reset = vudc_device_reset; ud->eh_ops.unusable = vudc_device_unusable; - udc->gadget.ep0 = &udc->ep[0].ep; - list_del_init(&udc->ep[0].ep.ep_list); - v_init_timer(udc); return 0; diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c index aba6bd478045113524935ccdd0055ffb4996e9eb..4cfd475ee86532c9fb6bb793f9aeda789694e3b5 100644 --- a/drivers/usb/usbip/vudc_transfer.c +++ b/drivers/usb/usbip/vudc_transfer.c @@ -73,8 +73,8 @@ static int handle_control_request(struct vudc *udc, struct urb *urb, { struct vep *ep2; int ret_val = 1; - unsigned w_index; - unsigned w_value; + unsigned int w_index; + unsigned int w_value; w_index = le16_to_cpu(setup->wIndex); w_value = le16_to_cpu(setup->wValue); @@ -200,7 +200,7 @@ static int transfer(struct vudc *udc, top: /* if there's no request queued, the device is NAKing; return */ list_for_each_entry(req, &ep->req_queue, req_entry) { - unsigned host_len, dev_len, len; + unsigned int host_len, dev_len, len; void *ubuf_pos, *rbuf_pos; int is_short, to_host; int rescan = 0; @@ -339,6 +339,8 @@ static void v_timer(unsigned long _vudc) total = timer->frame_limit; } + /* We have to clear ep0 flags separately as it's not on the list */ + udc->ep[0].already_seen = 0; list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) { ep = to_vep(_ep); ep->already_seen = 0; diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c index 415b14002a61eaa528ddc46ffe593ac6ddd5f028..d4de56b93d68170d503f5be9c28bb6b06d41af45 100644 --- a/drivers/usb/wusbcore/dev-sysfs.c +++ b/drivers/usb/wusbcore/dev-sysfs.c @@ -53,7 +53,7 @@ static ssize_t wusb_disconnect_store(struct device *dev, wusbhc_put(wusbhc); return size; } -static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store); +static DEVICE_ATTR_WO(wusb_disconnect); static ssize_t wusb_cdid_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -69,7 +69,7 @@ static ssize_t wusb_cdid_show(struct device *dev, wusb_dev_put(wusb_dev); return result + 1; } -static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL); +static DEVICE_ATTR_RO(wusb_cdid); static ssize_t wusb_ck_store(struct device *dev, struct device_attribute *attr, @@ -105,7 +105,7 @@ static ssize_t wusb_ck_store(struct device *dev, wusbhc_put(wusbhc); return result < 0 ? 
result : size; } -static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store); +static DEVICE_ATTR_WO(wusb_ck); static struct attribute *wusb_dev_attrs[] = { &dev_attr_wusb_disconnect.attr, diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c index 8c9421b69da00adb560dc7d18719591065c0f8d8..170f2c38de9b5695e37008c9dc975787f66021cb 100644 --- a/drivers/usb/wusbcore/security.c +++ b/drivers/usb/wusbcore/security.c @@ -240,6 +240,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, if (new_secd == NULL) { dev_err(dev, "Can't allocate space for security descriptors\n"); + result = -ENOMEM; goto out; } secd = new_secd; diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c index ed4622279c63c0bb4abc76e90fba8cca4493e537..e3819fc182b0b76169dc2b9396261c0604f47ada 100644 --- a/drivers/usb/wusbcore/wa-nep.c +++ b/drivers/usb/wusbcore/wa-nep.c @@ -198,6 +198,7 @@ static int wa_nep_queue(struct wahc *wa, size_t size) if (nw == NULL) { if (printk_ratelimit()) dev_err(dev, "No memory to queue notification\n"); + result = -ENOMEM; goto out; } INIT_WORK(&nw->work, wa_notif_dispatch); diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 167fcc71f5f6e95b16f49028a1852d216a0b9e64..e70322b1dd020d27082273df2b394e1d526afabd 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -1203,6 +1203,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size) sizeof(struct wa_xfer_packet_info_hwaiso) + (seg_isoc_frame_count * sizeof(__le16)); } + result = -ENOMEM; seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size, GFP_ATOMIC); if (seg == NULL) diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c index 94f401ab859f4c000634628b9364f2a71c313a13..a273a91cf6673e457a8e74b21a6606765e59d519 100644 --- a/drivers/usb/wusbcore/wusbhc.c +++ b/drivers/usb/wusbcore/wusbhc.c @@ -84,8 +84,7 @@ static ssize_t wusb_trust_timeout_store(struct device *dev, out: return result < 0 ? result : size; } -static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show, - wusb_trust_timeout_store); +static DEVICE_ATTR_RW(wusb_trust_timeout); /* * Show the current WUSB CHID. @@ -145,7 +144,7 @@ static ssize_t wusb_chid_store(struct device *dev, result = wusbhc_chid_set(wusbhc, &chid); return result < 0 ? 
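The DEVICE_ATTR_WO()/DEVICE_ATTR_RO()/DEVICE_ATTR_RW() conversions in these wusbcore hunks rely on the standard naming convention: the macros expect callbacks named <attr>_show and/or <attr>_store and supply the conventional permissions themselves. A small illustrative sketch with a made-up "example" attribute:

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
    return sprintf(buf, "%d\n", 42);    /* report some device state */
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
{
    /* parse and apply the new value here */
    return count;
}

/* Declares dev_attr_example with 0644 permissions, wiring up both callbacks. */
static DEVICE_ATTR_RW(example);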
result : size; } -static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store); +static DEVICE_ATTR_RW(wusb_chid); static ssize_t wusb_phy_rate_show(struct device *dev, @@ -174,8 +173,7 @@ static ssize_t wusb_phy_rate_store(struct device *dev, wusbhc->phy_rate = phy_rate; return size; } -static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, - wusb_phy_rate_store); +static DEVICE_ATTR_RW(wusb_phy_rate); static ssize_t wusb_dnts_show(struct device *dev, struct device_attribute *attr, @@ -205,7 +203,7 @@ static ssize_t wusb_dnts_store(struct device *dev, return size; } -static DEVICE_ATTR(wusb_dnts, 0644, wusb_dnts_show, wusb_dnts_store); +static DEVICE_ATTR_RW(wusb_dnts); static ssize_t wusb_retry_count_show(struct device *dev, struct device_attribute *attr, @@ -234,8 +232,7 @@ static ssize_t wusb_retry_count_store(struct device *dev, return size; } -static DEVICE_ATTR(wusb_retry_count, 0644, wusb_retry_count_show, - wusb_retry_count_store); +static DEVICE_ATTR_RW(wusb_retry_count); /* Group all the WUSBHC attributes */ static struct attribute *wusbhc_attrs[] = { diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c index d059ad4d0dbdc76f8fbbdbfc47dfd990d2006fc5..97ee1b46db698f03dee8ea50beb00e864bff0579 100644 --- a/drivers/uwb/lc-rc.c +++ b/drivers/uwb/lc-rc.c @@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index) struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } + return rc; } @@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc) if (dev) { rc = dev_get_drvdata(dev); __uwb_rc_get(rc); + put_device(dev); } + return rc; } EXPORT_SYMBOL_GPL(__uwb_rc_try_get); @@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev) dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev, find_rc_grandpa); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } + return rc; } EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); @@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } return rc; } diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c index c1304b8d498530124c7ca30319c12cc823e84d6e..678e93741ae156bf5d2c41b7a52f7b215262210d 100644 --- a/drivers/uwb/pal.c +++ b/drivers/uwb/pal.c @@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc) dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc); + put_device(dev); + return (dev != NULL); } diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index da6e2ce77495b21ec127209664567b7e2f8f467e..23eced02aaf687fabdc5d76a8ad7649b7bcd5555 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -48,4 +48,5 @@ menuconfig VFIO_NOIOMMU source "drivers/vfio/pci/Kconfig" source "drivers/vfio/platform/Kconfig" +source "drivers/vfio/mdev/Kconfig" source "virt/lib/Kconfig" diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index 7b8a31f63fea81487fed01224d978c0804f02faf..4a23c13b6be44b63ad258766e767c62c64d42512 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o obj-$(CONFIG_VFIO_PCI) += pci/ obj-$(CONFIG_VFIO_PLATFORM) += platform/ +obj-$(CONFIG_VFIO_MDEV) += mdev/ diff --git 
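The uwb/lc-rc.c and uwb/pal.c hunks above fix reference leaks: class_find_device() returns its match with an elevated reference count, so every successful lookup has to be balanced with put_device() once the caller has extracted what it needs. In sketch form (struct my_ctx and the match rule are placeholders):

struct my_ctx;    /* placeholder driver context stored as drvdata */

static int my_match(struct device *dev, const void *data)
{
    return dev_get_drvdata(dev) == data;
}

static struct my_ctx *my_lookup(struct class *cls, const void *key)
{
    struct my_ctx *ctx = NULL;
    struct device *dev;

    dev = class_find_device(cls, NULL, key, my_match);
    if (dev) {
        ctx = dev_get_drvdata(dev);
        put_device(dev);    /* drop the ref class_find_device() took */
    }

    return ctx;
}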
a/drivers/vfio/mdev/Kconfig b/drivers/vfio/mdev/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..14fdb106a827ac8dc2fb07c7831748990bd7df99 --- /dev/null +++ b/drivers/vfio/mdev/Kconfig @@ -0,0 +1,17 @@ + +config VFIO_MDEV + tristate "Mediated device driver framework" + depends on VFIO + default n + help + Provides a framework to virtualize devices. + See Documentation/vfio-mediated-device.txt for more details. + + If you don't know what to do here, say N. + +config VFIO_MDEV_DEVICE + tristate "VFIO driver for Mediated devices" + depends on VFIO && VFIO_MDEV + default n + help + VFIO based driver for Mediated devices. diff --git a/drivers/vfio/mdev/Makefile b/drivers/vfio/mdev/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..fa2d5ea466eea5da03613f0288d5c27467363c86 --- /dev/null +++ b/drivers/vfio/mdev/Makefile @@ -0,0 +1,5 @@ + +mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o + +obj-$(CONFIG_VFIO_MDEV) += mdev.o +obj-$(CONFIG_VFIO_MDEV_DEVICE) += vfio_mdev.o diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c new file mode 100644 index 0000000000000000000000000000000000000000..be1ee89ee9172f4408eab231c9b1e6a52a253c88 --- /dev/null +++ b/drivers/vfio/mdev/mdev_core.c @@ -0,0 +1,385 @@ +/* + * Mediated device Core Driver + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia + * Kirti Wankhede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include "mdev_private.h" + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "NVIDIA Corporation" +#define DRIVER_DESC "Mediated device Core Driver" + +static LIST_HEAD(parent_list); +static DEFINE_MUTEX(parent_list_lock); +static struct class_compat *mdev_bus_compat_class; + +static int _find_mdev_device(struct device *dev, void *data) +{ + struct mdev_device *mdev; + + if (!dev_is_mdev(dev)) + return 0; + + mdev = to_mdev_device(dev); + + if (uuid_le_cmp(mdev->uuid, *(uuid_le *)data) == 0) + return 1; + + return 0; +} + +static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid) +{ + struct device *dev; + + dev = device_find_child(parent->dev, &uuid, _find_mdev_device); + if (dev) { + put_device(dev); + return true; + } + + return false; +} + +/* Should be called holding parent_list_lock */ +static struct parent_device *__find_parent_device(struct device *dev) +{ + struct parent_device *parent; + + list_for_each_entry(parent, &parent_list, next) { + if (parent->dev == dev) + return parent; + } + return NULL; +} + +static void mdev_release_parent(struct kref *kref) +{ + struct parent_device *parent = container_of(kref, struct parent_device, + ref); + struct device *dev = parent->dev; + + kfree(parent); + put_device(dev); +} + +static +inline struct parent_device *mdev_get_parent(struct parent_device *parent) +{ + if (parent) + kref_get(&parent->ref); + + return parent; +} + +static inline void mdev_put_parent(struct parent_device *parent) +{ + if (parent) + kref_put(&parent->ref, mdev_release_parent); +} + +static int mdev_device_create_ops(struct kobject *kobj, + struct mdev_device *mdev) +{ + struct parent_device *parent = mdev->parent; + int ret; + + ret = parent->ops->create(kobj, mdev); + if (ret) + return ret; + + ret = sysfs_create_groups(&mdev->dev.kobj, + parent->ops->mdev_attr_groups); + if (ret) + 
parent->ops->remove(mdev); + + return ret; +} + +/* + * mdev_device_remove_ops gets called from sysfs's 'remove' and when parent + * device is being unregistered from mdev device framework. + * - 'force_remove' is set to 'false' when called from sysfs's 'remove' which + * indicates that if the mdev device is active, used by VMM or userspace + * application, vendor driver could return error then don't remove the device. + * - 'force_remove' is set to 'true' when called from mdev_unregister_device() + * which indicate that parent device is being removed from mdev device + * framework so remove mdev device forcefully. + */ +static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove) +{ + struct parent_device *parent = mdev->parent; + int ret; + + /* + * Vendor driver can return error if VMM or userspace application is + * using this mdev device. + */ + ret = parent->ops->remove(mdev); + if (ret && !force_remove) + return -EBUSY; + + sysfs_remove_groups(&mdev->dev.kobj, parent->ops->mdev_attr_groups); + return 0; +} + +static int mdev_device_remove_cb(struct device *dev, void *data) +{ + if (!dev_is_mdev(dev)) + return 0; + + return mdev_device_remove(dev, data ? *(bool *)data : true); +} + +/* + * mdev_register_device : Register a device + * @dev: device structure representing parent device. + * @ops: Parent device operation structure to be registered. + * + * Add device to list of registered parent devices. + * Returns a negative value on error, otherwise 0. + */ +int mdev_register_device(struct device *dev, const struct parent_ops *ops) +{ + int ret; + struct parent_device *parent; + + /* check for mandatory ops */ + if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups) + return -EINVAL; + + dev = get_device(dev); + if (!dev) + return -EINVAL; + + mutex_lock(&parent_list_lock); + + /* Check for duplicate */ + parent = __find_parent_device(dev); + if (parent) { + ret = -EEXIST; + goto add_dev_err; + } + + parent = kzalloc(sizeof(*parent), GFP_KERNEL); + if (!parent) { + ret = -ENOMEM; + goto add_dev_err; + } + + kref_init(&parent->ref); + mutex_init(&parent->lock); + + parent->dev = dev; + parent->ops = ops; + + if (!mdev_bus_compat_class) { + mdev_bus_compat_class = class_compat_register("mdev_bus"); + if (!mdev_bus_compat_class) { + ret = -ENOMEM; + goto add_dev_err; + } + } + + ret = parent_create_sysfs_files(parent); + if (ret) + goto add_dev_err; + + ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL); + if (ret) + dev_warn(dev, "Failed to create compatibility class link\n"); + + list_add(&parent->next, &parent_list); + mutex_unlock(&parent_list_lock); + + dev_info(dev, "MDEV: Registered\n"); + return 0; + +add_dev_err: + mutex_unlock(&parent_list_lock); + if (parent) + mdev_put_parent(parent); + else + put_device(dev); + return ret; +} +EXPORT_SYMBOL(mdev_register_device); + +/* + * mdev_unregister_device : Unregister a parent device + * @dev: device structure representing parent device. + * + * Remove device from list of registered parent devices. Give a chance to free + * existing mediated devices for given device. 
+ */ + +void mdev_unregister_device(struct device *dev) +{ + struct parent_device *parent; + bool force_remove = true; + + mutex_lock(&parent_list_lock); + parent = __find_parent_device(dev); + + if (!parent) { + mutex_unlock(&parent_list_lock); + return; + } + dev_info(dev, "MDEV: Unregistering\n"); + + list_del(&parent->next); + class_compat_remove_link(mdev_bus_compat_class, dev, NULL); + + device_for_each_child(dev, (void *)&force_remove, + mdev_device_remove_cb); + + parent_remove_sysfs_files(parent); + + mutex_unlock(&parent_list_lock); + mdev_put_parent(parent); +} +EXPORT_SYMBOL(mdev_unregister_device); + +static void mdev_device_release(struct device *dev) +{ + struct mdev_device *mdev = to_mdev_device(dev); + + dev_dbg(&mdev->dev, "MDEV: destroying\n"); + kfree(mdev); +} + +int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) +{ + int ret; + struct mdev_device *mdev; + struct parent_device *parent; + struct mdev_type *type = to_mdev_type(kobj); + + parent = mdev_get_parent(type->parent); + if (!parent) + return -EINVAL; + + mutex_lock(&parent->lock); + + /* Check for duplicate */ + if (mdev_device_exist(parent, uuid)) { + ret = -EEXIST; + goto create_err; + } + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) { + ret = -ENOMEM; + goto create_err; + } + + memcpy(&mdev->uuid, &uuid, sizeof(uuid_le)); + mdev->parent = parent; + kref_init(&mdev->ref); + + mdev->dev.parent = dev; + mdev->dev.bus = &mdev_bus_type; + mdev->dev.release = mdev_device_release; + dev_set_name(&mdev->dev, "%pUl", uuid.b); + + ret = device_register(&mdev->dev); + if (ret) { + put_device(&mdev->dev); + goto create_err; + } + + ret = mdev_device_create_ops(kobj, mdev); + if (ret) + goto create_failed; + + ret = mdev_create_sysfs_files(&mdev->dev, type); + if (ret) { + mdev_device_remove_ops(mdev, true); + goto create_failed; + } + + mdev->type_kobj = kobj; + dev_dbg(&mdev->dev, "MDEV: created\n"); + + mutex_unlock(&parent->lock); + return ret; + +create_failed: + device_unregister(&mdev->dev); + +create_err: + mutex_unlock(&parent->lock); + mdev_put_parent(parent); + return ret; +} + +int mdev_device_remove(struct device *dev, bool force_remove) +{ + struct mdev_device *mdev; + struct parent_device *parent; + struct mdev_type *type; + int ret; + + mdev = to_mdev_device(dev); + type = to_mdev_type(mdev->type_kobj); + parent = mdev->parent; + mutex_lock(&parent->lock); + + ret = mdev_device_remove_ops(mdev, force_remove); + if (ret) { + mutex_unlock(&parent->lock); + return ret; + } + + mdev_remove_sysfs_files(dev, type); + device_unregister(dev); + mutex_unlock(&parent->lock); + mdev_put_parent(parent); + return ret; +} + +static int __init mdev_init(void) +{ + int ret; + + ret = mdev_bus_register(); + + /* + * Attempt to load known vfio_mdev. This gives us a working environment + * without the user needing to explicitly load vfio_mdev driver. 
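mdev_register_device() above refuses a parent whose ops do not provide create, remove and supported_type_groups. A compressed sketch of what a vendor driver's registration could look like follows; every my_* name, the type-group contents and the probe hook are hypothetical, and the optional parent_ops callbacks (open, release, read, write, ioctl, mmap, attribute groups) are omitted:

static int my_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
    /* allocate per-instance state, sized according to the type in kobj */
    return 0;
}

static int my_mdev_remove(struct mdev_device *mdev)
{
    /* free per-instance state; may fail with -EBUSY if still in use */
    return 0;
}

static struct attribute *my_type_attrs[] = {
    NULL,            /* e.g. name, available_instances, device_api */
};

static struct attribute_group my_type_group = {
    .name  = "type1",
    .attrs = my_type_attrs,
};

static struct attribute_group *my_type_groups[] = {
    &my_type_group,
    NULL,
};

static const struct parent_ops my_parent_ops = {
    .create                 = my_mdev_create,
    .remove                 = my_mdev_remove,
    .supported_type_groups  = my_type_groups,
};

static int my_probe(struct device *dev)
{
    return mdev_register_device(dev, &my_parent_ops);
}

Once registered, user space instantiates a mediated device by writing a UUID to the type's create attribute under the parent's mdev_supported_types directory, which lands in mdev_device_create() above; writing 1 to the per-device remove attribute added in mdev_sysfs.c below tears it down again via mdev_device_remove().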
+ */ + if (!ret) + request_module_nowait("vfio_mdev"); + + return ret; +} + +static void __exit mdev_exit(void) +{ + if (mdev_bus_compat_class) + class_compat_unregister(mdev_bus_compat_class); + + mdev_bus_unregister(); +} + +module_init(mdev_init) +module_exit(mdev_exit) + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c new file mode 100644 index 0000000000000000000000000000000000000000..6f0391f6f9b602301b1d5cb4bc691e21fc2e9100 --- /dev/null +++ b/drivers/vfio/mdev/mdev_driver.c @@ -0,0 +1,119 @@ +/* + * MDEV driver + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia + * Kirti Wankhede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include "mdev_private.h" + +static int mdev_attach_iommu(struct mdev_device *mdev) +{ + int ret; + struct iommu_group *group; + + group = iommu_group_alloc(); + if (IS_ERR(group)) + return PTR_ERR(group); + + ret = iommu_group_add_device(group, &mdev->dev); + if (!ret) + dev_info(&mdev->dev, "MDEV: group_id = %d\n", + iommu_group_id(group)); + + iommu_group_put(group); + return ret; +} + +static void mdev_detach_iommu(struct mdev_device *mdev) +{ + iommu_group_remove_device(&mdev->dev); + dev_info(&mdev->dev, "MDEV: detaching iommu\n"); +} + +static int mdev_probe(struct device *dev) +{ + struct mdev_driver *drv = to_mdev_driver(dev->driver); + struct mdev_device *mdev = to_mdev_device(dev); + int ret; + + ret = mdev_attach_iommu(mdev); + if (ret) + return ret; + + if (drv && drv->probe) { + ret = drv->probe(dev); + if (ret) + mdev_detach_iommu(mdev); + } + + return ret; +} + +static int mdev_remove(struct device *dev) +{ + struct mdev_driver *drv = to_mdev_driver(dev->driver); + struct mdev_device *mdev = to_mdev_device(dev); + + if (drv && drv->remove) + drv->remove(dev); + + mdev_detach_iommu(mdev); + + return 0; +} + +struct bus_type mdev_bus_type = { + .name = "mdev", + .probe = mdev_probe, + .remove = mdev_remove, +}; +EXPORT_SYMBOL_GPL(mdev_bus_type); + +/** + * mdev_register_driver - register a new MDEV driver + * @drv: the driver to register + * @owner: module owner of driver to be registered + * + * Returns a negative value on error, otherwise 0. 
+ **/ +int mdev_register_driver(struct mdev_driver *drv, struct module *owner) +{ + /* initialize common driver fields */ + drv->driver.name = drv->name; + drv->driver.bus = &mdev_bus_type; + drv->driver.owner = owner; + + /* register with core */ + return driver_register(&drv->driver); +} +EXPORT_SYMBOL(mdev_register_driver); + +/* + * mdev_unregister_driver - unregister MDEV driver + * @drv: the driver to unregister + */ +void mdev_unregister_driver(struct mdev_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(mdev_unregister_driver); + +int mdev_bus_register(void) +{ + return bus_register(&mdev_bus_type); +} + +void mdev_bus_unregister(void) +{ + bus_unregister(&mdev_bus_type); +} diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h new file mode 100644 index 0000000000000000000000000000000000000000..d35097cbf3d755c027b7a98a8e6daed835754117 --- /dev/null +++ b/drivers/vfio/mdev/mdev_private.h @@ -0,0 +1,41 @@ +/* + * Mediated device internal definitions + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia + * Kirti Wankhede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MDEV_PRIVATE_H +#define MDEV_PRIVATE_H + +int mdev_bus_register(void); +void mdev_bus_unregister(void); + +struct mdev_type { + struct kobject kobj; + struct kobject *devices_kobj; + struct parent_device *parent; + struct list_head next; + struct attribute_group *group; +}; + +#define to_mdev_type_attr(_attr) \ + container_of(_attr, struct mdev_type_attribute, attr) +#define to_mdev_type(_kobj) \ + container_of(_kobj, struct mdev_type, kobj) + +int parent_create_sysfs_files(struct parent_device *parent); +void parent_remove_sysfs_files(struct parent_device *parent); + +int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type); +void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type); + +int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid); +int mdev_device_remove(struct device *dev, bool force_remove); + +#endif /* MDEV_PRIVATE_H */ diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..1a53deb2ee102b677f92d7388eb340a6e0204ffd --- /dev/null +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -0,0 +1,286 @@ +/* + * File attributes for Mediated devices + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia + * Kirti Wankhede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "mdev_private.h" + +/* Static functions */ + +static ssize_t mdev_type_attr_show(struct kobject *kobj, + struct attribute *__attr, char *buf) +{ + struct mdev_type_attribute *attr = to_mdev_type_attr(__attr); + struct mdev_type *type = to_mdev_type(kobj); + ssize_t ret = -EIO; + + if (attr->show) + ret = attr->show(kobj, type->parent->dev, buf); + return ret; +} + +static ssize_t mdev_type_attr_store(struct kobject *kobj, + struct attribute *__attr, + const char *buf, size_t count) +{ + struct mdev_type_attribute *attr = to_mdev_type_attr(__attr); + struct mdev_type *type = to_mdev_type(kobj); + ssize_t ret = -EIO; + + if (attr->store) + ret = attr->store(&type->kobj, type->parent->dev, buf, count); + return ret; +} + +static const struct sysfs_ops mdev_type_sysfs_ops = { + .show = mdev_type_attr_show, + .store = mdev_type_attr_store, +}; + +static ssize_t create_store(struct kobject *kobj, struct device *dev, + const char *buf, size_t count) +{ + char *str; + uuid_le uuid; + int ret; + + if ((count < UUID_STRING_LEN) || (count > UUID_STRING_LEN + 1)) + return -EINVAL; + + str = kstrndup(buf, count, GFP_KERNEL); + if (!str) + return -ENOMEM; + + ret = uuid_le_to_bin(str, &uuid); + kfree(str); + if (ret) + return ret; + + ret = mdev_device_create(kobj, dev, uuid); + if (ret) + return ret; + + return count; +} + +MDEV_TYPE_ATTR_WO(create); + +static void mdev_type_release(struct kobject *kobj) +{ + struct mdev_type *type = to_mdev_type(kobj); + + pr_debug("Releasing group %s\n", kobj->name); + kfree(type); +} + +static struct kobj_type mdev_type_ktype = { + .sysfs_ops = &mdev_type_sysfs_ops, + .release = mdev_type_release, +}; + +struct mdev_type *add_mdev_supported_type(struct parent_device *parent, + struct attribute_group *group) +{ + struct mdev_type *type; + int ret; + + if (!group->name) { + pr_err("%s: Type name empty!\n", __func__); + return ERR_PTR(-EINVAL); + } + + type = kzalloc(sizeof(*type), GFP_KERNEL); + if (!type) + return ERR_PTR(-ENOMEM); + + type->kobj.kset = parent->mdev_types_kset; + + ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL, + "%s-%s", dev_driver_string(parent->dev), + group->name); + if (ret) { + kfree(type); + return ERR_PTR(ret); + } + + ret = sysfs_create_file(&type->kobj, &mdev_type_attr_create.attr); + if (ret) + goto attr_create_failed; + + type->devices_kobj = kobject_create_and_add("devices", &type->kobj); + if (!type->devices_kobj) { + ret = -ENOMEM; + goto attr_devices_failed; + } + + ret = sysfs_create_files(&type->kobj, + (const struct attribute **)group->attrs); + if (ret) { + ret = -ENOMEM; + goto attrs_failed; + } + + type->group = group; + type->parent = parent; + return type; + +attrs_failed: + kobject_put(type->devices_kobj); +attr_devices_failed: + sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr); +attr_create_failed: + kobject_del(&type->kobj); + kobject_put(&type->kobj); + return ERR_PTR(ret); +} + +static void remove_mdev_supported_type(struct mdev_type *type) +{ + sysfs_remove_files(&type->kobj, + (const struct attribute **)type->group->attrs); + kobject_put(type->devices_kobj); + sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr); + kobject_del(&type->kobj); + kobject_put(&type->kobj); +} + +static int add_mdev_supported_type_groups(struct parent_device *parent) +{ + int i; + + for (i = 0; parent->ops->supported_type_groups[i]; i++) { + struct mdev_type *type; + + type = add_mdev_supported_type(parent, + 
parent->ops->supported_type_groups[i]); + if (IS_ERR(type)) { + struct mdev_type *ltype, *tmp; + + list_for_each_entry_safe(ltype, tmp, &parent->type_list, + next) { + list_del(<ype->next); + remove_mdev_supported_type(ltype); + } + return PTR_ERR(type); + } + list_add(&type->next, &parent->type_list); + } + return 0; +} + +/* mdev sysfs functions */ +void parent_remove_sysfs_files(struct parent_device *parent) +{ + struct mdev_type *type, *tmp; + + list_for_each_entry_safe(type, tmp, &parent->type_list, next) { + list_del(&type->next); + remove_mdev_supported_type(type); + } + + sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups); + kset_unregister(parent->mdev_types_kset); +} + +int parent_create_sysfs_files(struct parent_device *parent) +{ + int ret; + + parent->mdev_types_kset = kset_create_and_add("mdev_supported_types", + NULL, &parent->dev->kobj); + + if (!parent->mdev_types_kset) + return -ENOMEM; + + INIT_LIST_HEAD(&parent->type_list); + + ret = sysfs_create_groups(&parent->dev->kobj, + parent->ops->dev_attr_groups); + if (ret) + goto create_err; + + ret = add_mdev_supported_type_groups(parent); + if (ret) + sysfs_remove_groups(&parent->dev->kobj, + parent->ops->dev_attr_groups); + else + return ret; + +create_err: + kset_unregister(parent->mdev_types_kset); + return ret; +} + +static ssize_t remove_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + if (val && device_remove_file_self(dev, attr)) { + int ret; + + ret = mdev_device_remove(dev, false); + if (ret) { + device_create_file(dev, attr); + return ret; + } + } + + return count; +} + +static DEVICE_ATTR_WO(remove); + +static const struct attribute *mdev_device_attrs[] = { + &dev_attr_remove.attr, + NULL, +}; + +int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type) +{ + int ret; + + ret = sysfs_create_files(&dev->kobj, mdev_device_attrs); + if (ret) + return ret; + + ret = sysfs_create_link(type->devices_kobj, &dev->kobj, dev_name(dev)); + if (ret) + goto device_link_failed; + + ret = sysfs_create_link(&dev->kobj, &type->kobj, "mdev_type"); + if (ret) + goto type_link_failed; + + return ret; + +type_link_failed: + sysfs_remove_link(type->devices_kobj, dev_name(dev)); +device_link_failed: + sysfs_remove_files(&dev->kobj, mdev_device_attrs); + return ret; +} + +void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type) +{ + sysfs_remove_link(&dev->kobj, "mdev_type"); + sysfs_remove_link(type->devices_kobj, dev_name(dev)); + sysfs_remove_files(&dev->kobj, mdev_device_attrs); +} diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c new file mode 100644 index 0000000000000000000000000000000000000000..ffc36758cb84c08ce1b2c4d516bd73b0c18d52d1 --- /dev/null +++ b/drivers/vfio/mdev/vfio_mdev.c @@ -0,0 +1,148 @@ +/* + * VFIO based driver for Mediated device + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia + * Kirti Wankhede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mdev_private.h" + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "NVIDIA Corporation" +#define DRIVER_DESC "VFIO based driver for Mediated device" + +static int vfio_mdev_open(void *device_data) +{ + struct mdev_device *mdev = device_data; + struct parent_device *parent = mdev->parent; + int ret; + + if (unlikely(!parent->ops->open)) + return -EINVAL; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + ret = parent->ops->open(mdev); + if (ret) + module_put(THIS_MODULE); + + return ret; +} + +static void vfio_mdev_release(void *device_data) +{ + struct mdev_device *mdev = device_data; + struct parent_device *parent = mdev->parent; + + if (likely(parent->ops->release)) + parent->ops->release(mdev); + + module_put(THIS_MODULE); +} + +static long vfio_mdev_unlocked_ioctl(void *device_data, + unsigned int cmd, unsigned long arg) +{ + struct mdev_device *mdev = device_data; + struct parent_device *parent = mdev->parent; + + if (unlikely(!parent->ops->ioctl)) + return -EINVAL; + + return parent->ops->ioctl(mdev, cmd, arg); +} + +static ssize_t vfio_mdev_read(void *device_data, char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = device_data; + struct parent_device *parent = mdev->parent; + + if (unlikely(!parent->ops->read)) + return -EINVAL; + + return parent->ops->read(mdev, buf, count, ppos); +} + +static ssize_t vfio_mdev_write(void *device_data, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = device_data; + struct parent_device *parent = mdev->parent; + + if (unlikely(!parent->ops->write)) + return -EINVAL; + + return parent->ops->write(mdev, buf, count, ppos); +} + +static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma) +{ + struct mdev_device *mdev = device_data; + struct parent_device *parent = mdev->parent; + + if (unlikely(!parent->ops->mmap)) + return -EINVAL; + + return parent->ops->mmap(mdev, vma); +} + +static const struct vfio_device_ops vfio_mdev_dev_ops = { + .name = "vfio-mdev", + .open = vfio_mdev_open, + .release = vfio_mdev_release, + .ioctl = vfio_mdev_unlocked_ioctl, + .read = vfio_mdev_read, + .write = vfio_mdev_write, + .mmap = vfio_mdev_mmap, +}; + +int vfio_mdev_probe(struct device *dev) +{ + struct mdev_device *mdev = to_mdev_device(dev); + + return vfio_add_group_dev(dev, &vfio_mdev_dev_ops, mdev); +} + +void vfio_mdev_remove(struct device *dev) +{ + vfio_del_group_dev(dev); +} + +struct mdev_driver vfio_mdev_driver = { + .name = "vfio_mdev", + .probe = vfio_mdev_probe, + .remove = vfio_mdev_remove, +}; + +static int __init vfio_mdev_init(void) +{ + return mdev_register_driver(&vfio_mdev_driver, THIS_MODULE); +} + +static void __exit vfio_mdev_exit(void) +{ + mdev_unregister_driver(&vfio_mdev_driver); +} + +module_init(vfio_mdev_init) +module_exit(vfio_mdev_exit) + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 031bc08d000d4a7d774f3793df7be5168712e161..dcd7c2a9961830b7fd1ff4155d5c302cd55fd809 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -558,10 +558,9 @@ static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev, static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev, struct vfio_info_cap *caps) { - struct vfio_info_cap_header *header; struct vfio_region_info_cap_sparse_mmap *sparse; size_t 
end, size; - int nr_areas = 2, i = 0; + int nr_areas = 2, i = 0, ret; end = pci_resource_len(vdev->pdev, vdev->msix_bar); @@ -572,13 +571,10 @@ static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev, size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas)); - header = vfio_info_cap_add(caps, size, - VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1); - if (IS_ERR(header)) - return PTR_ERR(header); + sparse = kzalloc(size, GFP_KERNEL); + if (!sparse) + return -ENOMEM; - sparse = container_of(header, - struct vfio_region_info_cap_sparse_mmap, header); sparse->nr_areas = nr_areas; if (vdev->msix_offset & PAGE_MASK) { @@ -594,26 +590,11 @@ static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev, i++; } - return 0; -} - -static int region_type_cap(struct vfio_pci_device *vdev, - struct vfio_info_cap *caps, - unsigned int type, unsigned int subtype) -{ - struct vfio_info_cap_header *header; - struct vfio_region_info_cap_type *cap; - - header = vfio_info_cap_add(caps, sizeof(*cap), - VFIO_REGION_INFO_CAP_TYPE, 1); - if (IS_ERR(header)) - return PTR_ERR(header); - - cap = container_of(header, struct vfio_region_info_cap_type, header); - cap->type = type; - cap->subtype = subtype; + ret = vfio_info_add_capability(caps, VFIO_REGION_INFO_CAP_SPARSE_MMAP, + sparse); + kfree(sparse); - return 0; + return ret; } int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, @@ -752,6 +733,9 @@ static long vfio_pci_ioctl(void *device_data, break; default: + { + struct vfio_region_info_cap_type cap_type; + if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) return -EINVAL; @@ -762,11 +746,16 @@ static long vfio_pci_ioctl(void *device_data, info.size = vdev->region[i].size; info.flags = vdev->region[i].flags; - ret = region_type_cap(vdev, &caps, - vdev->region[i].type, - vdev->region[i].subtype); + cap_type.type = vdev->region[i].type; + cap_type.subtype = vdev->region[i].subtype; + + ret = vfio_info_add_capability(&caps, + VFIO_REGION_INFO_CAP_TYPE, + &cap_type); if (ret) return ret; + + } } if (caps.size) { @@ -829,45 +818,25 @@ static long vfio_pci_ioctl(void *device_data, } else if (cmd == VFIO_DEVICE_SET_IRQS) { struct vfio_irq_set hdr; - size_t size; u8 *data = NULL; int max, ret = 0; + size_t data_size = 0; minsz = offsetofend(struct vfio_irq_set, count); if (copy_from_user(&hdr, (void __user *)arg, minsz)) return -EFAULT; - if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || - hdr.count >= (U32_MAX - hdr.start) || - hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | - VFIO_IRQ_SET_ACTION_TYPE_MASK)) - return -EINVAL; - max = vfio_pci_get_irq_count(vdev, hdr.index); - if (hdr.start >= max || hdr.start + hdr.count > max) - return -EINVAL; - switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { - case VFIO_IRQ_SET_DATA_NONE: - size = 0; - break; - case VFIO_IRQ_SET_DATA_BOOL: - size = sizeof(uint8_t); - break; - case VFIO_IRQ_SET_DATA_EVENTFD: - size = sizeof(int32_t); - break; - default: - return -EINVAL; - } - - if (size) { - if (hdr.argsz - minsz < hdr.count * size) - return -EINVAL; + ret = vfio_set_irqs_validate_and_prepare(&hdr, max, + VFIO_PCI_NUM_IRQS, &data_size); + if (ret) + return ret; + if (data_size) { data = memdup_user((void __user *)(arg + minsz), - hdr.count * size); + data_size); if (IS_ERR(data)) return PTR_ERR(data); } diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 65d4a3015542874e46a299954645705656aeb98d..330a57024cbc5414b381b26e6ef80cf129cd7f9b 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ 
b/drivers/vfio/pci/vfio_pci_config.c @@ -31,8 +31,6 @@ #include "vfio_pci_private.h" -#define PCI_CFG_SPACE_SIZE 256 - /* Fake capability ID for standard config space */ #define PCI_CAP_ID_BASIC 0 @@ -152,7 +150,7 @@ static int vfio_user_config_read(struct pci_dev *pdev, int offset, *val = cpu_to_le32(tmp_val); - return pcibios_err_to_errno(ret); + return ret; } static int vfio_user_config_write(struct pci_dev *pdev, int offset, @@ -173,7 +171,7 @@ static int vfio_user_config_write(struct pci_dev *pdev, int offset, break; } - return pcibios_err_to_errno(ret); + return ret; } static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos, @@ -257,7 +255,7 @@ static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos, ret = vfio_user_config_read(vdev->pdev, pos, val, count); if (ret) - return pcibios_err_to_errno(ret); + return ret; if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */ if (offset < 4) @@ -295,7 +293,7 @@ static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos, ret = vfio_user_config_read(vdev->pdev, pos, val, count); if (ret) - return pcibios_err_to_errno(ret); + return ret; return count; } @@ -1089,7 +1087,7 @@ static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos, start + PCI_MSI_FLAGS, flags); if (ret) - return pcibios_err_to_errno(ret); + return ret; } return count; diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index d7814283075463340509a1da99a7204743c02636..4c27f4be3c3d038598304729dbb1af90c6b2395a 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -364,36 +364,21 @@ static long vfio_platform_ioctl(void *device_data, struct vfio_irq_set hdr; u8 *data = NULL; int ret = 0; + size_t data_size = 0; minsz = offsetofend(struct vfio_irq_set, count); if (copy_from_user(&hdr, (void __user *)arg, minsz)) return -EFAULT; - if (hdr.argsz < minsz) - return -EINVAL; - - if (hdr.index >= vdev->num_irqs) - return -EINVAL; - - if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | - VFIO_IRQ_SET_ACTION_TYPE_MASK)) - return -EINVAL; - - if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { - size_t size; - - if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) - size = sizeof(uint8_t); - else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) - size = sizeof(int32_t); - else - return -EINVAL; - - if (hdr.argsz - minsz < size) - return -EINVAL; + ret = vfio_set_irqs_validate_and_prepare(&hdr, vdev->num_irqs, + vdev->num_irqs, &data_size); + if (ret) + return ret; - data = memdup_user((void __user *)(arg + minsz), size); + if (data_size) { + data = memdup_user((void __user *)(arg + minsz), + data_size); if (IS_ERR(data)) return PTR_ERR(data); } diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index d1d70e0b011bf41a8d49255a2a4359be342041c7..9901c4671e2fd6c9cf19a44e3b6faea2b64abe8c 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -86,6 +86,8 @@ struct vfio_group { struct mutex unbound_lock; atomic_t opened; bool noiommu; + struct kvm *kvm; + struct blocking_notifier_head notifier; }; struct vfio_device { @@ -339,6 +341,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) #ifdef CONFIG_VFIO_NOIOMMU group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu); #endif + BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); group->nb.notifier_call = vfio_iommu_group_notifier; @@ -480,6 +483,21 @@ static struct vfio_group *vfio_group_get_from_minor(int minor) return group; } +static struct 
vfio_group *vfio_group_get_from_dev(struct device *dev) +{ + struct iommu_group *iommu_group; + struct vfio_group *group; + + iommu_group = iommu_group_get(dev); + if (!iommu_group) + return NULL; + + group = vfio_group_get_from_iommu(iommu_group); + iommu_group_put(iommu_group); + + return group; +} + /** * Device objects - create, release, get, put, search */ @@ -811,16 +829,10 @@ EXPORT_SYMBOL_GPL(vfio_add_group_dev); */ struct vfio_device *vfio_device_get_from_dev(struct device *dev) { - struct iommu_group *iommu_group; struct vfio_group *group; struct vfio_device *device; - iommu_group = iommu_group_get(dev); - if (!iommu_group) - return NULL; - - group = vfio_group_get_from_iommu(iommu_group); - iommu_group_put(iommu_group); + group = vfio_group_get_from_dev(dev); if (!group) return NULL; @@ -1376,6 +1388,23 @@ static bool vfio_group_viable(struct vfio_group *group) group, vfio_dev_viable) == 0); } +static int vfio_group_add_container_user(struct vfio_group *group) +{ + if (!atomic_inc_not_zero(&group->container_users)) + return -EINVAL; + + if (group->noiommu) { + atomic_dec(&group->container_users); + return -EPERM; + } + if (!group->container->iommu_driver || !vfio_group_viable(group)) { + atomic_dec(&group->container_users); + return -EINVAL; + } + + return 0; +} + static const struct file_operations vfio_device_fops; static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) @@ -1555,6 +1584,9 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep) filep->private_data = NULL; + /* Any user didn't unregister? */ + WARN_ON(group->notifier.head); + vfio_group_try_dissolve_container(group); atomic_dec(&group->opened); @@ -1685,23 +1717,14 @@ static const struct file_operations vfio_device_fops = { struct vfio_group *vfio_group_get_external_user(struct file *filep) { struct vfio_group *group = filep->private_data; + int ret; if (filep->f_op != &vfio_group_fops) return ERR_PTR(-EINVAL); - if (!atomic_inc_not_zero(&group->container_users)) - return ERR_PTR(-EINVAL); - - if (group->noiommu) { - atomic_dec(&group->container_users); - return ERR_PTR(-EPERM); - } - - if (!group->container->iommu_driver || - !vfio_group_viable(group)) { - atomic_dec(&group->container_users); - return ERR_PTR(-EINVAL); - } + ret = vfio_group_add_container_user(group); + if (ret) + return ERR_PTR(ret); vfio_group_get(group); @@ -1763,7 +1786,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps, header->version = version; /* Add to the end of the capability chain */ - for (tmp = caps->buf; tmp->next; tmp = (void *)tmp + tmp->next) + for (tmp = buf; tmp->next; tmp = buf + tmp->next) ; /* nothing */ tmp->next = caps->size; @@ -1776,11 +1799,403 @@ EXPORT_SYMBOL_GPL(vfio_info_cap_add); void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset) { struct vfio_info_cap_header *tmp; + void *buf = (void *)caps->buf; - for (tmp = caps->buf; tmp->next; tmp = (void *)tmp + tmp->next - offset) + for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset) tmp->next += offset; } -EXPORT_SYMBOL_GPL(vfio_info_cap_shift); +EXPORT_SYMBOL(vfio_info_cap_shift); + +static int sparse_mmap_cap(struct vfio_info_cap *caps, void *cap_type) +{ + struct vfio_info_cap_header *header; + struct vfio_region_info_cap_sparse_mmap *sparse_cap, *sparse = cap_type; + size_t size; + + size = sizeof(*sparse) + sparse->nr_areas * sizeof(*sparse->areas); + header = vfio_info_cap_add(caps, size, + VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1); + if (IS_ERR(header)) + return PTR_ERR(header); 
+ + sparse_cap = container_of(header, + struct vfio_region_info_cap_sparse_mmap, header); + sparse_cap->nr_areas = sparse->nr_areas; + memcpy(sparse_cap->areas, sparse->areas, + sparse->nr_areas * sizeof(*sparse->areas)); + return 0; +} + +static int region_type_cap(struct vfio_info_cap *caps, void *cap_type) +{ + struct vfio_info_cap_header *header; + struct vfio_region_info_cap_type *type_cap, *cap = cap_type; + + header = vfio_info_cap_add(caps, sizeof(*cap), + VFIO_REGION_INFO_CAP_TYPE, 1); + if (IS_ERR(header)) + return PTR_ERR(header); + + type_cap = container_of(header, struct vfio_region_info_cap_type, + header); + type_cap->type = cap->type; + type_cap->subtype = cap->subtype; + return 0; +} + +int vfio_info_add_capability(struct vfio_info_cap *caps, int cap_type_id, + void *cap_type) +{ + int ret = -EINVAL; + + if (!cap_type) + return 0; + + switch (cap_type_id) { + case VFIO_REGION_INFO_CAP_SPARSE_MMAP: + ret = sparse_mmap_cap(caps, cap_type); + break; + + case VFIO_REGION_INFO_CAP_TYPE: + ret = region_type_cap(caps, cap_type); + break; + } + + return ret; +} +EXPORT_SYMBOL(vfio_info_add_capability); + +int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, + int max_irq_type, size_t *data_size) +{ + unsigned long minsz; + size_t size; + + minsz = offsetofend(struct vfio_irq_set, count); + + if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) || + (hdr->count >= (U32_MAX - hdr->start)) || + (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | + VFIO_IRQ_SET_ACTION_TYPE_MASK))) + return -EINVAL; + + if (data_size) + *data_size = 0; + + if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs) + return -EINVAL; + + switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { + case VFIO_IRQ_SET_DATA_NONE: + size = 0; + break; + case VFIO_IRQ_SET_DATA_BOOL: + size = sizeof(uint8_t); + break; + case VFIO_IRQ_SET_DATA_EVENTFD: + size = sizeof(int32_t); + break; + default: + return -EINVAL; + } + + if (size) { + if (hdr->argsz - minsz < hdr->count * size) + return -EINVAL; + + if (!data_size) + return -EINVAL; + + *data_size = hdr->count * size; + } + + return 0; +} +EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare); + +/* + * Pin a set of guest PFNs and return their associated host PFNs for local + * domain only. + * @dev [in] : device + * @user_pfn [in]: array of user/guest PFNs to be unpinned. + * @npage [in] : count of elements in user_pfn array. This count should not + * be greater VFIO_PIN_PAGES_MAX_ENTRIES. + * @prot [in] : protection flags + * @phys_pfn[out]: array of host PFNs + * Return error or number of pages pinned. 
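vfio_set_irqs_validate_and_prepare() above centralizes the VFIO_DEVICE_SET_IRQS sanity checks that vfio-pci and vfio-platform previously open-coded, and reports the user payload size so the caller only has to memdup_user() it. A bus driver reusing it would follow roughly the same shape as the two converted call sites; the my_* names and MY_NUM_IRQ_INDEXES below are placeholders:

static long my_set_irqs_ioctl(struct my_vfio_dev *vdev, unsigned long arg)
{
    unsigned long minsz = offsetofend(struct vfio_irq_set, count);
    struct vfio_irq_set hdr;
    size_t data_size = 0;
    u8 *data = NULL;
    int ret;

    if (copy_from_user(&hdr, (void __user *)arg, minsz))
        return -EFAULT;

    /* checks argsz/index/start/count/flags and computes the payload size */
    ret = vfio_set_irqs_validate_and_prepare(&hdr, my_irq_count(vdev, hdr.index),
                                             MY_NUM_IRQ_INDEXES, &data_size);
    if (ret)
        return ret;

    if (data_size) {
        data = memdup_user((void __user *)(arg + minsz), data_size);
        if (IS_ERR(data))
            return PTR_ERR(data);
    }

    ret = my_apply_irq_config(vdev, &hdr, data);    /* placeholder */
    kfree(data);
    return ret;
}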
+ */ +int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, + int prot, unsigned long *phys_pfn) +{ + struct vfio_container *container; + struct vfio_group *group; + struct vfio_iommu_driver *driver; + int ret; + + if (!dev || !user_pfn || !phys_pfn || !npage) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + group = vfio_group_get_from_dev(dev); + if (!group) + return -ENODEV; + + ret = vfio_group_add_container_user(group); + if (ret) + goto err_pin_pages; + + container = group->container; + down_read(&container->group_lock); + + driver = container->iommu_driver; + if (likely(driver && driver->ops->pin_pages)) + ret = driver->ops->pin_pages(container->iommu_data, user_pfn, + npage, prot, phys_pfn); + else + ret = -ENOTTY; + + up_read(&container->group_lock); + vfio_group_try_dissolve_container(group); + +err_pin_pages: + vfio_group_put(group); + return ret; +} +EXPORT_SYMBOL(vfio_pin_pages); + +/* + * Unpin set of host PFNs for local domain only. + * @dev [in] : device + * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest + * PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + * @npage [in] : count of elements in user_pfn array. This count should not + * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + * Return error or number of pages unpinned. + */ +int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage) +{ + struct vfio_container *container; + struct vfio_group *group; + struct vfio_iommu_driver *driver; + int ret; + + if (!dev || !user_pfn || !npage) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + group = vfio_group_get_from_dev(dev); + if (!group) + return -ENODEV; + + ret = vfio_group_add_container_user(group); + if (ret) + goto err_unpin_pages; + + container = group->container; + down_read(&container->group_lock); + + driver = container->iommu_driver; + if (likely(driver && driver->ops->unpin_pages)) + ret = driver->ops->unpin_pages(container->iommu_data, user_pfn, + npage); + else + ret = -ENOTTY; + + up_read(&container->group_lock); + vfio_group_try_dissolve_container(group); + +err_unpin_pages: + vfio_group_put(group); + return ret; +} +EXPORT_SYMBOL(vfio_unpin_pages); + +static int vfio_register_iommu_notifier(struct vfio_group *group, + unsigned long *events, + struct notifier_block *nb) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret; + + ret = vfio_group_add_container_user(group); + if (ret) + return -EINVAL; + + container = group->container; + down_read(&container->group_lock); + + driver = container->iommu_driver; + if (likely(driver && driver->ops->register_notifier)) + ret = driver->ops->register_notifier(container->iommu_data, + events, nb); + else + ret = -ENOTTY; + + up_read(&container->group_lock); + vfio_group_try_dissolve_container(group); + + return ret; +} + +static int vfio_unregister_iommu_notifier(struct vfio_group *group, + struct notifier_block *nb) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret; + + ret = vfio_group_add_container_user(group); + if (ret) + return -EINVAL; + + container = group->container; + down_read(&container->group_lock); + + driver = container->iommu_driver; + if (likely(driver && driver->ops->unregister_notifier)) + ret = driver->ops->unregister_notifier(container->iommu_data, + nb); + else + ret = -ENOTTY; + + up_read(&container->group_lock); + vfio_group_try_dissolve_container(group); + + return ret; +} + +void 
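vfio_pin_pages() and vfio_unpin_pages() above let a mediated-device vendor driver pin guest pages through whatever IOMMU backend backs the container, without owning hardware IOMMU mappings itself. A rough single-page usage sketch; the surrounding driver context is hypothetical and the protection bits are assumed to be the IOMMU_READ/IOMMU_WRITE flags from the IOMMU API:

static int my_pin_one_guest_page(struct device *mdev_dev, unsigned long gfn,
                                 unsigned long *hpfn)
{
    int ret;

    ret = vfio_pin_pages(mdev_dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, hpfn);
    if (ret < 0)
        return ret;
    if (ret != 1)    /* helper returns the number of pages pinned */
        return -EFAULT;

    /* ... program the device to DMA against pfn_to_page(*hpfn) ... */
    return 0;
}

static void my_unpin_one_guest_page(struct device *mdev_dev, unsigned long gfn)
{
    vfio_unpin_pages(mdev_dev, &gfn, 1);
}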
vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm) +{ + group->kvm = kvm; + blocking_notifier_call_chain(&group->notifier, + VFIO_GROUP_NOTIFY_SET_KVM, kvm); +} +EXPORT_SYMBOL_GPL(vfio_group_set_kvm); + +static int vfio_register_group_notifier(struct vfio_group *group, + unsigned long *events, + struct notifier_block *nb) +{ + struct vfio_container *container; + int ret; + bool set_kvm = false; + + if (*events & VFIO_GROUP_NOTIFY_SET_KVM) + set_kvm = true; + + /* clear known events */ + *events &= ~VFIO_GROUP_NOTIFY_SET_KVM; + + /* refuse to continue if still events remaining */ + if (*events) + return -EINVAL; + + ret = vfio_group_add_container_user(group); + if (ret) + return -EINVAL; + + container = group->container; + down_read(&container->group_lock); + + ret = blocking_notifier_chain_register(&group->notifier, nb); + + /* + * The attaching of kvm and vfio_group might already happen, so + * here we replay once upon registration. + */ + if (!ret && set_kvm && group->kvm) + blocking_notifier_call_chain(&group->notifier, + VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); + + up_read(&container->group_lock); + vfio_group_try_dissolve_container(group); + + return ret; +} + +static int vfio_unregister_group_notifier(struct vfio_group *group, + struct notifier_block *nb) +{ + struct vfio_container *container; + int ret; + + ret = vfio_group_add_container_user(group); + if (ret) + return -EINVAL; + + container = group->container; + down_read(&container->group_lock); + + ret = blocking_notifier_chain_unregister(&group->notifier, nb); + + up_read(&container->group_lock); + vfio_group_try_dissolve_container(group); + + return ret; +} + +int vfio_register_notifier(struct device *dev, enum vfio_notify_type type, + unsigned long *events, struct notifier_block *nb) +{ + struct vfio_group *group; + int ret; + + if (!dev || !nb || !events || (*events == 0)) + return -EINVAL; + + group = vfio_group_get_from_dev(dev); + if (!group) + return -ENODEV; + + switch (type) { + case VFIO_IOMMU_NOTIFY: + ret = vfio_register_iommu_notifier(group, events, nb); + break; + case VFIO_GROUP_NOTIFY: + ret = vfio_register_group_notifier(group, events, nb); + break; + default: + ret = -EINVAL; + } + + vfio_group_put(group); + return ret; +} +EXPORT_SYMBOL(vfio_register_notifier); + +int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type, + struct notifier_block *nb) +{ + struct vfio_group *group; + int ret; + + if (!dev || !nb) + return -EINVAL; + + group = vfio_group_get_from_dev(dev); + if (!group) + return -ENODEV; + + switch (type) { + case VFIO_IOMMU_NOTIFY: + ret = vfio_unregister_iommu_notifier(group, nb); + break; + case VFIO_GROUP_NOTIFY: + ret = vfio_unregister_group_notifier(group, nb); + break; + default: + ret = -EINVAL; + } + + vfio_group_put(group); + return ret; +} +EXPORT_SYMBOL(vfio_unregister_notifier); /** * Module/class support diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 80378ddadc5ca4581bcba2792dc49f56209f2902..c8823578a1b2afd3ae7a36c2f526fd071116876b 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -31,49 +31,49 @@ static void tce_iommu_detach_group(void *iommu_data, struct iommu_group *iommu_group); -static long try_increment_locked_vm(long npages) +static long try_increment_locked_vm(struct mm_struct *mm, long npages) { long ret = 0, locked, lock_limit; - if (!current || !current->mm) - return -ESRCH; /* process exited */ + if (WARN_ON_ONCE(!mm)) + return -EPERM; if (!npages) 
return 0; - down_write(¤t->mm->mmap_sem); - locked = current->mm->locked_vm + npages; + down_write(&mm->mmap_sem); + locked = mm->locked_vm + npages; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) ret = -ENOMEM; else - current->mm->locked_vm += npages; + mm->locked_vm += npages; pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, npages << PAGE_SHIFT, - current->mm->locked_vm << PAGE_SHIFT, + mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), ret ? " - exceeded" : ""); - up_write(¤t->mm->mmap_sem); + up_write(&mm->mmap_sem); return ret; } -static void decrement_locked_vm(long npages) +static void decrement_locked_vm(struct mm_struct *mm, long npages) { - if (!current || !current->mm || !npages) - return; /* process exited */ + if (!mm || !npages) + return; - down_write(¤t->mm->mmap_sem); - if (WARN_ON_ONCE(npages > current->mm->locked_vm)) - npages = current->mm->locked_vm; - current->mm->locked_vm -= npages; + down_write(&mm->mmap_sem); + if (WARN_ON_ONCE(npages > mm->locked_vm)) + npages = mm->locked_vm; + mm->locked_vm -= npages; pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, npages << PAGE_SHIFT, - current->mm->locked_vm << PAGE_SHIFT, + mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK)); - up_write(¤t->mm->mmap_sem); + up_write(&mm->mmap_sem); } /* @@ -88,6 +88,15 @@ struct tce_iommu_group { struct iommu_group *grp; }; +/* + * A container needs to remember which preregistered region it has + * referenced to do proper cleanup at the userspace process exit. + */ +struct tce_iommu_prereg { + struct list_head next; + struct mm_iommu_table_group_mem_t *mem; +}; + /* * The container descriptor supports only a single group per container. * Required by the API as the container is not supplied with the IOMMU group @@ -97,24 +106,68 @@ struct tce_container { struct mutex lock; bool enabled; bool v2; + bool def_window_pending; unsigned long locked_pages; + struct mm_struct *mm; struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; struct list_head group_list; + struct list_head prereg_list; }; +static long tce_iommu_mm_set(struct tce_container *container) +{ + if (container->mm) { + if (container->mm == current->mm) + return 0; + return -EPERM; + } + BUG_ON(!current->mm); + container->mm = current->mm; + atomic_inc(&container->mm->mm_count); + + return 0; +} + +static long tce_iommu_prereg_free(struct tce_container *container, + struct tce_iommu_prereg *tcemem) +{ + long ret; + + ret = mm_iommu_put(container->mm, tcemem->mem); + if (ret) + return ret; + + list_del(&tcemem->next); + kfree(tcemem); + + return 0; +} + static long tce_iommu_unregister_pages(struct tce_container *container, __u64 vaddr, __u64 size) { struct mm_iommu_table_group_mem_t *mem; + struct tce_iommu_prereg *tcemem; + bool found = false; if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) return -EINVAL; - mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT); + mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT); if (!mem) return -ENOENT; - return mm_iommu_put(mem); + list_for_each_entry(tcemem, &container->prereg_list, next) { + if (tcemem->mem == mem) { + found = true; + break; + } + } + + if (!found) + return -ENOENT; + + return tce_iommu_prereg_free(container, tcemem); } static long tce_iommu_register_pages(struct tce_container *container, @@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container, { long ret = 0; struct mm_iommu_table_group_mem_t *mem = NULL; + struct tce_iommu_prereg *tcemem; unsigned 
long entries = size >> PAGE_SHIFT; if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || ((vaddr + size) < vaddr)) return -EINVAL; - ret = mm_iommu_get(vaddr, entries, &mem); + mem = mm_iommu_find(container->mm, vaddr, entries); + if (mem) { + list_for_each_entry(tcemem, &container->prereg_list, next) { + if (tcemem->mem == mem) + return -EBUSY; + } + } + + ret = mm_iommu_get(container->mm, vaddr, entries, &mem); if (ret) return ret; + tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL); + tcemem->mem = mem; + list_add(&tcemem->next, &container->prereg_list); + container->enabled = true; return 0; } -static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) +static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl, + struct mm_struct *mm) { unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE); @@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) BUG_ON(tbl->it_userspace); - ret = try_increment_locked_vm(cb >> PAGE_SHIFT); + ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT); if (ret) return ret; uas = vzalloc(cb); if (!uas) { - decrement_locked_vm(cb >> PAGE_SHIFT); + decrement_locked_vm(mm, cb >> PAGE_SHIFT); return -ENOMEM; } tbl->it_userspace = uas; @@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) return 0; } -static void tce_iommu_userspace_view_free(struct iommu_table *tbl) +static void tce_iommu_userspace_view_free(struct iommu_table *tbl, + struct mm_struct *mm) { unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE); @@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl) vfree(tbl->it_userspace); tbl->it_userspace = NULL; - decrement_locked_vm(cb >> PAGE_SHIFT); + decrement_locked_vm(mm, cb >> PAGE_SHIFT); } static bool tce_page_is_contained(struct page *page, unsigned page_shift) @@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container) struct iommu_table_group *table_group; struct tce_iommu_group *tcegrp; - if (!current->mm) - return -ESRCH; /* process exited */ - if (container->enabled) return -EBUSY; @@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container) if (!table_group->tce32_size) return -EPERM; + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + locked = table_group->tce32_size >> PAGE_SHIFT; - ret = try_increment_locked_vm(locked); + ret = try_increment_locked_vm(container->mm, locked); if (ret) return ret; @@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container) container->enabled = false; - if (!current->mm) - return; - - decrement_locked_vm(container->locked_pages); + BUG_ON(!container->mm); + decrement_locked_vm(container->mm, container->locked_pages); } static void *tce_iommu_open(unsigned long arg) @@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg) mutex_init(&container->lock); INIT_LIST_HEAD_RCU(&container->group_list); + INIT_LIST_HEAD_RCU(&container->prereg_list); container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; @@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg) static int tce_iommu_clear(struct tce_container *container, struct iommu_table *tbl, unsigned long entry, unsigned long pages); -static void tce_iommu_free_table(struct iommu_table *tbl); +static void tce_iommu_free_table(struct tce_container *container, + struct iommu_table *tbl); static void tce_iommu_release(void *iommu_data) { @@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data) continue; 
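		/*
		 * Editorial note, not part of the patch: container->mm is
		 * captured by tce_iommu_mm_set() on the first ioctl that
		 * needs it and released only when the container is torn
		 * down, so the locked_vm accounting done during this
		 * cleanup still reaches the right mm even if the owning
		 * process has already exited:
		 *
		 *	tce_iommu_mm_set():	atomic_inc(&container->mm->mm_count);
		 *	tce_iommu_release():	mmdrop(container->mm);
		 *
		 * Any later ioctl caller with a different mm is rejected
		 * with -EPERM before the command switch.
		 */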
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); - tce_iommu_free_table(tbl); + tce_iommu_free_table(container, tbl); + } + + while (!list_empty(&container->prereg_list)) { + struct tce_iommu_prereg *tcemem; + + tcemem = list_first_entry(&container->prereg_list, + struct tce_iommu_prereg, next); + WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem)); } tce_iommu_disable(container); + if (container->mm) + mmdrop(container->mm); mutex_destroy(&container->lock); kfree(container); @@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container, put_page(page); } -static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, +static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, + unsigned long tce, unsigned long size, unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) { long ret = 0; struct mm_iommu_table_group_mem_t *mem; - mem = mm_iommu_lookup(tce, size); + mem = mm_iommu_lookup(container->mm, tce, size); if (!mem) return -EINVAL; @@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, return 0; } -static void tce_iommu_unuse_page_v2(struct iommu_table *tbl, - unsigned long entry) +static void tce_iommu_unuse_page_v2(struct tce_container *container, + struct iommu_table *tbl, unsigned long entry) { struct mm_iommu_table_group_mem_t *mem = NULL; int ret; unsigned long hpa = 0; unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); - if (!pua || !current || !current->mm) + if (!pua) return; - ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl), + ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); if (ret) pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", @@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container, continue; if (container->v2) { - tce_iommu_unuse_page_v2(tbl, entry); + tce_iommu_unuse_page_v2(container, tbl, entry); continue; } @@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container, unsigned long hpa; enum dma_data_direction dirtmp; + if (!tbl->it_userspace) { + ret = tce_iommu_userspace_view_alloc(tbl, container->mm); + if (ret) + return ret; + } + for (i = 0; i < pages; ++i) { struct mm_iommu_table_group_mem_t *mem = NULL; unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i); - ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl), - &hpa, &mem); + ret = tce_iommu_prereg_ua_to_hpa(container, + tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); if (ret) break; @@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container, ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp); if (ret) { /* dirtmp cannot be DMA_NONE here */ - tce_iommu_unuse_page_v2(tbl, entry + i); + tce_iommu_unuse_page_v2(container, tbl, entry + i); pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", __func__, entry << tbl->it_page_shift, tce, ret); @@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container, } if (dirtmp != DMA_NONE) - tce_iommu_unuse_page_v2(tbl, entry + i); + tce_iommu_unuse_page_v2(container, tbl, entry + i); *pua = tce; @@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container, if (!table_size) return -EINVAL; - ret = try_increment_locked_vm(table_size >> PAGE_SHIFT); + ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); if (ret) return ret; @@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container, WARN_ON(!ret && 
!(*ptbl)->it_ops->free); WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size)); - if (!ret && container->v2) { - ret = tce_iommu_userspace_view_alloc(*ptbl); - if (ret) - (*ptbl)->it_ops->free(*ptbl); - } - - if (ret) - decrement_locked_vm(table_size >> PAGE_SHIFT); - return ret; } -static void tce_iommu_free_table(struct iommu_table *tbl) +static void tce_iommu_free_table(struct tce_container *container, + struct iommu_table *tbl) { unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; - tce_iommu_userspace_view_free(tbl); + tce_iommu_userspace_view_free(tbl, container->mm); tbl->it_ops->free(tbl); - decrement_locked_vm(pages); + decrement_locked_vm(container->mm, pages); } static long tce_iommu_create_window(struct tce_container *container, @@ -663,7 +741,7 @@ static long tce_iommu_create_window(struct tce_container *container, table_group = iommu_group_get_iommudata(tcegrp->grp); table_group->ops->unset_window(table_group, num); } - tce_iommu_free_table(tbl); + tce_iommu_free_table(container, tbl); return ret; } @@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container, /* Free table */ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); - tce_iommu_free_table(tbl); + tce_iommu_free_table(container, tbl); container->tables[num] = NULL; return 0; } +static long tce_iommu_create_default_window(struct tce_container *container) +{ + long ret; + __u64 start_addr = 0; + struct tce_iommu_group *tcegrp; + struct iommu_table_group *table_group; + + if (!container->def_window_pending) + return 0; + + if (!tce_groups_attached(container)) + return -ENODEV; + + tcegrp = list_first_entry(&container->group_list, + struct tce_iommu_group, next); + table_group = iommu_group_get_iommudata(tcegrp->grp); + if (!table_group) + return -ENODEV; + + ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, + table_group->tce32_size, 1, &start_addr); + WARN_ON_ONCE(!ret && start_addr); + + if (!ret) + container->def_window_pending = false; + + return ret; +} + static long tce_iommu_ioctl(void *iommu_data, unsigned int cmd, unsigned long arg) { @@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data, } return (ret < 0) ? 0 : ret; + } + + /* + * Sanity check to prevent one userspace from manipulating + * another userspace mm. 
+ */ + BUG_ON(!container); + if (container->mm && container->mm != current->mm) + return -EPERM; + switch (cmd) { case VFIO_IOMMU_SPAPR_TCE_GET_INFO: { struct vfio_iommu_spapr_tce_info info; struct tce_iommu_group *tcegrp; @@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data, VFIO_DMA_MAP_FLAG_WRITE)) return -EINVAL; + ret = tce_iommu_create_default_window(container); + if (ret) + return ret; + num = tce_iommu_find_table(container, param.iova, &tbl); if (num < 0) return -ENXIO; @@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data, if (param.flags) return -EINVAL; + ret = tce_iommu_create_default_window(container); + if (ret) + return ret; + num = tce_iommu_find_table(container, param.iova, &tbl); if (num < 0) return -ENXIO; @@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data, minsz = offsetofend(struct vfio_iommu_spapr_register_memory, size); + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + if (copy_from_user(¶m, (void __user *)arg, minsz)) return -EFAULT; @@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data, if (!container->v2) break; + if (!container->mm) + return -EPERM; + minsz = offsetofend(struct vfio_iommu_spapr_register_memory, size); @@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data, if (!container->v2) break; + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + if (!tce_groups_attached(container)) return -ENXIO; @@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data, mutex_lock(&container->lock); + ret = tce_iommu_create_default_window(container); + if (ret) + return ret; + ret = tce_iommu_create_window(container, create.page_shift, create.window_size, create.levels, &create.start_addr); @@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data, if (!container->v2) break; + ret = tce_iommu_mm_set(container); + if (ret) + return ret; + if (!tce_groups_attached(container)) return -ENXIO; @@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data, if (remove.flags) return -EINVAL; + if (container->def_window_pending && !remove.start_addr) { + container->def_window_pending = false; + return 0; + } + mutex_lock(&container->lock); ret = tce_iommu_remove_window(container, remove.start_addr); @@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container, continue; tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); - tce_iommu_userspace_view_free(tbl); + tce_iommu_userspace_view_free(tbl, container->mm); if (tbl->it_map) iommu_release_ownership(tbl); @@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container, if (!tbl || !tbl->it_map) continue; - rc = tce_iommu_userspace_view_alloc(tbl); - if (!rc) - rc = iommu_take_ownership(tbl); - + rc = iommu_take_ownership(tbl); if (rc) { for (j = 0; j < i; ++j) iommu_release_ownership( @@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, static long tce_iommu_take_ownership_ddw(struct tce_container *container, struct iommu_table_group *table_group) { - long i, ret = 0; - struct iommu_table *tbl = NULL; - if (!table_group->ops->create_table || !table_group->ops->set_window || !table_group->ops->release_ownership) { WARN_ON_ONCE(1); @@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, table_group->ops->take_ownership(table_group); - /* - * If it the first group attached, check if there is - * a default DMA window and create one if none as - * the userspace 
expects it to exist. - */ - if (!tce_groups_attached(container) && !container->tables[0]) { - ret = tce_iommu_create_table(container, - table_group, - 0, /* window number */ - IOMMU_PAGE_SHIFT_4K, - table_group->tce32_size, - 1, /* default levels */ - &tbl); - if (ret) - goto release_exit; - else - container->tables[0] = tbl; - } - - /* Set all windows to the new group */ - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { - tbl = container->tables[i]; - - if (!tbl) - continue; - - /* Set the default window to a new group */ - ret = table_group->ops->set_window(table_group, i, tbl); - if (ret) - goto release_exit; - } - return 0; - -release_exit: - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) - table_group->ops->unset_window(table_group, i); - - table_group->ops->release_ownership(table_group); - - return ret; } static int tce_iommu_attach_group(void *iommu_data, @@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data, } if (!table_group->ops || !table_group->ops->take_ownership || - !table_group->ops->release_ownership) + !table_group->ops->release_ownership) { ret = tce_iommu_take_ownership(container, table_group); - else + } else { ret = tce_iommu_take_ownership_ddw(container, table_group); + if (!tce_groups_attached(container) && !container->tables[0]) + container->def_window_pending = true; + } if (!ret) { tcegrp->grp = iommu_group; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 2ba19424e4a18f33555f370247ffc28204212089..f3726ba12aa6ceccff8a0c96523aa683537be8f4 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -36,6 +36,9 @@ #include #include #include +#include +#include +#include #define DRIVER_VERSION "0.2" #define DRIVER_AUTHOR "Alex Williamson " @@ -55,8 +58,10 @@ MODULE_PARM_DESC(disable_hugepages, struct vfio_iommu { struct list_head domain_list; + struct vfio_domain *external_domain; /* domain for external user */ struct mutex lock; struct rb_root dma_list; + struct blocking_notifier_head notifier; bool v2; bool nesting; }; @@ -75,6 +80,9 @@ struct vfio_dma { unsigned long vaddr; /* Process virtual addr */ size_t size; /* Map size (bytes) */ int prot; /* IOMMU_READ/WRITE */ + bool iommu_mapped; + struct task_struct *task; + struct rb_root pfn_list; /* Ex-user pinned pfn list */ }; struct vfio_group { @@ -82,6 +90,21 @@ struct vfio_group { struct list_head next; }; +/* + * Guest RAM pinning working set or DMA target + */ +struct vfio_pfn { + struct rb_node node; + dma_addr_t iova; /* Device address */ + unsigned long pfn; /* Host pfn */ + atomic_t ref_count; +}; + +#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \ + (!list_empty(&iommu->domain_list)) + +static int put_pfn(unsigned long pfn, int prot); + /* * This code handles mapping and unmapping of user data buffers * into DMA'ble space using the IOMMU @@ -130,6 +153,97 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old) rb_erase(&old->node, &iommu->dma_list); } +/* + * Helper Functions for host iova-pfn list + */ +static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova) +{ + struct vfio_pfn *vpfn; + struct rb_node *node = dma->pfn_list.rb_node; + + while (node) { + vpfn = rb_entry(node, struct vfio_pfn, node); + + if (iova < vpfn->iova) + node = node->rb_left; + else if (iova > vpfn->iova) + node = node->rb_right; + else + return vpfn; + } + return NULL; +} + +static void vfio_link_pfn(struct vfio_dma *dma, + struct vfio_pfn *new) +{ + struct rb_node **link, *parent = NULL; + 
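	/*
	 * Editorial note, not part of the patch: every vfio_dma now carries
	 * this per-mapping rb-tree of externally pinned pages, keyed by iova
	 * and refcounted per entry.  A minimal usage sketch, using only the
	 * helpers defined in this file:
	 *
	 *	vpfn = vfio_iova_get_vfio_pfn(dma, iova);  // ref_count++
	 *	...
	 *	vfio_iova_put_vfio_pfn(dma, vpfn);         // last put: put_pfn()
	 *	                                           // and unlink the node
	 *
	 * so a page pinned repeatedly through vfio_pin_pages() is released
	 * and un-accounted only when its last reference is dropped.
	 */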
struct vfio_pfn *vpfn; + + link = &dma->pfn_list.rb_node; + while (*link) { + parent = *link; + vpfn = rb_entry(parent, struct vfio_pfn, node); + + if (new->iova < vpfn->iova) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + + rb_link_node(&new->node, parent, link); + rb_insert_color(&new->node, &dma->pfn_list); +} + +static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old) +{ + rb_erase(&old->node, &dma->pfn_list); +} + +static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova, + unsigned long pfn) +{ + struct vfio_pfn *vpfn; + + vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL); + if (!vpfn) + return -ENOMEM; + + vpfn->iova = iova; + vpfn->pfn = pfn; + atomic_set(&vpfn->ref_count, 1); + vfio_link_pfn(dma, vpfn); + return 0; +} + +static void vfio_remove_from_pfn_list(struct vfio_dma *dma, + struct vfio_pfn *vpfn) +{ + vfio_unlink_pfn(dma, vpfn); + kfree(vpfn); +} + +static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma, + unsigned long iova) +{ + struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); + + if (vpfn) + atomic_inc(&vpfn->ref_count); + return vpfn; +} + +static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) +{ + int ret = 0; + + if (atomic_dec_and_test(&vpfn->ref_count)) { + ret = put_pfn(vpfn->pfn, dma->prot); + vfio_remove_from_pfn_list(dma, vpfn); + } + return ret; +} + struct vwork { struct mm_struct *mm; long npage; @@ -150,17 +264,22 @@ static void vfio_lock_acct_bg(struct work_struct *work) kfree(vwork); } -static void vfio_lock_acct(long npage) +static void vfio_lock_acct(struct task_struct *task, long npage) { struct vwork *vwork; struct mm_struct *mm; - if (!current->mm || !npage) + if (!npage) + return; + + mm = get_task_mm(task); + if (!mm) return; /* process exited or nothing to do */ - if (down_write_trylock(¤t->mm->mmap_sem)) { - current->mm->locked_vm += npage; - up_write(¤t->mm->mmap_sem); + if (down_write_trylock(&mm->mmap_sem)) { + mm->locked_vm += npage; + up_write(&mm->mmap_sem); + mmput(mm); return; } @@ -170,11 +289,8 @@ static void vfio_lock_acct(long npage) * wouldn't need this silliness */ vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL); - if (!vwork) - return; - mm = get_task_mm(current); - if (!mm) { - kfree(vwork); + if (!vwork) { + mmput(mm); return; } INIT_WORK(&vwork->work, vfio_lock_acct_bg); @@ -228,20 +344,36 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } -static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) +static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, + int prot, unsigned long *pfn) { struct page *page[1]; struct vm_area_struct *vma; - int ret = -EFAULT; + int ret; + + if (mm == current->mm) { + ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), + page); + } else { + unsigned int flags = 0; + + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + down_read(&mm->mmap_sem); + ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, + NULL, NULL); + up_read(&mm->mmap_sem); + } - if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) { + if (ret == 1) { *pfn = page_to_pfn(page[0]); return 0; } - down_read(¤t->mm->mmap_sem); + down_read(&mm->mmap_sem); - vma = find_vma_intersection(current->mm, vaddr, vaddr + 1); + vma = find_vma_intersection(mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; @@ -249,8 +381,7 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) ret = 0; } - 
up_read(¤t->mm->mmap_sem); - + up_read(&mm->mmap_sem); return ret; } @@ -259,88 +390,299 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) * the iommu can only map chunks of consecutive pfns anyway, so get the * first page and all consecutive pages with the same locking. */ -static long vfio_pin_pages(unsigned long vaddr, long npage, - int prot, unsigned long *pfn_base) +static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, + long npage, unsigned long *pfn_base) { - unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - bool lock_cap = capable(CAP_IPC_LOCK); - long ret, i; + unsigned long limit; + bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns, + CAP_IPC_LOCK); + struct mm_struct *mm; + long ret, i = 0, lock_acct = 0; bool rsvd; + dma_addr_t iova = vaddr - dma->vaddr + dma->iova; - if (!current->mm) + mm = get_task_mm(dma->task); + if (!mm) return -ENODEV; - ret = vaddr_get_pfn(vaddr, prot, pfn_base); + ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); if (ret) - return ret; + goto pin_pg_remote_exit; rsvd = is_invalid_reserved_pfn(*pfn_base); + limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; - if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) { - put_pfn(*pfn_base, prot); - pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, - limit << PAGE_SHIFT); - return -ENOMEM; + /* + * Reserved pages aren't counted against the user, externally pinned + * pages are already counted against the user. + */ + if (!rsvd && !vfio_find_vpfn(dma, iova)) { + if (!lock_cap && mm->locked_vm + 1 > limit) { + put_pfn(*pfn_base, dma->prot); + pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, + limit << PAGE_SHIFT); + ret = -ENOMEM; + goto pin_pg_remote_exit; + } + lock_acct++; } - if (unlikely(disable_hugepages)) { - if (!rsvd) - vfio_lock_acct(1); - return 1; - } + i++; + if (likely(!disable_hugepages)) { + /* Lock all the consecutive pages from pfn_base */ + for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage; + i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { + unsigned long pfn = 0; - /* Lock all the consecutive pages from pfn_base */ - for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) { - unsigned long pfn = 0; + ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn); + if (ret) + break; - ret = vaddr_get_pfn(vaddr, prot, &pfn); - if (ret) - break; + if (pfn != *pfn_base + i || + rsvd != is_invalid_reserved_pfn(pfn)) { + put_pfn(pfn, dma->prot); + break; + } - if (pfn != *pfn_base + i || - rsvd != is_invalid_reserved_pfn(pfn)) { - put_pfn(pfn, prot); - break; + if (!rsvd && !vfio_find_vpfn(dma, iova)) { + if (!lock_cap && + mm->locked_vm + lock_acct + 1 > limit) { + put_pfn(pfn, dma->prot); + pr_warn("%s: RLIMIT_MEMLOCK (%ld) " + "exceeded\n", __func__, + limit << PAGE_SHIFT); + break; + } + lock_acct++; + } } + } - if (!rsvd && !lock_cap && - current->mm->locked_vm + i + 1 > limit) { - put_pfn(pfn, prot); - pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", - __func__, limit << PAGE_SHIFT); - break; + vfio_lock_acct(dma->task, lock_acct); + ret = i; + +pin_pg_remote_exit: + mmput(mm); + return ret; +} + +static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, + unsigned long pfn, long npage, + bool do_accounting) +{ + long unlocked = 0, locked = 0; + long i; + + for (i = 0; i < npage; i++) { + if (put_pfn(pfn++, dma->prot)) { + unlocked++; + if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT))) + locked++; } } - if (!rsvd) - vfio_lock_acct(i); + if (do_accounting) + 
vfio_lock_acct(dma->task, locked - unlocked); - return i; + return unlocked; } -static long vfio_unpin_pages(unsigned long pfn, long npage, - int prot, bool do_accounting) +static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, + unsigned long *pfn_base, bool do_accounting) { - unsigned long unlocked = 0; - long i; + unsigned long limit; + bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns, + CAP_IPC_LOCK); + struct mm_struct *mm; + int ret; + bool rsvd; - for (i = 0; i < npage; i++) - unlocked += put_pfn(pfn++, prot); + mm = get_task_mm(dma->task); + if (!mm) + return -ENODEV; + + ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); + if (ret) + goto pin_page_exit; + + rsvd = is_invalid_reserved_pfn(*pfn_base); + limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + if (!rsvd && !lock_cap && mm->locked_vm + 1 > limit) { + put_pfn(*pfn_base, dma->prot); + pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK (%ld) exceeded\n", + __func__, dma->task->comm, task_pid_nr(dma->task), + limit << PAGE_SHIFT); + ret = -ENOMEM; + goto pin_page_exit; + } + + if (!rsvd && do_accounting) + vfio_lock_acct(dma->task, 1); + ret = 1; + +pin_page_exit: + mmput(mm); + return ret; +} + +static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, + bool do_accounting) +{ + int unlocked; + struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova); + + if (!vpfn) + return 0; + + unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); if (do_accounting) - vfio_lock_acct(-unlocked); + vfio_lock_acct(dma->task, -unlocked); return unlocked; } -static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma) +static int vfio_iommu_type1_pin_pages(void *iommu_data, + unsigned long *user_pfn, + int npage, int prot, + unsigned long *phys_pfn) +{ + struct vfio_iommu *iommu = iommu_data; + int i, j, ret; + unsigned long remote_vaddr; + struct vfio_dma *dma; + bool do_accounting; + + if (!iommu || !user_pfn || !phys_pfn) + return -EINVAL; + + /* Supported for v2 version only */ + if (!iommu->v2) + return -EACCES; + + mutex_lock(&iommu->lock); + + /* Fail if notifier list is empty */ + if ((!iommu->external_domain) || (!iommu->notifier.head)) { + ret = -EINVAL; + goto pin_done; + } + + /* + * If iommu capable domain exist in the container then all pages are + * already pinned and accounted. Accouting should be done if there is no + * iommu capable domain in the container. 
+ */ + do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu); + + for (i = 0; i < npage; i++) { + dma_addr_t iova; + struct vfio_pfn *vpfn; + + iova = user_pfn[i] << PAGE_SHIFT; + dma = vfio_find_dma(iommu, iova, PAGE_SIZE); + if (!dma) { + ret = -EINVAL; + goto pin_unwind; + } + + if ((dma->prot & prot) != prot) { + ret = -EPERM; + goto pin_unwind; + } + + vpfn = vfio_iova_get_vfio_pfn(dma, iova); + if (vpfn) { + phys_pfn[i] = vpfn->pfn; + continue; + } + + remote_vaddr = dma->vaddr + iova - dma->iova; + ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i], + do_accounting); + if (ret <= 0) { + WARN_ON(!ret); + goto pin_unwind; + } + + ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]); + if (ret) { + vfio_unpin_page_external(dma, iova, do_accounting); + goto pin_unwind; + } + } + + ret = i; + goto pin_done; + +pin_unwind: + phys_pfn[i] = 0; + for (j = 0; j < i; j++) { + dma_addr_t iova; + + iova = user_pfn[j] << PAGE_SHIFT; + dma = vfio_find_dma(iommu, iova, PAGE_SIZE); + vfio_unpin_page_external(dma, iova, do_accounting); + phys_pfn[j] = 0; + } +pin_done: + mutex_unlock(&iommu->lock); + return ret; +} + +static int vfio_iommu_type1_unpin_pages(void *iommu_data, + unsigned long *user_pfn, + int npage) +{ + struct vfio_iommu *iommu = iommu_data; + bool do_accounting; + int i; + + if (!iommu || !user_pfn) + return -EINVAL; + + /* Supported for v2 version only */ + if (!iommu->v2) + return -EACCES; + + mutex_lock(&iommu->lock); + + if (!iommu->external_domain) { + mutex_unlock(&iommu->lock); + return -EINVAL; + } + + do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu); + for (i = 0; i < npage; i++) { + struct vfio_dma *dma; + dma_addr_t iova; + + iova = user_pfn[i] << PAGE_SHIFT; + dma = vfio_find_dma(iommu, iova, PAGE_SIZE); + if (!dma) + goto unpin_exit; + vfio_unpin_page_external(dma, iova, do_accounting); + } + +unpin_exit: + mutex_unlock(&iommu->lock); + return i > npage ? npage : (i > 0 ? i : -EINVAL); +} + +static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, + bool do_accounting) { dma_addr_t iova = dma->iova, end = dma->iova + dma->size; struct vfio_domain *domain, *d; long unlocked = 0; if (!dma->size) - return; + return 0; + + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) + return 0; + /* * We use the IOMMU to track the physical addresses, otherwise we'd * need a much more complicated tracking system. 
Unfortunately that @@ -382,21 +724,28 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma) if (WARN_ON(!unmapped)) break; - unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT, - unmapped >> PAGE_SHIFT, - dma->prot, false); + unlocked += vfio_unpin_pages_remote(dma, iova, + phys >> PAGE_SHIFT, + unmapped >> PAGE_SHIFT, + false); iova += unmapped; cond_resched(); } - vfio_lock_acct(-unlocked); + dma->iommu_mapped = false; + if (do_accounting) { + vfio_lock_acct(dma->task, -unlocked); + return 0; + } + return unlocked; } static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) { - vfio_unmap_unpin(iommu, dma); + vfio_unmap_unpin(iommu, dma, true); vfio_unlink_dma(iommu, dma); + put_task_struct(dma->task); kfree(dma); } @@ -430,9 +779,9 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, struct vfio_iommu_type1_dma_unmap *unmap) { uint64_t mask; - struct vfio_dma *dma; + struct vfio_dma *dma, *dma_last = NULL; size_t unmapped = 0; - int ret = 0; + int ret = 0, retries = 0; mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1; @@ -442,7 +791,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, return -EINVAL; WARN_ON(mask & PAGE_MASK); - +again: mutex_lock(&iommu->lock); /* @@ -477,7 +826,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, * mappings within the range. */ if (iommu->v2) { - dma = vfio_find_dma(iommu, unmap->iova, 0); + dma = vfio_find_dma(iommu, unmap->iova, 1); if (dma && dma->iova != unmap->iova) { ret = -EINVAL; goto unlock; @@ -492,6 +841,38 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) { if (!iommu->v2 && unmap->iova > dma->iova) break; + /* + * Task with same address space who mapped this iova range is + * allowed to unmap the iova range. + */ + if (dma->task->mm != current->mm) + break; + + if (!RB_EMPTY_ROOT(&dma->pfn_list)) { + struct vfio_iommu_type1_dma_unmap nb_unmap; + + if (dma_last == dma) { + BUG_ON(++retries > 10); + } else { + dma_last = dma; + retries = 0; + } + + nb_unmap.iova = dma->iova; + nb_unmap.size = dma->size; + + /* + * Notify anyone (mdev vendor drivers) to invalidate and + * unmap iovas within the range we're about to unmap. + * Vendor drivers MUST unpin pages in response to an + * invalidation. + */ + mutex_unlock(&iommu->lock); + blocking_notifier_call_chain(&iommu->notifier, + VFIO_IOMMU_NOTIFY_DMA_UNMAP, + &nb_unmap); + goto again; + } unmapped += dma->size; vfio_remove_dma(iommu, dma); } @@ -558,17 +939,56 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, return ret; } +static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, + size_t map_size) +{ + dma_addr_t iova = dma->iova; + unsigned long vaddr = dma->vaddr; + size_t size = map_size; + long npage; + unsigned long pfn; + int ret = 0; + + while (size) { + /* Pin a contiguous chunk of memory */ + npage = vfio_pin_pages_remote(dma, vaddr + dma->size, + size >> PAGE_SHIFT, &pfn); + if (npage <= 0) { + WARN_ON(!npage); + ret = (int)npage; + break; + } + + /* Map it! 
*/ + ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, + dma->prot); + if (ret) { + vfio_unpin_pages_remote(dma, iova + dma->size, pfn, + npage, true); + break; + } + + size -= npage << PAGE_SHIFT; + dma->size += npage << PAGE_SHIFT; + } + + dma->iommu_mapped = true; + + if (ret) + vfio_remove_dma(iommu, dma); + + return ret; +} + static int vfio_dma_do_map(struct vfio_iommu *iommu, struct vfio_iommu_type1_dma_map *map) { dma_addr_t iova = map->iova; unsigned long vaddr = map->vaddr; size_t size = map->size; - long npage; int ret = 0, prot = 0; uint64_t mask; struct vfio_dma *dma; - unsigned long pfn; /* Verify that none of our __u64 fields overflow */ if (map->size != size || map->vaddr != vaddr || map->iova != iova) @@ -594,47 +1014,33 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, mutex_lock(&iommu->lock); if (vfio_find_dma(iommu, iova, size)) { - mutex_unlock(&iommu->lock); - return -EEXIST; + ret = -EEXIST; + goto out_unlock; } dma = kzalloc(sizeof(*dma), GFP_KERNEL); if (!dma) { - mutex_unlock(&iommu->lock); - return -ENOMEM; + ret = -ENOMEM; + goto out_unlock; } dma->iova = iova; dma->vaddr = vaddr; dma->prot = prot; + get_task_struct(current); + dma->task = current; + dma->pfn_list = RB_ROOT; /* Insert zero-sized and grow as we map chunks of it */ vfio_link_dma(iommu, dma); - while (size) { - /* Pin a contiguous chunk of memory */ - npage = vfio_pin_pages(vaddr + dma->size, - size >> PAGE_SHIFT, prot, &pfn); - if (npage <= 0) { - WARN_ON(!npage); - ret = (int)npage; - break; - } - - /* Map it! */ - ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot); - if (ret) { - vfio_unpin_pages(pfn, npage, prot, true); - break; - } - - size -= npage << PAGE_SHIFT; - dma->size += npage << PAGE_SHIFT; - } - - if (ret) - vfio_remove_dma(iommu, dma); + /* Don't pin and map if container doesn't contain IOMMU capable domain*/ + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) + dma->size = size; + else + ret = vfio_pin_map_dma(iommu, dma, size); +out_unlock: mutex_unlock(&iommu->lock); return ret; } @@ -662,10 +1068,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); n = rb_first(&iommu->dma_list); - /* If there's not a domain, there better not be any mappings */ - if (WARN_ON(n && !d)) - return -EINVAL; - for (; n; n = rb_next(n)) { struct vfio_dma *dma; dma_addr_t iova; @@ -674,21 +1076,49 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, iova = dma->iova; while (iova < dma->iova + dma->size) { - phys_addr_t phys = iommu_iova_to_phys(d->domain, iova); + phys_addr_t phys; size_t size; - if (WARN_ON(!phys)) { - iova += PAGE_SIZE; - continue; + if (dma->iommu_mapped) { + phys_addr_t p; + dma_addr_t i; + + phys = iommu_iova_to_phys(d->domain, iova); + + if (WARN_ON(!phys)) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(d->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + } else { + unsigned long pfn; + unsigned long vaddr = dma->vaddr + + (iova - dma->iova); + size_t n = dma->iova + dma->size - iova; + long npage; + + npage = vfio_pin_pages_remote(dma, vaddr, + n >> PAGE_SHIFT, + &pfn); + if (npage <= 0) { + WARN_ON(!npage); + ret = (int)npage; + return ret; + } + + phys = pfn << PAGE_SHIFT; + size = npage << PAGE_SHIFT; } - size = PAGE_SIZE; - - while (iova + size < dma->iova + dma->size && - phys + size == iommu_iova_to_phys(d->domain, - iova + size)) 
- size += PAGE_SIZE; - ret = iommu_map(domain->domain, iova, phys, size, dma->prot | domain->prot); if (ret) @@ -696,8 +1126,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, iova += size; } + dma->iommu_mapped = true; } - return 0; } @@ -734,22 +1164,39 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain) __free_pages(pages, order); } +static struct vfio_group *find_iommu_group(struct vfio_domain *domain, + struct iommu_group *iommu_group) +{ + struct vfio_group *g; + + list_for_each_entry(g, &domain->group_list, next) { + if (g->iommu_group == iommu_group) + return g; + } + + return NULL; +} + static int vfio_iommu_type1_attach_group(void *iommu_data, struct iommu_group *iommu_group) { struct vfio_iommu *iommu = iommu_data; - struct vfio_group *group, *g; + struct vfio_group *group; struct vfio_domain *domain, *d; - struct bus_type *bus = NULL; + struct bus_type *bus = NULL, *mdev_bus; int ret; mutex_lock(&iommu->lock); list_for_each_entry(d, &iommu->domain_list, next) { - list_for_each_entry(g, &d->group_list, next) { - if (g->iommu_group != iommu_group) - continue; + if (find_iommu_group(d, iommu_group)) { + mutex_unlock(&iommu->lock); + return -EINVAL; + } + } + if (iommu->external_domain) { + if (find_iommu_group(iommu->external_domain, iommu_group)) { mutex_unlock(&iommu->lock); return -EINVAL; } @@ -769,6 +1216,25 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_free; + mdev_bus = symbol_get(mdev_bus_type); + + if (mdev_bus) { + if ((bus == mdev_bus) && !iommu_present(bus)) { + symbol_put(mdev_bus_type); + if (!iommu->external_domain) { + INIT_LIST_HEAD(&domain->group_list); + iommu->external_domain = domain; + } else + kfree(domain); + + list_add(&group->next, + &iommu->external_domain->group_list); + mutex_unlock(&iommu->lock); + return 0; + } + symbol_put(mdev_bus_type); + } + domain->domain = iommu_domain_alloc(bus); if (!domain->domain) { ret = -EIO; @@ -859,6 +1325,46 @@ static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu) vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node)); } +static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) +{ + struct rb_node *n, *p; + + n = rb_first(&iommu->dma_list); + for (; n; n = rb_next(n)) { + struct vfio_dma *dma; + long locked = 0, unlocked = 0; + + dma = rb_entry(n, struct vfio_dma, node); + unlocked += vfio_unmap_unpin(iommu, dma, false); + p = rb_first(&dma->pfn_list); + for (; p; p = rb_next(p)) { + struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, + node); + + if (!is_invalid_reserved_pfn(vpfn->pfn)) + locked++; + } + vfio_lock_acct(dma->task, locked - unlocked); + } +} + +static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu) +{ + struct rb_node *n; + + n = rb_first(&iommu->dma_list); + for (; n; n = rb_next(n)) { + struct vfio_dma *dma; + + dma = rb_entry(n, struct vfio_dma, node); + + if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list))) + break; + } + /* mdev vendor driver must unregister notifier */ + WARN_ON(iommu->notifier.head); +} + static void vfio_iommu_type1_detach_group(void *iommu_data, struct iommu_group *iommu_group) { @@ -868,31 +1374,55 @@ static void vfio_iommu_type1_detach_group(void *iommu_data, mutex_lock(&iommu->lock); - list_for_each_entry(domain, &iommu->domain_list, next) { - list_for_each_entry(group, &domain->group_list, next) { - if (group->iommu_group != iommu_group) - continue; - - iommu_detach_group(domain->domain, iommu_group); + if (iommu->external_domain) { + group = find_iommu_group(iommu->external_domain, 
iommu_group); + if (group) { list_del(&group->next); kfree(group); - /* - * Group ownership provides privilege, if the group - * list is empty, the domain goes away. If it's the - * last domain, then all the mappings go away too. - */ - if (list_empty(&domain->group_list)) { - if (list_is_singular(&iommu->domain_list)) + + if (list_empty(&iommu->external_domain->group_list)) { + vfio_sanity_check_pfn_list(iommu); + + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) vfio_iommu_unmap_unpin_all(iommu); - iommu_domain_free(domain->domain); - list_del(&domain->next); - kfree(domain); + + kfree(iommu->external_domain); + iommu->external_domain = NULL; + } + goto detach_group_done; + } + } + + list_for_each_entry(domain, &iommu->domain_list, next) { + group = find_iommu_group(domain, iommu_group); + if (!group) + continue; + + iommu_detach_group(domain->domain, iommu_group); + list_del(&group->next); + kfree(group); + /* + * Group ownership provides privilege, if the group list is + * empty, the domain goes away. If it's the last domain with + * iommu and external domain doesn't exist, then all the + * mappings go away too. If it's the last domain with iommu and + * external domain exist, update accounting + */ + if (list_empty(&domain->group_list)) { + if (list_is_singular(&iommu->domain_list)) { + if (!iommu->external_domain) + vfio_iommu_unmap_unpin_all(iommu); + else + vfio_iommu_unmap_unpin_reaccount(iommu); } - goto done; + iommu_domain_free(domain->domain); + list_del(&domain->next); + kfree(domain); } + break; } -done: +detach_group_done: mutex_unlock(&iommu->lock); } @@ -920,31 +1450,46 @@ static void *vfio_iommu_type1_open(unsigned long arg) INIT_LIST_HEAD(&iommu->domain_list); iommu->dma_list = RB_ROOT; mutex_init(&iommu->lock); + BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); return iommu; } +static void vfio_release_domain(struct vfio_domain *domain, bool external) +{ + struct vfio_group *group, *group_tmp; + + list_for_each_entry_safe(group, group_tmp, + &domain->group_list, next) { + if (!external) + iommu_detach_group(domain->domain, group->iommu_group); + list_del(&group->next); + kfree(group); + } + + if (!external) + iommu_domain_free(domain->domain); +} + static void vfio_iommu_type1_release(void *iommu_data) { struct vfio_iommu *iommu = iommu_data; struct vfio_domain *domain, *domain_tmp; - struct vfio_group *group, *group_tmp; + + if (iommu->external_domain) { + vfio_release_domain(iommu->external_domain, true); + vfio_sanity_check_pfn_list(iommu); + kfree(iommu->external_domain); + } vfio_iommu_unmap_unpin_all(iommu); list_for_each_entry_safe(domain, domain_tmp, &iommu->domain_list, next) { - list_for_each_entry_safe(group, group_tmp, - &domain->group_list, next) { - iommu_detach_group(domain->domain, group->iommu_group); - list_del(&group->next); - kfree(group); - } - iommu_domain_free(domain->domain); + vfio_release_domain(domain, false); list_del(&domain->next); kfree(domain); } - kfree(iommu); } @@ -1040,14 +1585,42 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, return -ENOTTY; } +static int vfio_iommu_type1_register_notifier(void *iommu_data, + unsigned long *events, + struct notifier_block *nb) +{ + struct vfio_iommu *iommu = iommu_data; + + /* clear known events */ + *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP; + + /* refuse to register if still events remaining */ + if (*events) + return -EINVAL; + + return blocking_notifier_chain_register(&iommu->notifier, nb); +} + +static int vfio_iommu_type1_unregister_notifier(void *iommu_data, + struct notifier_block *nb) +{ 
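	/*
	 * Editorial note, not part of the patch: a vendor driver (for
	 * example an mdev backend) is expected to consume this interface
	 * roughly as sketched below.  The callback and helper names are
	 * hypothetical; only vfio_register_notifier(), VFIO_IOMMU_NOTIFY,
	 * VFIO_IOMMU_NOTIFY_DMA_UNMAP and vfio_unpin_pages() are symbols
	 * actually introduced or used by this series.
	 *
	 *	static int my_dma_unmap_cb(struct notifier_block *nb,
	 *				   unsigned long action, void *data)
	 *	{
	 *		struct vfio_iommu_type1_dma_unmap *unmap = data;
	 *
	 *		if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP)
	 *			my_unpin_range(unmap->iova, unmap->size);
	 *		return NOTIFY_OK;
	 *	}
	 *
	 *	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	 *	struct notifier_block nb = { .notifier_call = my_dma_unmap_cb };
	 *
	 *	vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, &nb);
	 *
	 * where my_unpin_range() would call vfio_unpin_pages() for every pfn
	 * previously pinned inside [iova, iova + size), as required by the
	 * comment added in vfio_dma_do_unmap() above.
	 */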
+ struct vfio_iommu *iommu = iommu_data; + + return blocking_notifier_chain_unregister(&iommu->notifier, nb); +} + static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = { - .name = "vfio-iommu-type1", - .owner = THIS_MODULE, - .open = vfio_iommu_type1_open, - .release = vfio_iommu_type1_release, - .ioctl = vfio_iommu_type1_ioctl, - .attach_group = vfio_iommu_type1_attach_group, - .detach_group = vfio_iommu_type1_detach_group, + .name = "vfio-iommu-type1", + .owner = THIS_MODULE, + .open = vfio_iommu_type1_open, + .release = vfio_iommu_type1_release, + .ioctl = vfio_iommu_type1_ioctl, + .attach_group = vfio_iommu_type1_attach_group, + .detach_group = vfio_iommu_type1_detach_group, + .pin_pages = vfio_iommu_type1_pin_pages, + .unpin_pages = vfio_iommu_type1_unpin_pages, + .register_notifier = vfio_iommu_type1_register_notifier, + .unregister_notifier = vfio_iommu_type1_unregister_notifier, }; static int __init vfio_iommu_type1_init(void) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 5dc128a8da83e5c724dbd1d8748fa0db2a300d7a..5dc34653274ae3655cf2ba97ab9c8dce9e8a2632 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -342,7 +342,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net, endtime = busy_clock() + vq->busyloop_timeout; while (vhost_can_busy_poll(vq->dev, endtime) && vhost_vq_avail_empty(vq->dev, vq)) - cpu_relax_lowlatency(); + cpu_relax(); preempt_enable(); r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), out_num, in_num, NULL, NULL); @@ -533,7 +533,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) while (vhost_can_busy_poll(&net->dev, endtime) && !sk_has_rx_data(sk) && vhost_vq_avail_empty(&net->dev, vq)) - cpu_relax_lowlatency(); + cpu_relax(); preempt_enable(); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 6e29d053843d0b7f29a1aed672ac88ef8150b9d7..253310cdaacabc25d67aa997e45033bf7137b3b9 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -922,8 +922,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) */ iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size); - ret = copy_from_iter(req, req_size, &out_iter); - if (unlikely(ret != req_size)) { + if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) { vq_err(vq, "Faulted on copy_from_iter\n"); vhost_scsi_send_bad_target(vs, vq, head, out); continue; @@ -1749,7 +1748,6 @@ static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg, static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, const char *name) { - struct se_portal_group *se_tpg; struct vhost_scsi_nexus *tv_nexus; mutex_lock(&tpg->tv_tpg_mutex); @@ -1758,7 +1756,6 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, pr_debug("tpg->tpg_nexus already exists\n"); return -EEXIST; } - se_tpg = &tpg->se_tpg; tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL); if (!tv_nexus) { diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index c6f2d89c0e97cd4c184e615be6e3d86aca722a0d..d6432603880c1343ea2451eba6df1973e6d61822 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -49,7 +49,7 @@ enum { INTERVAL_TREE_DEFINE(struct vhost_umem_node, rb, __u64, __subtree_last, - START, LAST, , vhost_umem_interval_tree); + START, LAST, static inline, vhost_umem_interval_tree); #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) @@ -261,8 +261,8 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) if 
(!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) { /* We can only add the work to the list after we're * sure it was not in the list. + * test_and_set_bit() implies a memory barrier. */ - smp_mb(); llist_add(&work->node, &dev->work_list); wake_up_process(dev->worker); } @@ -290,6 +290,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->avail = NULL; vq->used = NULL; vq->last_avail_idx = 0; + vq->last_used_event = 0; vq->avail_idx = 0; vq->last_used_idx = 0; vq->signalled_used = 0; @@ -719,7 +720,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem, static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, struct iovec iov[], int iov_size, int access); -static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to, +static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, const void *from, unsigned size) { int ret; @@ -749,7 +750,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to, } static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, - void *from, unsigned size) + void __user *from, unsigned size) { int ret; @@ -783,7 +784,7 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, } static void __user *__vhost_get_user(struct vhost_virtqueue *vq, - void *addr, unsigned size) + void __user *addr, unsigned size) { int ret; @@ -934,8 +935,8 @@ static int umem_access_ok(u64 uaddr, u64 size, int access) return 0; } -int vhost_process_iotlb_msg(struct vhost_dev *dev, - struct vhost_iotlb_msg *msg) +static int vhost_process_iotlb_msg(struct vhost_dev *dev, + struct vhost_iotlb_msg *msg) { int ret = 0; @@ -1324,7 +1325,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp) r = -EINVAL; break; } - vq->last_avail_idx = s.num; + vq->last_avail_idx = vq->last_used_event = s.num; /* Forget the cached index value. */ vq->avail_idx = vq->last_avail_idx; break; @@ -1862,8 +1863,7 @@ static int get_indirect(struct vhost_virtqueue *vq, i, count); return -EINVAL; } - if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) != - sizeof(desc))) { + if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) { vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL; @@ -2159,10 +2159,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) __u16 old, new; __virtio16 event; bool v; - /* Flush out used index updates. This is paired - * with the barrier that the Guest executes when enabling - * interrupts. */ - smp_mb(); if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && unlikely(vq->avail_idx == vq->last_avail_idx)) @@ -2170,6 +2166,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { __virtio16 flags; + /* Flush out used index updates. This is paired + * with the barrier that the Guest executes when enabling + * interrupts. */ + smp_mb(); if (vhost_get_user(vq, flags, &vq->avail->flags)) { vq_err(vq, "Failed to get flags"); return true; @@ -2184,11 +2184,26 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (unlikely(!v)) return true; + /* We're sure if the following conditions are met, there's no + * need to notify guest: + * 1) cached used event is ahead of new + * 2) old to new updating does not cross cached used event. 
*/ + if (vring_need_event(vq->last_used_event, new + vq->num, new) && + !vring_need_event(vq->last_used_event, new, old)) + return false; + + /* Flush out used index updates. This is paired + * with the barrier that the Guest executes when enabling + * interrupts. */ + smp_mb(); + if (vhost_get_user(vq, event, vhost_used_event(vq))) { vq_err(vq, "Failed to get used event idx"); return true; } - return vring_need_event(vhost16_to_cpu(vq, event), new, old); + vq->last_used_event = vhost16_to_cpu(vq, event); + + return vring_need_event(vq->last_used_event, new, old); } /* This actually signals the guest, using eventfd. */ diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 78f3c5fc02e47d92bced439249a4bb3491e42e9a..a9cbbb148f460e52ef8a051770fd32f616696f14 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -107,6 +107,9 @@ struct vhost_virtqueue { /* Last index we used. */ u16 last_used_idx; + /* Last used evet we've seen */ + u16 last_used_event; + /* Used flags */ u16 used_flags; diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 3bb02c60a2f5fe85a4d38e5d884b66f03b8756e9..bb8971f2a634bc478a97c672ebde8e7d7c7da3cc 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -3,6 +3,7 @@ * * Since these may be in userspace, we use (inline) accessors. */ +#include #include #include #include @@ -820,13 +821,13 @@ EXPORT_SYMBOL(vringh_need_notify_user); static inline int getu16_kern(const struct vringh *vrh, u16 *val, const __virtio16 *p) { - *val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p)); + *val = vringh16_to_cpu(vrh, READ_ONCE(*p)); return 0; } static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val) { - ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val); + WRITE_ONCE(*p, cpu_to_vringh16(vrh, val)); return 0; } diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index e3b30ea9ece5945c935791798ab27ed8f6c3dd11..bbbf588540ed71d82ed63deb355b77736a2a9628 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void) return VHOST_VSOCK_DEFAULT_HOST_CID; } -static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) +static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid) { struct vhost_vsock *vsock; - spin_lock_bh(&vhost_vsock_lock); list_for_each_entry(vsock, &vhost_vsock_list, list) { u32 other_cid = vsock->guest_cid; @@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) continue; if (other_cid == guest_cid) { - spin_unlock_bh(&vhost_vsock_lock); return vsock; } } - spin_unlock_bh(&vhost_vsock_lock); return NULL; } +static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) +{ + struct vhost_vsock *vsock; + + spin_lock_bh(&vhost_vsock_lock); + vsock = __vhost_vsock_get(guest_cid); + spin_unlock_bh(&vhost_vsock_lock); + + return vsock; +} + static void vhost_transport_do_send_pkt(struct vhost_vsock *vsock, struct vhost_virtqueue *vq) @@ -195,7 +203,6 @@ static int vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) { struct vhost_vsock *vsock; - struct vhost_virtqueue *vq; int len = pkt->len; /* Find the vhost_vsock according to guest context id */ @@ -205,8 +212,6 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) return -ENODEV; } - vq = &vsock->vqs[VSOCK_VQ_RX]; - if (pkt->reply) atomic_inc(&vsock->queued_replies); @@ -506,7 +511,7 @@ static void vhost_vsock_reset_orphans(struct sock *sk) * executing. 
*/ - if (!vhost_vsock_get(vsk->local_addr.svm_cid)) { + if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) { sock_set_flag(sk, SOCK_DONE); vsk->peer_shutdown = SHUTDOWN_MASK; sk->sk_state = SS_UNCONNECTED; @@ -562,11 +567,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) return -EINVAL; /* Refuse if CID is already in use */ - other = vhost_vsock_get(guest_cid); - if (other && other != vsock) - return -EADDRINUSE; - spin_lock_bh(&vhost_vsock_lock); + other = __vhost_vsock_get(guest_cid); + if (other && other != vsock) { + spin_unlock_bh(&vhost_vsock_lock); + return -EADDRINUSE; + } vsock->guest_cid = guest_cid; spin_unlock_bh(&vhost_vsock_lock); diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 38da6e2991491064263d0010782e464e42698e85..c3f1fb9ee8203cbb307605d8e7deca7ed5a7399a 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -9,7 +9,7 @@ config VGA_CONSOLE depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \ !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \ (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \ - !ARM64 && !ARC && !MICROBLAZE + !ARM64 && !ARC && !MICROBLAZE && !OPENRISC default y help Saying Y here will allow you to use Linux in text mode through a diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index b87f5cfdaea5cb364fc1043f9df4db835ca2cf5a..a44f5627b82a3f4e7e9b079e2546ad4a5f4931c5 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -164,8 +164,6 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos); static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); static void fbcon_cursor(struct vc_data *vc, int mode); -static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, - int count); static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, int height, int width); static int fbcon_switch(struct vc_data *vc); @@ -1795,15 +1793,15 @@ static inline void fbcon_softback_note(struct vc_data *vc, int t, softback_curr = softback_in; } -static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, - int count) +static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + enum con_scroll dir, unsigned int count) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct display *p = &fb_display[vc->vc_num]; int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; if (fbcon_is_inactive(vc, info)) - return -EINVAL; + return true; fbcon_cursor(vc, CM_ERASE); @@ -1831,7 +1829,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, (b - count)), vc->vc_video_erase_char, vc->vc_size_row * count); - return 1; + return true; break; case SCROLL_WRAP_MOVE: @@ -1903,7 +1901,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, (b - count)), vc->vc_video_erase_char, vc->vc_size_row * count); - return 1; + return true; } break; @@ -1922,7 +1920,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, t), vc->vc_video_erase_char, vc->vc_size_row * count); - return 1; + return true; break; case SCROLL_WRAP_MOVE: @@ -1992,10 +1990,10 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, t), vc->vc_video_erase_char, vc->vc_size_row * count); - return 1; + return true; } } - return 0; + return false; } diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index 
bacbb044d77cd8dbeb3d6cf0e308796287ff854d..ec192a1bf297232a183e1ae2f51f83b601649c15 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -488,12 +488,13 @@ static void mdacon_cursor(struct vc_data *c, int mode) } } -static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) +static bool mdacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, + enum con_scroll dir, unsigned int lines) { u16 eattr = mda_convert_attr(c->vc_video_erase_char); if (!lines) - return 0; + return false; if (lines > c->vc_rows) /* maximum realistic size */ lines = c->vc_rows; @@ -514,7 +515,7 @@ static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) break; } - return 0; + return false; } diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index e3b9521e4ec3e884f75ee7690b8abd4f90cc5eba..42d02a206059b55b198c797d5c8816dbe7c0a872 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include #include @@ -574,8 +574,8 @@ static int newport_font_set(struct vc_data *vc, struct console_font *font, unsig return newport_set_font(vc->vc_num, font); } -static int newport_scroll(struct vc_data *vc, int t, int b, int dir, - int lines) +static bool newport_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + enum con_scroll dir, unsigned int lines) { int count, x, y; unsigned short *s, *d; @@ -595,7 +595,7 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir, (vc->vc_color & 0xf0) >> 4); } npregs->cset.topscan = (topscan - 1) & 0x3ff; - return 0; + return false; } count = (b - t - lines) * vc->vc_cols; @@ -670,7 +670,7 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir, } } } - return 1; + return true; } static int newport_dummy(struct vc_data *c) diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 3a10ac19598faf8306be2954891e1b100df8cd77..79c9bd8d30254a9001a0b139734b780a240041b2 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -153,12 +153,13 @@ static void sticon_cursor(struct vc_data *conp, int mode) } } -static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count) +static bool sticon_scroll(struct vc_data *conp, unsigned int t, + unsigned int b, enum con_scroll dir, unsigned int count) { struct sti_struct *sti = sticon_sti; if (vga_is_gfx) - return 0; + return false; sticon_cursor(conp, CM_ERASE); @@ -174,7 +175,7 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count) break; } - return 0; + return false; } static void sticon_init(struct vc_data *c, int init) diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 11576611a974bae39a356f21ae7ed3b5d5e0a613..c22a56232b7c0fa0d7542fd4a77db3ca8871f516 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -60,15 +60,6 @@ static struct vgastate vgastate; #define BLANK 0x0020 -#define CAN_LOAD_EGA_FONTS /* undefine if the user must not do this */ -#define CAN_LOAD_PALETTE /* undefine if the user must not do this */ - -/* You really do _NOT_ want to define this, unless you have buggy - * Trident VGA which will resize cursor when moving it between column - * 15 & 16. If you define this and your VGA is OK, inverse bug will - * appear. 
- */ -#undef TRIDENT_GLITCH #define VGA_FONTWIDTH 8 /* VGA does not support fontwidths != 8 */ /* * Interface used by the world @@ -83,14 +74,12 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch); static void vgacon_scrolldelta(struct vc_data *c, int lines); static int vgacon_set_origin(struct vc_data *c); static void vgacon_save_screen(struct vc_data *c); -static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, - int lines); static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); static struct uni_pagedir *vgacon_uni_pagedir; static int vgacon_refcount; /* Description of the hardware situation */ -static int vga_init_done __read_mostly; +static bool vga_init_done; static unsigned long vga_vram_base __read_mostly; /* Base of video memory */ static unsigned long vga_vram_end __read_mostly; /* End of video memory */ static unsigned int vga_vram_size __read_mostly; /* Size of video memory */ @@ -98,31 +87,31 @@ static u16 vga_video_port_reg __read_mostly; /* Video register select port */ static u16 vga_video_port_val __read_mostly; /* Video register value port */ static unsigned int vga_video_num_columns; /* Number of text columns */ static unsigned int vga_video_num_lines; /* Number of text lines */ -static int vga_can_do_color __read_mostly; /* Do we support colors? */ +static bool vga_can_do_color; /* Do we support colors? */ static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */ static unsigned char vga_video_type __read_mostly; /* Card type */ -static unsigned char vga_hardscroll_enabled __read_mostly; -static unsigned char vga_hardscroll_user_enable __read_mostly = 1; -static unsigned char vga_font_is_default = 1; +static bool vga_font_is_default = true; static int vga_vesa_blanked; -static int vga_palette_blanked; -static int vga_is_gfx; -static int vga_512_chars; +static bool vga_palette_blanked; +static bool vga_is_gfx; +static bool vga_512_chars; static int vga_video_font_height; static int vga_scan_lines __read_mostly; static unsigned int vga_rolled_over; -static int vgacon_text_mode_force; +static bool vgacon_text_mode_force; +static bool vga_hardscroll_enabled; +static bool vga_hardscroll_user_enable = true; bool vgacon_text_force(void) { - return vgacon_text_mode_force ? true : false; + return vgacon_text_mode_force; } EXPORT_SYMBOL(vgacon_text_force); static int __init text_mode(char *str) { - vgacon_text_mode_force = 1; + vgacon_text_mode_force = true; return 1; } @@ -136,7 +125,7 @@ static int __init no_scroll(char *str) * Braille reader made by F.H. Papenmeier (Germany). * Use the "no-scroll" bootflag. */ - vga_hardscroll_user_enable = vga_hardscroll_enabled = 0; + vga_hardscroll_user_enable = vga_hardscroll_enabled = false; return 1; } @@ -159,18 +148,10 @@ static inline void write_vga(unsigned char reg, unsigned int val) * handlers, thus the write has to be IRQ-atomic. 
*/ raw_spin_lock_irqsave(&vga_lock, flags); - -#ifndef SLOW_VGA v1 = reg + (val & 0xff00); v2 = reg + 1 + ((val << 8) & 0xff00); outw(v1, vga_video_port_reg); outw(v2, vga_video_port_reg); -#else - outb_p(reg, vga_video_port_reg); - outb_p(val >> 8, vga_video_port_val); - outb_p(reg + 1, vga_video_port_reg); - outb_p(val & 0xff, vga_video_port_val); -#endif raw_spin_unlock_irqrestore(&vga_lock, flags); } @@ -334,31 +315,8 @@ static void vgacon_restore_screen(struct vc_data *c) static void vgacon_scrolldelta(struct vc_data *c, int lines) { - if (!lines) /* Turn scrollback off */ - c->vc_visible_origin = c->vc_origin; - else { - int margin = c->vc_size_row * 4; - int ul, we, p, st; - - if (vga_rolled_over > - (c->vc_scr_end - vga_vram_base) + margin) { - ul = c->vc_scr_end - vga_vram_base; - we = vga_rolled_over + c->vc_size_row; - } else { - ul = 0; - we = vga_vram_size; - } - p = (c->vc_visible_origin - vga_vram_base - ul + we) % we + - lines * c->vc_size_row; - st = (c->vc_origin - vga_vram_base - ul + we) % we; - if (st < 2 * margin) - margin = 0; - if (p < margin) - p = 0; - if (p > st - margin) - p = st; - c->vc_visible_origin = vga_vram_base + (p + ul) % we; - } + vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base, + vga_vram_size); vga_set_mem_top(c); } #endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ @@ -427,7 +385,7 @@ static const char *vgacon_startup(void) } } else { /* If not, it is color. */ - vga_can_do_color = 1; + vga_can_do_color = true; vga_vram_base = 0xb8000; vga_video_port_reg = VGA_CRT_IC; vga_video_port_val = VGA_CRT_DC; @@ -451,18 +409,6 @@ static const char *vgacon_startup(void) request_resource(&ioport_resource, &vga_console_resource); -#ifdef VGA_CAN_DO_64KB - /* - * get 64K rather than 32K of video RAM. - * This doesn't actually work on all "VGA" - * controllers (it seems like setting MM=01 - * and COE=1 isn't necessarily a good idea) - */ - vga_vram_base = 0xa0000; - vga_vram_size = 0x10000; - outb_p(6, VGA_GFX_I); - outb_p(6, VGA_GFX_D); -#endif /* * Normalise the palette registers, to point * the 16 screen colours to the first 16 @@ -542,7 +488,7 @@ static const char *vgacon_startup(void) if (!vga_init_done) { vgacon_scrollback_startup(); - vga_init_done = 1; + vga_init_done = true; } return display_desc; @@ -634,7 +580,7 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, static void vgacon_invert_region(struct vc_data *c, u16 * p, int count) { - int col = vga_can_do_color; + const bool col = vga_can_do_color; while (count--) { u16 a = scr_readw(p); @@ -652,11 +598,6 @@ static void vgacon_set_cursor_size(int xpos, int from, int to) unsigned long flags; int curs, cure; -#ifdef TRIDENT_GLITCH - if (xpos < 16) - from--, to--; -#endif - if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto)) return; cursor_size_lastfrom = from; @@ -858,12 +799,10 @@ static void vga_set_palette(struct vc_data *vc, const unsigned char *table) static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table) { -#ifdef CAN_LOAD_PALETTE if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked || !con_is_visible(vc)) return; vga_set_palette(vc, table); -#endif } /* structure holding original VGA register settings */ @@ -1006,24 +945,24 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) } if (vga_palette_blanked) { vga_set_palette(c, color_table); - vga_palette_blanked = 0; + vga_palette_blanked = false; return 0; } - vga_is_gfx = 0; + vga_is_gfx = false; /* Tell console.c that it has to restore the 
screen itself */ return 1; case 1: /* Normal blanking */ case -1: /* Obsolete */ if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) { vga_pal_blank(&vgastate); - vga_palette_blanked = 1; + vga_palette_blanked = true; return 0; } vgacon_set_origin(c); scr_memsetw((void *) vga_vram_base, BLANK, c->vc_screenbuf_size); if (mode_switch) - vga_is_gfx = 1; + vga_is_gfx = true; return 1; default: /* VESA blanking */ if (vga_video_type == VIDEO_TYPE_VGAC) { @@ -1046,15 +985,14 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) * (sizif@botik.yaroslavl.su). */ -#ifdef CAN_LOAD_EGA_FONTS - #define colourmap 0xa0000 /* Pauline Middelink reports that we should use 0xA0000 for the bwmap as well.. */ #define blackwmap 0xa0000 #define cmapsz 8192 -static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) +static int vgacon_do_font_op(struct vgastate *state, char *arg, int set, + bool ch512) { unsigned short video_port_status = vga_video_port_reg + 6; int font_select = 0x00, beg, i; @@ -1063,10 +1001,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) if (vga_video_type != VIDEO_TYPE_EGAM) { charmap = (char *) VGA_MAP_MEM(colourmap, 0); beg = 0x0e; -#ifdef VGA_CAN_DO_64KB - if (vga_video_type == VIDEO_TYPE_VGAC) - beg = 0x06; -#endif } else { charmap = (char *) VGA_MAP_MEM(blackwmap, 0); beg = 0x0a; @@ -1080,7 +1014,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) if (!arg) return -EINVAL; /* Return to default font not supported */ - vga_font_is_default = 0; + vga_font_is_default = false; font_select = ch512 ? 0x04 : 0x00; #else /* @@ -1091,7 +1025,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) if (set) { vga_font_is_default = !arg; if (!arg) - ch512 = 0; /* Default font is always 256 */ + ch512 = false; /* Default font is always 256 */ font_select = arg ? (ch512 ? 0x0e : 0x0a) : 0x00; } @@ -1295,13 +1229,6 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font) return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars); } -#else - -#define vgacon_font_set NULL -#define vgacon_font_get NULL - -#endif - static int vgacon_resize(struct vc_data *c, unsigned int width, unsigned int height, unsigned int user) { @@ -1350,17 +1277,17 @@ static void vgacon_save_screen(struct vc_data *c) c->vc_screenbuf_size > vga_vram_size ? 
vga_vram_size : c->vc_screenbuf_size); } -static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, - int lines) +static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, + enum con_scroll dir, unsigned int lines) { unsigned long oldo; unsigned int delta; if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT) - return 0; + return false; if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2) - return 0; + return false; vgacon_restore_screen(c); oldo = c->vc_origin; @@ -1396,7 +1323,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, c->vc_visible_origin = c->vc_origin; vga_set_mem_top(c); c->vc_pos = (c->vc_pos - oldo) + c->vc_origin; - return 1; + return true; } diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c index 17f21cedff9b549c018ba62c620123d2958f6ce0..c0c6b88d383915fe6e5924bd49434f6a13170278 100644 --- a/drivers/video/fbdev/68328fb.c +++ b/drivers/video/fbdev/68328fb.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c index 19ad8645d93cd25a9f41375bcb30a1677ae721b7..e5d9bfc1703a5ff9fcaafe44fbb3c7e152fa16c7 100644 --- a/drivers/video/fbdev/amba-clcd-versatile.c +++ b/drivers/video/fbdev/amba-clcd-versatile.c @@ -526,8 +526,8 @@ int versatile_clcd_init_panel(struct clcd_fb *fb, np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, &clcd_id); if (!np) { - dev_err(dev, "no Versatile syscon node\n"); - return -ENODEV; + /* Vexpress does not have this */ + return 0; } versatile_clcd_type = (enum versatile_clcd)clcd_id->data; diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 37a37c4d04cbeb7211c5a02cef724cf47b16de5f..8c4dc1e1f94fdb53ad7acf2808fa400c03787857 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -118,6 +118,31 @@ static inline bool fb_base_is_valid(void) return false; } +#define efifb_attr_decl(name, fmt) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return sprintf(buf, fmt "\n", (screen_info.lfb_##name)); \ +} \ +static DEVICE_ATTR_RO(name) + +efifb_attr_decl(base, "0x%x"); +efifb_attr_decl(linelength, "%u"); +efifb_attr_decl(height, "%u"); +efifb_attr_decl(width, "%u"); +efifb_attr_decl(depth, "%u"); + +static struct attribute *efifb_attrs[] = { + &dev_attr_base.attr, + &dev_attr_linelength.attr, + &dev_attr_width.attr, + &dev_attr_height.attr, + &dev_attr_depth.attr, + NULL +}; +ATTRIBUTE_GROUPS(efifb); + static int efifb_probe(struct platform_device *dev) { struct fb_info *info; @@ -205,14 +230,13 @@ static int efifb_probe(struct platform_device *dev) } else { /* We cannot make this fatal. 
Sometimes this comes from magic spaces our resource handlers simply don't know about */ - printk(KERN_WARNING - "efifb: cannot reserve video memory at 0x%lx\n", + pr_warn("efifb: cannot reserve video memory at 0x%lx\n", efifb_fix.smem_start); } info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev); if (!info) { - printk(KERN_ERR "efifb: cannot allocate framebuffer\n"); + pr_err("efifb: cannot allocate framebuffer\n"); err = -ENOMEM; goto err_release_mem; } @@ -230,16 +254,15 @@ static int efifb_probe(struct platform_device *dev) info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); if (!info->screen_base) { - printk(KERN_ERR "efifb: abort, cannot ioremap video memory " - "0x%x @ 0x%lx\n", + pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", efifb_fix.smem_len, efifb_fix.smem_start); err = -EIO; goto err_release_fb; } - printk(KERN_INFO "efifb: framebuffer at 0x%lx, using %dk, total %dk\n", + pr_info("efifb: framebuffer at 0x%lx, using %dk, total %dk\n", efifb_fix.smem_start, size_remap/1024, size_total/1024); - printk(KERN_INFO "efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n", + pr_info("efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n", efifb_defined.xres, efifb_defined.yres, efifb_defined.bits_per_pixel, efifb_fix.line_length, screen_info.pages); @@ -247,7 +270,7 @@ static int efifb_probe(struct platform_device *dev) efifb_defined.xres_virtual = efifb_defined.xres; efifb_defined.yres_virtual = efifb_fix.smem_len / efifb_fix.line_length; - printk(KERN_INFO "efifb: scrolling: redraw\n"); + pr_info("efifb: scrolling: redraw\n"); efifb_defined.yres_virtual = efifb_defined.yres; /* some dummy values for timing to make fbset happy */ @@ -265,7 +288,7 @@ static int efifb_probe(struct platform_device *dev) efifb_defined.transp.offset = screen_info.rsvd_pos; efifb_defined.transp.length = screen_info.rsvd_size; - printk(KERN_INFO "efifb: %s: " + pr_info("efifb: %s: " "size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n", "Truecolor", screen_info.rsvd_size, @@ -285,12 +308,19 @@ static int efifb_probe(struct platform_device *dev) info->fix = efifb_fix; info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE; - if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) { - printk(KERN_ERR "efifb: cannot allocate colormap\n"); + err = sysfs_create_groups(&dev->dev.kobj, efifb_groups); + if (err) { + pr_err("efifb: cannot add sysfs attrs\n"); goto err_unmap; } - if ((err = register_framebuffer(info)) < 0) { - printk(KERN_ERR "efifb: cannot register framebuffer\n"); + err = fb_alloc_cmap(&info->cmap, 256, 0); + if (err < 0) { + pr_err("efifb: cannot allocate colormap\n"); + goto err_groups; + } + err = register_framebuffer(info); + if (err < 0) { + pr_err("efifb: cannot register framebuffer\n"); goto err_fb_dealoc; } fb_info(info, "%s frame buffer device\n", info->fix.id); @@ -298,6 +328,8 @@ static int efifb_probe(struct platform_device *dev) err_fb_dealoc: fb_dealloc_cmap(&info->cmap); +err_groups: + sysfs_remove_groups(&dev->dev.kobj, efifb_groups); err_unmap: iounmap(info->screen_base); err_release_fb: @@ -313,6 +345,7 @@ static int efifb_remove(struct platform_device *pdev) struct fb_info *info = platform_get_drvdata(pdev); unregister_framebuffer(info); + sysfs_remove_groups(&pdev->dev.kobj, efifb_groups); framebuffer_release(info); return 0; diff --git a/drivers/video/fbdev/hitfb.c b/drivers/video/fbdev/hitfb.c index 9d68dc9ee7bf2a3e70f04298df0b295db8751fac..abe3e54d45064d6f929c96e369c0c7a3ca78fbb6 100644 --- a/drivers/video/fbdev/hitfb.c +++ b/drivers/video/fbdev/hitfb.c @@ 
-22,7 +22,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c index 9476d196f510c2c93e9275befc652bda0b7412f7..16f16f5e1a4bbc1e7d93d97d4b29b0f0356cb11a 100644 --- a/drivers/video/fbdev/hpfb.c +++ b/drivers/video/fbdev/hpfb.c @@ -16,7 +16,7 @@ #include #include -#include +#include static struct fb_info fb_info = { .fix = { diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c index 8778e01cebac642fe4a03a7d292c967d9e3eb13a..1c3c7ab26a959d7dd88f8da0d25f7bc9cecaed9d 100644 --- a/drivers/video/fbdev/mx3fb.c +++ b/drivers/video/fbdev/mx3fb.c @@ -33,7 +33,7 @@ #include #include -#include +#include #define MX3FB_NAME "mx3_sdc_fb" diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c index 7487f76f62750d3f3e7e59911ec6aef98c53cf7e..04ea330ccf5da2501d679fd929bad51cf90f58cf 100644 --- a/drivers/video/fbdev/q40fb.c +++ b/drivers/video/fbdev/q40fb.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index d0a4e2f79a5703c3b0ed1b968fcac40f43052968..d80bc8a3200fa9c8435bc0b606a4bae0f02728f1 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -31,7 +31,7 @@ #include #include -#include +#include #include #ifdef CONFIG_PM diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 7df4228e25f05fa24c6275dce7c940b3f93c63f8..accfef71e984763ce83a380337199d0915b5c649 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -67,7 +67,7 @@ #include #include /* for HP-UX compatibility */ -#include +#include #include "sticore.h" diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c index 10951c82f6ed61fa343ddfcf6f8ff64a65dab35c..d570e19a286494da5f3524d23ff7a3f2d0267cd9 100644 --- a/drivers/video/fbdev/w100fb.c +++ b/drivers/video/fbdev/w100fb.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include